diff --git a/.azure-pipelines/windows.yml b/.azure-pipelines/windows.yml new file mode 100644 index 0000000000..8b7e990fe2 --- /dev/null +++ b/.azure-pipelines/windows.yml @@ -0,0 +1,51 @@ + +parameters: + name: '' + vmImage: '' + matrix: [] + +jobs: +- job: ${{ parameters.name }} + pool: + vmImage: ${{ parameters.vmImage }} + variables: + EXTRA_WHEELS: "https://5cf40426d9f06eb7461d-6fe47d9331aba7cd62fc36c7196769e4.ssl.cf2.rackcdn.com" + DEPENDS: numpy scipy matplotlib h5py pydicom + CHECK_TYPE: test + strategy: + matrix: + ${{ insert }}: ${{ parameters.matrix }} + + steps: + - task: UsePythonVersion@0 + inputs: + versionSpec: '$(PYTHON_VERSION)' + addToPath: true + architecture: '$(PYTHON_ARCH)' + - script: | + echo %PYTHONHASHSEED% + displayName: 'Display hash seed' + - script: | + python -m pip install --upgrade pip setuptools>=30.3.0 wheel + displayName: 'Update build tools' + - script: | + python -m pip install --find-links %EXTRA_WHEELS% %DEPENDS% + displayName: 'Install dependencies' + - script: | + python -m pip install .[$(CHECK_TYPE)] + SET NIBABEL_DATA_DIR=%CD%\\nibabel-data + displayName: 'Install nibabel' + - script: | + mkdir for_testing + cd for_testing + cp ../.coveragerc . + pytest --doctest-modules --cov nibabel -v --pyargs nibabel + displayName: 'Pytest tests' + condition: and(succeeded(), eq(variables['CHECK_TYPE'], 'test')) + - script: | + python -m pip install codecov + cd for_testing + codecov + displayName: 'Upload To Codecov' + env: + CODECOV_TOKEN: $(CODECOV_TOKEN) diff --git a/.coveragerc b/.coveragerc index e2ec8ff3cd..57747ec0d8 100644 --- a/.coveragerc +++ b/.coveragerc @@ -6,3 +6,4 @@ omit = */externals/* */benchmarks/* */tests/* + nibabel/_version.py diff --git a/.gitattributes b/.gitattributes index d32a3d189c..9f3e8c9167 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1 +1 @@ -nibabel/COMMIT_INFO.txt export-subst +nibabel/_version.py export-subst diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..b3e73ac8c8 --- /dev/null +++ b/.github/CODE_OF_CONDUCT.md @@ -0,0 +1,107 @@ +# Community Guidelines + +Nibabel is a [NIPY](https://nipy.org) project, and we strive to adhere to the +[NIPY Community Code](https://nipy.org/conduct.html), reproduced below. + +The NIPY community is a community of practice devoted to the use of the Python programming language +in the analysis of neuroimaging data. The following code of conduct is a guideline for our behavior +as we participate in this community. + +It is based on, and heavily inspired by a reading of the Python community code of conduct, the +Apache foundation code of conduct, the Debian code of conduct, and the Ten Principles of Burning +Man. + +## The code of conduct for the NIPY community + +The Neuroimaging in Python (NIPY) community is made up of members with a diverse set of skills, +personalities, background, and experiences. We welcome these differences because they are the +source of diverse ideas, solutions and decisions about our work. Decisions we make affect users, +colleagues, and through scientific results, the general public. We take these consequences +seriously when making decisions. When you are working with members of the community, we encourage +you to follow these guidelines, which help steer our interactions and help keep NIPY a positive, +successful, and growing community. + +### A member of the NIPY community is: + +#### Open + +Members of the community are open to collaboration. 
Be it on the reuse of data, on the +implementation of methods, on finding technical solutions, establishing best practices, and +otherwise. We are accepting of all who wish to take part in our activities, fostering an +environment where anyone can participate and everyone can make a difference. + +#### Be collaborative! + +Our work will be used by other people, and in turn we will depend on the work of others. When we +make something for the benefit of others, we are willing to explain to others how it works, so that +they can build on the work to make it even better. We are willing to provide constructive criticism +on the work of others and accept criticism of our own work, as the experiences and skill sets of +other members contribute to the whole of our efforts. + +#### Be inquisitive! + +Nobody knows everything! Asking questions early avoids many problems later, so questions are +encouraged, though they may be directed to the appropriate forum. Those who are asked should be +responsive and helpful, within the context of our shared goal of improving neuroimaging practice. + +#### Considerate + +Members of the community are considerate of their peers. We are thoughtful when addressing the +efforts of others, keeping in mind that often-times the labor was completed simply for the good of +the community. We are attentive in our communications, whether in person or online, and we are +tactful when approaching differing views. + +#### Be careful in the words that you choose: + +We value courtesy, kindness and inclusiveness in all our interactions. Therefore, we take +responsibility for our own speech. In particular, we avoid: + + * Personal insults. + * Violent threats or language directed against another person. + * Sexist, racist, or otherwise discriminatory jokes and language. + * Any form of sexual or violent material. + * Sharing private content, such as emails sent privately or non-publicly, or unlogged forums such + as IRC channel history. + * Excessive or unnecessary profanity. + * Repeated harassment of others. In general, if someone asks you to stop, then stop. + * Advocating for, or encouraging, any of the above behaviour. + +#### Try to be concise in communication + +Keep in mind that what you write once will be read by many others. Writing a short email means +people can understand the conversation as efficiently as possible. Even short emails should always +strive to be empathetic, welcoming, friendly and patient. When a long explanation is necessary, +consider adding a summary. + +Try to bring new ideas to a conversation, so that each message adds something unique to the +conversation. Keep in mind that, when using email, the rest of the thread still contains the other +messages with arguments that have already been made. + +Try to stay on topic, especially in discussions that are already fairly long and complex. + +#### Respectful + +Members of the community are respectful. We are respectful of others, their positions, their +skills, their commitments, and their efforts. We are respectful of the volunteer and professional +efforts that permeate the NIPY community. We are respectful of the processes set forth in the +community, and we work within them. When we disagree, we are courteous and kind in raising our +issues. + +## Incident Reporting + +We put great value on respectful, friendly and helpful communication. 
+ +If you feel that any of our Nibabel communications lack respect, or are unfriendly or unhelpful, +please try the following steps: + +* If you feel able, please let the person who has sent the email or comment that you found it + disrespectful / unhelpful / unfriendly, and why; + +* If you don't feel able to do that, or that didn't work, please contact Chris Markiewicz directly + by email (), and he will do his best to resolve it. + If you don't feel comfortable contacting Chris, please email Matthew Brett + () instead. + +## Attribution + +The vast majority of the above was taken from the NIPY Code of Conduct. diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md new file mode 100644 index 0000000000..81687ac149 --- /dev/null +++ b/.github/CONTRIBUTING.md @@ -0,0 +1,16 @@ +# Contributing to NiBabel + +Welcome to the NiBabel repository! +We're excited you're here and want to contribute. + +Please see the [NiBabel Developer Guidelines][link_devguide] on our +on our [documentation website][link_docs]. + +These guidelines are designed to make it as easy as possible to get involved. +If you have any questions that aren't discussed in our documentation, or it's +difficult to find what you're looking for, please let us know by opening an +[issue][link_issues]! + +[link_docs]: https://nipy.org/nibabel +[link_devguide]: https://nipy.org/nibabel/devel/devguide.html +[link_issues]: https://github.com/poldracklab/fmriprep/issues diff --git a/.gitignore b/.gitignore index df018f0ead..e876975c27 100644 --- a/.gitignore +++ b/.gitignore @@ -84,3 +84,5 @@ Thumbs.db doc/source/reference venv/ .buildbot.patch +.vscode +for_testing/ diff --git a/.gitmodules b/.gitmodules index 7ff8d61885..a0dc77c8ec 100644 --- a/.gitmodules +++ b/.gitmodules @@ -6,7 +6,7 @@ url = git://github.com/matthew-brett/nitest-minc2.git [submodule "nipy-ecattest"] path = nibabel-data/nipy-ecattest - url = https://github.com/freec84/nipy-ecattest + url = https://github.com/effigies/nipy-ecattest [submodule "nibabel-data/nitest-freesurfer"] path = nibabel-data/nitest-freesurfer url = https://bitbucket.org/nipy/nitest-freesurfer.git @@ -16,3 +16,6 @@ [submodule "nibabel-data/nitest-cifti2"] path = nibabel-data/nitest-cifti2 url = https://github.com/demianw/nibabel-nitest-cifti2.git +[submodule "nibabel-data/nitest-dicom"] + path = nibabel-data/nitest-dicom + url = https://github.com/effigies/nitest-dicom diff --git a/.mailmap b/.mailmap index d917d4d279..9701ddf503 100644 --- a/.mailmap +++ b/.mailmap @@ -13,7 +13,9 @@ B. Nolan Nichols Nolan Nichols bpinsard Basile Pinsard bpinsard Ben Cipollini Ben Cipollini +Benjamin C Darwin Bertrand Thirion bthirion +Cameron Riddell <31414128+CRiddler@users.noreply.github.com> Christian Haselgrove Christian Haselgrove Christopher J. Markiewicz Chris Johnson Christopher J. Markiewicz Chris Markiewicz @@ -38,18 +40,24 @@ Jean-Baptiste Poline jbpoline Jon Haitz Legarreta Jon Haitz Legarreta Gorroño Kesshi Jordan kesshijordan Kevin S. Hahn Kevin S. Hahn +Konstantinos Raktivan constracti Krish Subramaniam Krish Subramaniam +Krzysztof J. Gorgolewski +Krzysztof J. Gorgolewski Marc-Alexandre Côté Marc-Alexandre Cote Mathias Goncalves mathiasg +Matthew Cieslak Matt Cieslak Michael Hanke Michael Hanke -Nguyen, Ly lxn2 +Michiel Cottaar Michiel Cottaar +Ly Nguyen lxn2 Oliver P. 
Hinds ohinds +Oscar Esteban Paul McCarthy Paul McCarthy Satrajit Ghosh Satrajit Ghosh Serge Koudoro skoudoro Stephan Gerhard Stephan Gerhard Thomas Roos Roosted7 -Venky Reddy R3DDY97 +Venkateswara Reddy Reddam R3DDY97 Yaroslav O. Halchenko Yaroslav O. Halchenko Yaroslav Halchenko diff --git a/.pep8speaks.yml b/.pep8speaks.yml new file mode 100644 index 0000000000..0a0d8c619f --- /dev/null +++ b/.pep8speaks.yml @@ -0,0 +1,12 @@ +scanner: + diff_only: True # Only show errors caused by the patch + linter: flake8 + +message: # Customize the comment made by the bot + opened: # Messages when a new PR is submitted + header: "Hello @{name}, thank you for submitting the Pull Request!" + footer: "To test for issues locally, `pip install flake8` and then run `flake8 nibabel`." + updated: # Messages when new commits are added to the PR + header: "Hello @{name}, Thank you for updating!" + footer: "To test for issues locally, `pip install flake8` and then run `flake8 nibabel`." + no_errors: "Cheers! There are no style issues detected in this Pull Request. :beers: " diff --git a/.travis.yml b/.travis.yml index 367a105045..fe7fcee141 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,159 +4,151 @@ # for it to be on multiple physical lines, so long as you remember: - There # can't be any leading "-"s - All newlines will be removed, so use ";"s +os: linux dist: xenial -sudo: true language: python +cache: pip -cache: - directories: - - $HOME/.cache/pip env: global: - - DEPENDS="six numpy scipy matplotlib h5py pillow pydicom" - - OPTIONAL_DEPENDS="" + - SETUP_REQUIRES="pip setuptools>=30.3.0 wheel" + - DEPENDS="numpy scipy matplotlib h5py pillow pydicom indexed_gzip" - INSTALL_TYPE="setup" - CHECK_TYPE="test" - - EXTRA_WHEELS="https://5cf40426d9f06eb7461d-6fe47d9331aba7cd62fc36c7196769e4.ssl.cf2.rackcdn.com" + - EXTRA_WHEELS="https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com" - PRE_WHEELS="https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com" - EXTRA_PIP_FLAGS="--find-links=$EXTRA_WHEELS" - PRE_PIP_FLAGS="--pre $EXTRA_PIP_FLAGS --find-links $PRE_WHEELS" + python: - - 3.5 - - 3.6 - 3.7 -matrix: + - 3.8 + +jobs: include: - - python: 3.4 - dist: trusty - sudo: false - # Absolute minimum dependencies - - python: 2.7 + # Basic dependencies only + - python: 3.6 env: - - DEPENDS="numpy==1.7.1" + - DEPENDS="-r requirements.txt" + # Clean install + - python: 3.6 + env: + - DEPENDS="" + - CHECK_TYPE=skiptests # Absolute minimum dependencies - - python: 2.7 + - python: 3.6 env: - - DEPENDS="numpy==1.7.1" - - CHECK_TYPE="import" + - SETUP_REQUIRES="setuptools==30.3.0" + - DEPENDS="-r min-requirements.txt" # Absolute minimum dependencies plus oldest MPL - # Check these against: - # nibabel/info.py - # doc/source/installation.rst - # requirements.txt - - python: 2.7 + - python: 3.6 env: - - DEPENDS="numpy==1.7.1 matplotlib==1.3.1" + - DEPENDS="-r min-requirements.txt matplotlib==1.5.3" # Minimum pydicom dependency - - python: 2.7 + - python: 3.6 env: - - DEPENDS="numpy==1.7.1 pydicom==0.9.9 pillow==2.6" + - DEPENDS="-r min-requirements.txt pydicom==0.9.9 pillow==2.6" # pydicom master branch - - python: 3.5 + - python: 3.6 env: - DEPENDS="numpy git+https://github.com/pydicom/pydicom.git@master" - # test 2.7 against pre-release builds of everything - - python: 2.7 + # test 3.8 against pre-release builds of everything + - python: 3.8 env: - EXTRA_PIP_FLAGS="$PRE_PIP_FLAGS" - # test 3.5 against pre-release builds of everything - - python: 3.5 + # OSX Python 
support is basically accidental. Take whatever version we can + # get and test with full dependencies... + - os: osx + language: minimal + # and pre-releases. No min-requirements.txt because we can't assume a wheel that old. + - os: osx + language: minimal env: - EXTRA_PIP_FLAGS="$PRE_PIP_FLAGS" - - python: 2.7 + # Test that PyPI installs from source pass + - python: 3.6 env: - INSTALL_TYPE=sdist - - python: 2.7 + # Wheels (binary distributions) + - python: 3.6 env: - INSTALL_TYPE=wheel - - python: 2.7 - env: - - INSTALL_TYPE=requirements - - python: 2.7 + # Install from git archive (e.g., https://github.com/nipy/nibabel/archive/master.zip) + - python: 3.6 env: - - CHECK_TYPE="style" - - python: 3.5 + - INSTALL_TYPE=archive + # Run flake8... Might not be needed now we have pep8speaks + - python: 3.6 env: - CHECK_TYPE="style" # Documentation doctests - - python: 2.7 + - python: 3.6 env: - - CHECK_TYPE="doc_doctests" - - python: 3.5 - env: - - CHECK_TYPE="doc_doctests" - # Run tests with indexed_gzip present - - python: 2.7 - env: - - OPTIONAL_DEPENDS="indexed_gzip" - - python: 3.5 - env: - - OPTIONAL_DEPENDS="indexed_gzip" + - CHECK_TYPE="doc" + +# Set up virtual environment, build package, build from depends before_install: - - travis_retry python -m pip install --upgrade pip - - travis_retry pip install --upgrade virtualenv - - virtualenv --python=python venv + - travis_retry python3 -m pip install --upgrade pip virtualenv + - virtualenv --python=python3 venv - source venv/bin/activate - - python --version # just to check - - travis_retry pip install -U pip setuptools>=27.0 wheel - - travis_retry pip install coverage - - if [ "${CHECK_TYPE}" == "test" ]; then - travis_retry pip install nose mock; - fi - - if [ "${CHECK_TYPE}" == "style" ]; then - travis_retry pip install flake8; + - python3 --version # just to check + - travis_retry python3 -m pip install -U $SETUP_REQUIRES + - which python3 + - which pip + - | + if [ "$INSTALL_TYPE" == "sdist" ]; then + python3 setup.py egg_info # check egg_info while we're here + python3 setup.py sdist + export ARCHIVE=$( ls dist/*.tar.gz ) + elif [ "$INSTALL_TYPE" == "wheel" ]; then + python3 setup.py bdist_wheel + export ARCHIVE=$( ls dist/*.whl ) + elif [ "$INSTALL_TYPE" == "archive" ]; then + export ARCHIVE="package.tar.gz" + git archive -o $ARCHIVE HEAD fi - - travis_retry pip install $EXTRA_PIP_FLAGS $DEPENDS $OPTIONAL_DEPENDS + - if [ -n "$DEPENDS" ]; then pip install $EXTRA_PIP_FLAGS $DEPENDS; fi + # command to install dependencies install: - | if [ "$INSTALL_TYPE" == "setup" ]; then - python setup.py install - elif [ "$INSTALL_TYPE" == "sdist" ]; then - python setup_egg.py egg_info # check egg_info while we're here - python setup_egg.py sdist - pip install $EXTRA_PIP_FLAGS dist/*.tar.gz - elif [ "$INSTALL_TYPE" == "wheel" ]; then - python setup_egg.py bdist_wheel - pip install $EXTRA_PIP_FLAGS dist/*.whl - elif [ "$INSTALL_TYPE" == "requirements" ]; then - pip install $EXTRA_PIP_FLAGS -r requirements.txt - python setup.py install + python3 setup.py install + else + pip install $EXTRA_PIP_FLAGS $ARCHIVE fi + # Basic import check + - python3 -c 'import nibabel; print(nibabel.__version__)' + - if [ "$CHECK_TYPE" == "skiptests" ]; then exit 0; fi + +before_script: # Point to nibabel data directory - export NIBABEL_DATA_DIR="$PWD/nibabel-data" + # Because nibabel is already installed, will just look up the extra + - python3 -m pip install $EXTRA_PIP_FLAGS "nibabel[$CHECK_TYPE]" + # command to run tests, e.g. 
python setup.py test script: - | if [ "${CHECK_TYPE}" == "style" ]; then # Run styles only on core nibabel code. flake8 nibabel - elif [ "${CHECK_TYPE}" == "import" ]; then - # Import nibabel without attempting to test - # Allows us to check missing dependencies masked by testing libraries - printf 'import nibabel\nprint(nibabel.__version__)\n' > import_only.py - cat import_only.py - coverage run import_only.py - elif [ "${CHECK_TYPE}" == "doc_doctests" ]; then + elif [ "${CHECK_TYPE}" == "doc" ]; then cd doc - pip install -r ../doc-requirements.txt - make html; - make doctest; + make html && make doctest elif [ "${CHECK_TYPE}" == "test" ]; then # Change into an innocuous directory and find tests from installation mkdir for_testing cd for_testing cp ../.coveragerc . - nosetests --with-doctest --with-coverage --cover-package nibabel nibabel + pytest --doctest-modules --cov nibabel -v --pyargs nibabel else false fi -after_success: - - | - if [ "${CHECK_TYPE}" == "test" ]; then - travis_retry pip install codecov - codecov - fi + +after_script: + - travis_retry python3 -m pip install codecov + - codecov notifications: webhooks: http://nipy.bic.berkeley.edu:54856/travis diff --git a/.zenodo.json b/.zenodo.json index 79f1e2081b..17cc83715f 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -38,6 +38,11 @@ "name": "Halchenko, Yaroslav O.", "orcid": "0000-0003-3456-2493" }, + { + "affiliation": "Wellcome Centre for Integrative Neuroimaging, University of Oxford, UK", + "name": "Cottaar, Michiel", + "orcid": "0000-0003-4679-7724" + }, { "affiliation": "MIT, HMS", "name": "Ghosh, Satrajit", @@ -63,6 +68,10 @@ "name": "Lee, Gregory R.", "orcid": "0000-0001-8895-2740" }, + { + "name": "Wang, Hao-Ting", + "orcid": "0000-0003-4078-2038" + }, { "affiliation": "Harvard University - Psychology", "name": "Kastman, Erik", @@ -83,9 +92,14 @@ "name": "Moloney, Brendan" }, { - "affiliation": "Wellcome Centre for Integrative Neuroimaging, University of Oxford, UK", - "name": "Cottaar, Michiel", - "orcid": "0000-0003-4679-7724" + "affiliation": "MIT", + "name": "Goncalves, Mathias", + "orcid": "0000-0002-7252-7771" + }, + { + "affiliation": "Department of Psychology, University of California Davis, CA, USA", + "name": "Riddell, Cameron", + "orcid": "0000-0001-8950-0375" }, { "name": "Burns, Christopher" @@ -114,9 +128,24 @@ { "name": "Vincent, Robert D." }, + { + "affiliation": "Center for Magnetic Resonance Research, University of Minnesota", + "name": "Braun, Henry", + "orcid": "0000-0001-7003-9822" + }, { "name": "Subramaniam, Krish" }, + { + "affiliation": "MIT", + "name": "Jarecka, Dorota", + "orcid": "0000-0001-8282-2988" + }, + { + "affiliation": "Google", + "name": "Gorgolewski, Krzysztof J.", + "orcid": "0000-0003-3321-7583" + }, { "affiliation": "Rotman Research Institute, Baycrest Health Sciences, Toronto, ON, Canada", "name": "Raamana, Pradeep Reddy", @@ -130,11 +159,6 @@ { "name": "Baker, Eric M." 
}, - { - "affiliation": "MIT", - "name": "Goncalves, Mathias", - "orcid": "0000-0002-7252-7771" - }, { "name": "Hayashi, Soichi" }, @@ -147,6 +171,11 @@ { "name": "Hymers, Mark" }, + { + "affiliation": "Department of Psychology, Stanford University, CA, USA", + "name": "Esteban, Oscar", + "orcid": "0000-0001-8435-6191" + }, { "name": "Koudoro, Serge" }, @@ -162,9 +191,18 @@ { "name": "Nguyen, Ly" }, + { + "affiliation": "BrainSpec, Boston, MA", + "name": "Reddigari, Samir", + "orcid": "0000-0003-1472-5881" + }, { "name": "St-Jean, Samuel" }, + { + "name": "Panfilov, Egor", + "orcid": "0000-0002-2500-6375" + }, { "name": "Garyfallidis, Eleftherios" }, @@ -178,6 +216,11 @@ "name": "Kaczmarzyk, Jakub", "orcid": "0000-0002-5544-7577" }, + { + "affiliation": "Universit\u00e9 de Sherbrooke", + "name": "Legarreta, Jon Haitz", + "orcid": "0000-0002-9661-1396" + }, { "name": "Hahn, Kevin S." }, @@ -192,11 +235,6 @@ "name": "Poline, Jean-Baptiste", "orcid": "0000-0002-9794-749X" }, - { - "affiliation": "Universit\u00e9 de Sherbrooke", - "name": "Legarreta, Jon Haitz", - "orcid": "0000-0002-9661-1396" - }, { "affiliation": "University College London, London, UK", "name": "Stutters, Jon", @@ -207,6 +245,11 @@ "name": "Jordan, Kesshi", "orcid": "0000-0001-6313-0580" }, + { + "affiliation": "Department of Neuropsychiatry, University of Pennsylvania", + "name": "Cieslak, Matthew", + "orcid": "0000-0002-1931-4734" + }, { "name": "Moreno, Miguel Estevan" }, @@ -216,6 +259,10 @@ { "name": "Schwartz, Yannick" }, + { + "affiliation": "Hospital for Sick Children", + "name": "Darwin, Benjamin C" + }, { "affiliation": "INRIA", "name": "Thirion, Bertrand", @@ -239,6 +286,9 @@ "name": "Gonzalez, Ivan", "orcid": "0000-0002-6451-6909" }, + { + "name": "Palasubramaniam, Jath" + }, { "name": "Lecher, Justin" }, @@ -247,6 +297,10 @@ "name": "Leinweber, Katrin", "orcid": "0000-0001-5135-5758" }, + { + "affiliation": "National Technical University of Athens, Greece", + "name": "Raktivan, Konstantinos" + }, { "affiliation": "Friedrich-Alexander-Universit\u00e4t Erlangen-N\u00fcrnberg, Erlangen, Germany", "name": "Fischer, Peter", @@ -265,7 +319,12 @@ "name": "Roos, Thomas" }, { - "name": "Reddy, Venky" + "affiliation": "National Institute of Mental Health and Neuro-Sciences, India", + "name": "Reddam, Venkateswara Reddy", + "orcid": "0000-0001-6817-2966" + }, + { + "name": "Baratz, Zvi" }, { "name": "freec84" diff --git a/COPYING b/COPYING index 6f03ba5ccd..aadf96e90c 100644 --- a/COPYING +++ b/COPYING @@ -18,12 +18,13 @@ documentation is covered by the MIT license. The MIT License - Copyright (c) 2009-2014 Matthew Brett + Copyright (c) 2009-2019 Matthew Brett Copyright (c) 2010-2013 Stephan Gerhard Copyright (c) 2006-2014 Michael Hanke Copyright (c) 2011 Christian Haselgrove Copyright (c) 2010-2011 Jarrod Millman - Copyright (c) 2011-2014 Yaroslav Halchenko + Copyright (c) 2011-2019 Yaroslav Halchenko + Copyright (c) 2015-2019 Chris Markiewicz Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -217,34 +218,3 @@ the PDDL version 1.0 available at http://opendatacommons.org/licenses/pddl/1.0/ is courtesy of the University of Massachusetts Medical School, also released under the PDDL. 
- - -Six --------------------- - -In ``nibabel/externals/six.py`` - -Copied from: https://pypi.python.org/packages/source/s/six/six-1.3.0.tar.gz#md5=ec47fe6070a8a64c802363d2c2b1e2ee - -:: - - Copyright (c) 2010-2013 Benjamin Peterson - - Permission is hereby granted, free of charge, to any person obtaining a copy of - this software and associated documentation files (the "Software"), to deal in - the Software without restriction, including without limitation the rights to - use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of - the Software, and to permit persons to whom the Software is furnished to do so, - subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS - FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR - COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER - IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - diff --git a/Changelog b/Changelog index d32936cfd0..d19daad3b6 100644 --- a/Changelog +++ b/Changelog @@ -18,12 +18,252 @@ The full VCS changelog is available here: Nibabel releases **************** -Most work on NiBabel so far has been by Matthew Brett (MB), Michael Hanke (MH) -Ben Cipollini (BC), Marc-Alexandre Côté (MC), Chris Markiewicz (CM), Stephan -Gerhard (SG), Eric Larson (EL), Yaroslav Halchenko (YOH) and Chris Cheng (CC). +Most work on NiBabel so far has been by Matthew Brett (MB), Chris Markiewicz +(CM), Michael Hanke (MH), Marc-Alexandre Côté (MC), Ben Cipollini (BC), Paul +McCarthy (PM), Chris Cheng (CC), Yaroslav Halchenko (YOH), Satra Ghosh (SG), +Eric Larson (EL), Demian Wassermann, and Stephan Gerhard. References like "pr/298" refer to github pull request numbers. +3.0.2 (Monday 9 March 2020) +=========================== + +Bug fixes +--------- +* Attempt to find versioneer version when building docs (pr/894) (CM) +* Delay import of h5py until neded (backport of pr/889) (YOH, reviewed by CM) + +Maintenance +----------- +* Fix typo in documentation (backport of pr/893) (Zvi Baratz, reviewed by CM) +* Set minimum matplotlib to 1.5.3 to ensure wheels are available on all + supported Python versions. (backport of pr/887) (CM) +* Remove ``pyproject.toml`` for now. (issue/859) (CM) + + +3.0.1 (Monday 27 January 2020) +============================== + +Bug fixes +--------- +* Test failed by using array method on tuple. (pr/860) (Ben Darwin, reviewed by + CM) +* Validate ``ExpiredDeprecationError``\s, promoted by 3.0 release from + ``DeprecationWarning``\s. (pr/857) (CM) + +Maintenance +----------- +* Remove logic accommodating numpy without float16 types. (pr/866) (CM) +* Accommodate new numpy dtype strings. (pr/858) (CM) + + +3.0.0 (Wednesday 18 December 2019) +================================== + +New features +------------ +* ArrayProxy ``__array__()`` now accepts a ``dtype`` parameter, allowing + ``numpy.array(dataobj, dtype=...)`` calls, as well as casting directly + with a dtype (for example, ``numpy.float32(dataobj)``) to control the + output type. Scale factors (slope, intercept) are applied, but may be + cast to narrower types, to control memory usage. 
This is now the basis + of ``img.get_fdata()``, which will scale data in single precision if + the output type is ``float32``. (pr/844) (CM, reviewed by Alejandro + de la Vega, Ross Markello) +* GiftiImage method ``agg_data()`` to return usable data arrays (pr/793) + (Hao-Ting Wang, reviewed by CM) +* Accept ``os.PathLike`` objects in place of filenames (pr/610) (Cameron + Riddell, reviewed by MB, CM) +* Function to calculate obliquity of affines (pr/815) (Oscar Esteban, + reviewed by MB) + +Enhancements +------------ +* Improve testing of data scaling in ArrayProxy API (pr/847) (CM, reviewed + by Alejandro de la Vega) +* Document ``SpatialImage.slicer`` interface (pr/846) (CM) +* ``get_fdata(dtype=np.float32)`` will attempt to avoid casting data to + ``np.float64`` when scaling parameters would otherwise promote the data + type unnecessarily. (pr/833) (CM, reviewed by Ross Markello) +* ``ArraySequence`` now supports a large set of Python operators to combine + or update in-place. (pr/811) (MC, reviewed by Serge Koudoro, Philippe Poulin, + CM, MB) +* Warn, rather than fail, on DICOMs with unreadable Siemens CSA tags (pr/818) + (Henry Braun, reviewed by CM) +* Improve clarity of coordinate system tutorial (pr/823) (Egor Panfilov, + reviewed by MB) + +Bug fixes +--------- +* Sliced ``Tractogram``\s no longer ``apply_affine`` to the original + ``Tractogram``'s streamlines. (pr/811) (MC, reviewed by Serge Koudoro, + Philippe Poulin, CM, MB) +* Change strings with invalid escapes to raw strings (pr/827) (EL, reviewed + by CM) +* Re-import externals/netcdf.py from scipy to resolve numpy deprecation + (pr/821) (CM) + +Maintenance +----------- +* Remove replicated metadata for packaged data from MANIFEST.in (pr/845) (CM) +* Support Python >=3.5.1, including Python 3.8.0 (pr/787) (CM) +* Manage versioning with slightly customized Versioneer (pr/786) (CM) +* Reference Nipy Community Code and Nibabel Developer Guidelines in + GitHub community documents (pr/778) (CM, reviewed by MB) + +API changes and deprecations +---------------------------- +* Fully remove deprecated ``checkwarns`` and ``minc`` modules. (pr/852) (CM) +* The ``keep_file_open`` argument to file load operations and ``ArrayProxy``\s + no longer acccepts the value ``"auto"``, raising a ``ValueError``. (pr/852) + (CM) +* Deprecate ``ArraySequence.data`` in favor of ``ArraySequence.get_data()``, + which will return a copy. ``ArraySequence.data`` now returns a read-only + view. 
(pr/811) (MC, reviewed by Serge Koudoro, Philippe Poulin, CM, MB) +* Deprecate ``DataobjImage.get_data()`` API, to be removed in nibabel 5.0 + (pr/794, pr/809) (CM, reviewed by MB) + + +2.5.1 (Monday 23 September 2019) +================================ + +Enhancements +------------ +* Ignore endianness in ``nib-diff`` if values match (pr/799) (YOH, reviewed + by CM) + +Bug fixes +--------- +* Correctly handle Philips DICOMs w/ derived volume (pr/795) (Mathias + Goncalves, reviewed by CM) +* Raise CSA tag limit to 1000, parametrize for future relaxing (pr/798, + backported to 2.5.x in pr/800) (Henry Braun, reviewed by CM, MB) +* Coerce data types to match NIfTI intent codes when writing GIFTI data + arrays (pr/806) (CM, reported by Tom Holroyd) + +Maintenance +----------- +* Require h5py 2.10 for Windows + Python < 3.6 to resolve unexpected dtypes + in Minc2 data (pr/804) (CM, reviewed by YOH) + +API changes and deprecations +---------------------------- +* Deprecate ``nicom.dicomwrappers.Wrapper.get_affine()`` in favor of ``affine`` + property; final removal in nibabel 4.0 (pr/796) (YOH, reviewed by CM) + +2.5.0 (Sunday 4 August 2019) +============================ + +The 2.5.x series is the last with support for either Python 2 or Python 3.4. +Extended support for this series 2.5 will last through December 2020. + +Thanks for the test ECAT file and fix provided by Andrew Crabb. + +Enhancements +------------ +* Add SerializableImage class with to/from_bytes methods (pr/644) (CM, + reviewed by MB) +* Check CIFTI-2 data shape matches shape described by header (pr/774) + (Michiel Cottaar, reviewed by CM) + +Bug fixes +--------- +* Handle stricter numpy casting rules in tests (pr/768) (CM) + reviewed by PM) +* TRK header fields flipped in files written on big-endian systems + (pr/782) (CM, reviewed by YOH, MB) +* Load multiframe ECAT images with Python 3 (CM and Andrew Crabb) + +Maintenance +----------- +* Fix CodeCov paths on Appveyor for more accurate coverage (pr/769) (CM) +* Move to setuptools and reduce use ``nisext`` functions (pr/764) (CM, + reviewed by YOH) +* Better handle test setup/teardown (pr/785) (CM, reviewed by YOH) + +API changes and deprecations +---------------------------- +* Effect threatened warnings and set some deprecation timelines (pr/755) (CM) + * Trackvis methods now default to v2 formats + * ``nibabel.trackvis`` scheduled for removal in nibabel 4.0 + * ``nibabel.minc`` and ``nibabel.MincImage`` will be removed in nibabel 3.0 + +2.4.1 (Monday 27 May 2019) +========================== + +Contributions from Egor Pafilov, Jath Palasubramaniam, Richard Nemec, and +Dave Allured. 
+ +Enhancements +------------ +* Enable ``mmap``, ``keep_file_open`` options when loading any + ``DataobjImage`` (pr/759) (CM, reviewed by PM) + +Bug fixes +--------- +* Ensure loaded GIFTI files expose writable data arrays (pr/750) (CM, + reviewed by PM) +* Safer warning registry manipulation when checking for overflows (pr/753) + (CM, reviewed by MB) +* Correctly write .annot files with duplicate lables (pr/763) (Richard Nemec + with CM) + +Maintenance +----------- +* Fix typo in coordinate systems doc (pr/751) (Egor Panfilov, reviewed by + CM) +* Replace invalid MINC1 test file with fixed file (pr/754) (Dave Allured + with CM) +* Update Sphinx config to support recent Sphinx/numpydoc (pr/749) (CM, + reviewed by PM) +* Pacify ``FutureWarning`` and ``DeprecationWarning`` from h5py, numpy + (pr/760) (CM) +* Accommodate Python 3.8 deprecation of collections.MutableMapping + (pr/762) (Jath Palasubramaniam, reviewed by CM) + +API changes and deprecations +---------------------------- +* Deprecate ``keep_file_open == 'auto'`` (pr/761) (CM, reviewed by PM) + +2.4.0 (Monday 1 April 2019) +============================ + +New features +------------ +* Alternative ``Axis``-based interface for manipulating CIFTI-2 headers + (pr/641) (Michiel Cottaar, reviewed by Demian Wassermann, CM, SG) + +Enhancements +------------ +* Accept TCK files produced by tools with other delimiter/EOF defaults + (pr/720) (Soichi Hayashi, reviewed by CM, MB, MC) +* Allow BrainModels or Parcels to contain a single vertex in CIFTI + (pr/739) (Michiel Cottaar, reviewed by CM) +* Support for ``NIFTI_XFORM_TEMPLATE_OTHER`` xform code (pr/743) (CM) + +Bug fixes +--------- +* Skip refcheck in ArraySequence construction/extension (pr/719) (Ariel + Rokem, reviewed by CM, MC) +* Use safe resizing for ArraySequence extension (pr/724) (CM, reviewed + by MC) +* Fix typo in error message (pr/726) (Jon Haitz Legarreta Gorroño, + reviewed by CM) +* Support DICOM slice sorting in Python 3 (pr/728) (Samir Reddigari, + reviewed by CM) +* Correctly reorient dim_info when reorienting NIfTI images + (Konstantinos Raktivan, CM, reviewed by CM) + +Maintenance +----------- +* Import updates to reduce upstream deprecation warnings (pr/711, + pr/705, pr/738) (EL, YOH, reviewed by CM) +* Delay import of ``nibabel.testing``, ``nose`` and ``mock`` to speed up + import (pr/699) (CM) +* Increase coverage testing, drop coveralls (pr/722, pr/732) (CM) +* Add Zenodo metadata, sorted by commits (pr/732) (CM + others) +* Update author listing and copyrights (pr/742) (MB, reviewed by CM) + 2.3.3 (Wednesday 16 January 2019) ================================= @@ -116,16 +356,16 @@ Enhancements * Simplfiy MGHImage and add footer fields (pr/569) (CM, reviewed by MB) * Force sform/qform codes to be ints, rather than numpy types (pr/575) (Paul McCarthy, reviewed by MB, CM) -* Auto-fill color table in FreeSurfer annotation file (pr/592) (Paul McCarthy, +* Auto-fill color table in FreeSurfer annotation file (pr/592) (PM, reviewed by CM, MB) * Set default intent code for CIFTI2 images (pr/604) (Mathias Goncalves, - reviewed by CM, Satra Ghosh, MB, Tim Coalson) + reviewed by CM, SG, MB, Tim Coalson) * Raise informative error on empty files (pr/611) (Pradeep Raamana, reviewed by CM, MB) * Accept degenerate filenames such as ``.nii`` (pr/621) (Dimitri Papadopoulos-Orfanos, reviewed by Yaroslav Halchenko) * Take advantage of ``IndexedGzipFile`` ``drop_handles`` flag to release - filehandles by default (pr/614) (Paul McCarthy, reviewed by CM, MB) + filehandles by default 
(pr/614) (PM, reviewed by CM, MB) Bug fixes --------- @@ -135,7 +375,7 @@ Bug fixes CM, MB) * Accept lower-case orientation codes in TRK files (pr/600) (Kesshi Jordan, MB, reviewed by MB, MC, CM) -* Annotation file reading (pr/592) (Paul McCarthy, reviewed by CM, MB) +* Annotation file reading (pr/592) (PM, reviewed by CM, MB) * Fix buffer size calculation in ArraySequence (pr/597) (Serge Koudoro, reviewed by MC, MB, Eleftherios Garyfallidis, CM) * Resolve ``UnboundLocalError`` in Python 3 (pr/607) (Jakub Kaczmarzyk, @@ -175,14 +415,14 @@ Bug fixes * Set L/R labels in orthoview correctly (pr/564) (CM) * Defer use of ufunc / memmap test - allows "freezing" (pr/572) (MB, reviewed - by Satra Ghosh) + by SG) * Fix doctest failures with pre-release numpy (pr/582) (MB, reviewed by CM) Maintenance ----------- -* Update documentation around NIfTI qform/sform codes (pr/576) (Paul McCarthy, - reviewed by MB, CM) + (pr/580) (Bennet Fauber, reviewed by Paul McCarthy) +* Update documentation around NIfTI qform/sform codes (pr/576) (PM, + reviewed by MB, CM) + (pr/580) (Bennet Fauber, reviewed by PM) * Skip precision test on macOS, newer numpy (pr/583) (MB, reviewed by CM) * Simplify AppVeyor script, removing conda (pr/584) (MB, reviewed by CM) @@ -192,12 +432,11 @@ Maintenance New features ------------ -* CIFTI support (pr/249) (Satra Ghosh, Michiel Cottaar, BC, CM, Demian - Wassermann, MB) +* CIFTI support (pr/249) (SG, Michiel Cottaar, BC, CM, Demian Wassermann, MB) * Support for MRtrix TCK streamlines file format (pr/486) (MC, reviewed by MB, Arnaud Bore, J-Donald Tournier, Jean-Christophe Houde) * Added ``get_fdata()`` as default method to retrieve scaled floating point - data from ``DataobjImage``s (pr/551) (MB, reviewed by CM, Satra Ghosh) + data from ``DataobjImage``\s (pr/551) (MB, reviewed by CM, SG) Enhancements ------------ @@ -211,19 +450,19 @@ Enhancements * Allow dtype specifiers as fileslice input (pr/485) (MB) * Support "headerless" ArrayProxy specification, enabling memory-efficient ArrayProxy reshaping (pr/521) (CM) -* Allow unknown NIfTI intent codes, add FSL codes (pr/528) (Paul McCarthy) +* Allow unknown NIfTI intent codes, add FSL codes (pr/528) (PM) * Improve error handling for ``img.__getitem__`` (pr/533) (Ariel Rokem) * Delegate reorientation to SpatialImage classes (pr/544) (Mark Hymers, CM, reviewed by MB) * Enable using ``indexed_gzip`` to reduce memory usage when reading from - gzipped NIfTI and MGH files (pr/552) (Paul McCarthy, reviewed by MB, CM) + gzipped NIfTI and MGH files (pr/552) (PM, reviewed by MB, CM) Bug fixes --------- * Miscellaneous MINC reader fixes (pr/493) (Robert D. Vincent, reviewed by CM, MB) -* Fix corner case in ``wrapstruct.get`` (pr/516) (Paul McCarthy, reviewed by +* Fix corner case in ``wrapstruct.get`` (pr/516) (PM, reviewed by CM, MB) Maintenance @@ -524,7 +763,7 @@ Special thanks to Chris Burns, Jarrod Millman and Yaroslav Halchenko. * New feature release * Python 3.2 support -* Substantially enhanced gifti reading support (SG) +* Substantially enhanced gifti reading support (Stephan Gerhard) * Refactoring of trackvis read / write to allow reading and writing of voxel points and mm points in tracks. Deprecate use of negative voxel sizes; set voxel_order field in trackvis header. 
Thanks to Chris Filo diff --git a/MANIFEST.in b/MANIFEST.in index 11bf20b7c2..381cab34a5 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,11 +1,7 @@ -include AUTHOR COPYING Makefile* MANIFEST.in setup* README.* +include AUTHOR COPYING Makefile* MANIFEST.in setup* README.* pyproject.toml include Changelog TODO requirements.txt recursive-include doc * recursive-include bin * recursive-include tools * -# put this stuff back into setup.py (package_data) once I'm enlightened -# enough to accomplish this herculean task -recursive-include nibabel/tests/data * -recursive-include nibabel/nicom/tests/data * -recursive-include nibabel/gifti/tests/data * -include nibabel/COMMIT_INFO.txt +include versioneer.py +include nibabel/_version.py diff --git a/README.rst b/README.rst index fc3c3dd70f..1afdbc511a 100644 --- a/README.rst +++ b/README.rst @@ -18,14 +18,16 @@ Read / write access to some common neuroimaging file formats This package provides read +/- write access to some common medical and neuroimaging file formats, including: ANALYZE_ (plain, SPM99, SPM2 and later), -GIFTI_, NIfTI1_, NIfTI2_, MINC1_, MINC2_, MGH_ and ECAT_ as well as Philips -PAR/REC. We can read and write FreeSurfer_ geometry, annotation and -morphometry files. There is some very limited support for DICOM_. NiBabel is -the successor of PyNIfTI_. +GIFTI_, NIfTI1_, NIfTI2_, `CIFTI-2`_, MINC1_, MINC2_, `AFNI BRIK/HEAD`_, MGH_ and +ECAT_ as well as Philips PAR/REC. We can read and write FreeSurfer_ geometry, +annotation and morphometry files. There is some very limited support for +DICOM_. NiBabel is the successor of PyNIfTI_. .. _ANALYZE: http://www.grahamwideman.com/gw/brain/analyze/formatdoc.htm +.. _AFNI BRIK/HEAD: https://afni.nimh.nih.gov/pub/dist/src/README.attributes .. _NIfTI1: http://nifti.nimh.nih.gov/nifti-1/ .. _NIfTI2: http://nifti.nimh.nih.gov/nifti-2/ +.. _CIFTI-2: https://www.nitrc.org/projects/cifti/ .. _MINC1: https://en.wikibooks.org/wiki/MINC/Reference/MINC1_File_Format_Reference .. _MINC2: diff --git a/appveyor.yml b/appveyor.yml deleted file mode 100644 index 4b34c61447..0000000000 --- a/appveyor.yml +++ /dev/null @@ -1,41 +0,0 @@ -# vim ft=yaml -# CI on Windows via appveyor - -environment: - - matrix: - - PYTHON: C:\Python27 - - PYTHON: C:\Python27-x64 - - PYTHON: C:\Python34 - - PYTHON: C:\Python34-x64 - - PYTHON: C:\Python35 - - PYTHON: C:\Python35-x64 - - PYTHON: C:\Python36 - - PYTHON: C:\Python36-x64 - - PYTHON: C:\Python37 - - PYTHON: C:\Python37-x64 - -install: - # Prepend newly installed Python to the PATH of this build (this cannot be - # done from inside the powershell script as it would require to restart - # the parent CMD process). - - SET PATH=%PYTHON%;%PYTHON%\Scripts;%PATH% - - # Install the dependencies of the project. - - python -m pip install --upgrade pip setuptools wheel - - pip install numpy scipy matplotlib h5py pydicom - - pip install nose mock coverage codecov - - pip install . - - SET NIBABEL_DATA_DIR=%CD%\nibabel-data - -build: false # Not a C# project, build stuff at the test step instead. - -test_script: - # Change into an innocuous directory and find tests from installation - - mkdir for_testing - - cd for_testing - - cp ../.coveragerc . 
- - nosetests --with-doctest --with-coverage --cover-package nibabel nibabel - -after_test: - - codecov diff --git a/azure-pipelines.yml b/azure-pipelines.yml new file mode 100644 index 0000000000..b00c54209f --- /dev/null +++ b/azure-pipelines.yml @@ -0,0 +1,26 @@ +# Adapted from https://github.com/pandas-dev/pandas/blob/master/azure-pipelines.yml +jobs: +- template: .azure-pipelines/windows.yml + parameters: + name: Windows + vmImage: windows-2019 + matrix: + py35-x86: + py36-x86: + PYTHON_VERSION: '3.6' + PYTHON_ARCH: 'x86' + py36-x64: + PYTHON_VERSION: '3.6' + PYTHON_ARCH: 'x64' + py37-x86: + PYTHON_VERSION: '3.7' + PYTHON_ARCH: 'x86' + py37-x64: + PYTHON_VERSION: '3.7' + PYTHON_ARCH: 'x64' + py38-x86: + PYTHON_VERSION: '3.8' + PYTHON_ARCH: 'x86' + py38-x64: + PYTHON_VERSION: '3.8' + PYTHON_ARCH: 'x64' diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 0000000000..0285fa4b06 --- /dev/null +++ b/codecov.yml @@ -0,0 +1,2 @@ +fixes: + - "venv/Lib/site-packages/::" diff --git a/dev-requirements.txt b/dev-requirements.txt index f63af96cf4..69302061bc 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -1,4 +1,3 @@ # Requirements for running tests -r requirements.txt -nose -mock +pytest diff --git a/doc-requirements.txt b/doc-requirements.txt index 348e6cce9a..2036a0d6fb 100644 --- a/doc-requirements.txt +++ b/doc-requirements.txt @@ -1,7 +1,6 @@ # Requirements for building docs -r requirements.txt -# Sphinx >= 1.6 breaks the math_dollar extension -sphinx<=1.5.6 +sphinx numpydoc texext -matplotlib>=1.3 +matplotlib >=1.3.1 diff --git a/doc/source/_templates/reggie.html b/doc/source/_templates/reggie.html new file mode 100644 index 0000000000..835c2570d3 --- /dev/null +++ b/doc/source/_templates/reggie.html @@ -0,0 +1 @@ +
[image: "Reggie -- the one"]
diff --git a/doc/source/api.rst b/doc/source/api.rst index 1ae1bb416c..0f3cf1de26 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -23,6 +23,7 @@ File Formats analyze spm2analyze spm99analyze + cifti2 gifti freesurfer minc1 diff --git a/doc/source/conf.py b/doc/source/conf.py index 9a63ba7e32..d3e75237ab 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -21,6 +21,8 @@ import sys import os +from runpy import run_path +from configparser import ConfigParser # Check for external Sphinx extensions we depend on try: @@ -47,13 +49,16 @@ # -- General configuration ---------------------------------------------------- # We load the nibabel release info into a dict by explicit execution -rel = {} -with open(os.path.join('..', '..', 'nibabel', 'info.py'), 'r') as fobj: - exec(fobj.read(), rel) +rel = run_path(os.path.join('..', '..', 'nibabel', 'info.py')) # Write long description from info with open('_long_description.inc', 'wt') as fobj: - fobj.write(rel['LONG_DESCRIPTION']) + fobj.write(rel['long_description']) + +# Load metadata from setup.cfg +config = ConfigParser() +config.read(os.path.join('..', '..', 'setup.cfg')) +metadata = config['metadata'] # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. @@ -87,7 +92,7 @@ # General information about the project. project = u'NiBabel' -copyright = u'2006-2019, %(MAINTAINER)s <%(AUTHOR_EMAIL)s>' % rel +copyright = u'2006-2020, %(maintainer)s <%(author_email)s>' % metadata # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -201,7 +206,7 @@ #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. -html_sidebars = {'index': 'indexsidebar.html'} +html_sidebars = {'index': ['localtoc.html', 'relations.html', 'sourcelink.html', 'indexsidebar.html', 'searchbox.html', 'reggie.html']} # Additional templates that should be rendered to pages, maps page names to # template names. diff --git a/doc/source/coordinate_systems.rst b/doc/source/coordinate_systems.rst index ffb24a2e78..9541dc6f82 100644 --- a/doc/source/coordinate_systems.rst +++ b/doc/source/coordinate_systems.rst @@ -23,8 +23,8 @@ their brain, a single EPI volume, and a structural scan. In general we never use the person's name in the image filenames, but we make an exception in this case: -* :download:`somones_epi.nii.gz `. -* :download:`somones_anatomy.nii.gz `. +* :download:`someones_epi.nii.gz `. +* :download:`someones_anatomy.nii.gz `. We can load up the EPI image to get the image data array: @@ -215,7 +215,7 @@ From scanner to subject If the subject is lying in the usual position for a brain scan, face up and head first in the scanner, then scanner-left/right is also the left-right -axis of the subject's head, scanner-floor/ceiling is the anterior-posterior +axis of the subject's head, scanner-floor/ceiling is the posterior-anterior axis of the head and scanner-bore is the inferior-superior axis of the head. Sometimes the subject is not lying in the standard position. For example, the @@ -231,14 +231,14 @@ position of the subject. The most common subject-centered scanner coordinate system in neuroimaging is called "scanner RAS" (right, anterior, superior). 
Here the scanner axes are reordered and flipped so that the first axis is the scanner axis that is closest to the left to right axis of the subject, the -second is the closest scanner axis to the anterior-posterior axis of the +second is the closest scanner axis to the posterior-anterior axis of the subject, and the third is the closest scanner axis to the inferior-superior axis of the subject. For example, if the subject was lying face to the right in the scanner, then the first (X) axis of the reference system would be scanner-floor/ceiling, but reversed so that positive values are towards the floor. This axis goes from left to right in the subject, with positive values to the right. The second (Y) axis would be scanner-left/right -(anterior-posterior in the subject), and the Z axis would be scanner-bore +(posterior-anterior in the subject), and the Z axis would be scanner-bore (inferior-superior). Naming reference spaces diff --git a/doc/source/devel/advanced_testing.rst b/doc/source/devel/advanced_testing.rst index 0dc365ea1d..77b6522cb1 100644 --- a/doc/source/devel/advanced_testing.rst +++ b/doc/source/devel/advanced_testing.rst @@ -25,7 +25,7 @@ Long-running tests Long-running tests are not enabled by default, and can be resource-intensive. To run these tests: * Set environment variable ``NIPY_EXTRA_TESTS=slow``; -* Run ``nosetests``. +* Run ``pytest nibabel``. Note that some tests may require a machine with >4GB of RAM. diff --git a/doc/source/devel/devguide.rst b/doc/source/devel/devguide.rst index 370f4c8d2a..5e155b75ec 100644 --- a/doc/source/devel/devguide.rst +++ b/doc/source/devel/devguide.rst @@ -28,7 +28,7 @@ Code Documentation All documentation should be written using Numpy documentation conventions: - https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt#docstring-standard + https://numpydoc.readthedocs.io/en/latest/format.html#docstring-standard Git Repository diff --git a/doc/source/devel/make_release.rst b/doc/source/devel/make_release.rst index 25db5210b7..6a09d280b2 100644 --- a/doc/source/devel/make_release.rst +++ b/doc/source/devel/make_release.rst @@ -79,7 +79,7 @@ Release checklist * Make sure all tests pass (from the nibabel root directory):: - nosetests --with-doctest nibabel + pytest --doctest-modules nibabel * Make sure you are set up to use the ``try_branch.py`` - see https://github.com/nipy/nibotmi/blob/master/install.rst#trying-a-set-of-changes-on-the-buildbots diff --git a/doc/source/dicom/dicom_mosaic.rst b/doc/source/dicom/dicom_mosaic.rst index cf597169d3..7e5a157a94 100644 --- a/doc/source/dicom/dicom_mosaic.rst +++ b/doc/source/dicom/dicom_mosaic.rst @@ -67,7 +67,7 @@ The first two values of $\mathbf{s}$ ($s_1, s_2$) are given by the ``PixelSpacing`` field. We get $s_3$ (the slice scaling value) from ``SpacingBetweenSlices``. -The :ref:`spm-dicom` code has a comment saying that mosaic DICOM imagqes +The :ref:`spm-dicom` code has a comment saying that mosaic DICOM images have an incorrect ``ImagePositionPatient`` field. The ``ImagePositionPatient`` field usually gives the $\mathbf{t}$ vector. The comments imply that Siemens has derived ``ImagePositionPatient`` diff --git a/doc/source/index.rst b/doc/source/index.rst index b768a57d71..c57cfcef4e 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -27,9 +27,9 @@ discussions, release procedure and more. 
Authors and Contributors ======================== -The main authors of NiBabel are `Matthew Brett`_, `Michael Hanke`_, `Ben -Cipollini`_, `Marc-Alexandre Côté`_, Chris Markiewicz, `Stephan Gerhard`_ and -`Eric Larson`_. The authors are grateful to the following people who have +Most work on NiBabel so far has been by `Matthew Brett`_, Chris Markiewicz, +`Michael Hanke`_, `Marc-Alexandre Côté`_, `Ben Cipollini`_, Paul McCarthy and +Chris Cheng. The authors are grateful to the following people who have contributed code and discussion (in rough order of appearance): * `Yaroslav O. Halchenko`_ @@ -92,6 +92,20 @@ contributed code and discussion (in rough order of appearance): * Igor Solovey * Jon Haitz Legarreta Gorroño * Katrin Leinweber +* Soichi Hayashi +* Samir Reddigari +* Konstantinos Raktivan +* Matt Cieslak +* Egor Panfilov +* Jath Palasubramaniam +* Henry Braun +* Oscar Esteban +* Cameron Riddell +* Hao-Ting Wang +* Dorota Jarecka +* Chris Gorgolewski +* Benjamin C Darwin +* Zvi Baratz License reprise =============== diff --git a/doc/source/installation.rst b/doc/source/installation.rst index ec942bd043..fe02bcdbf2 100644 --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -81,19 +81,16 @@ is for you. Requirements ------------ -.. check these against: - nibabel/info.py - requirements.txt - .travis.yml - -* Python_ 2.7, or >= 3.4 -* NumPy_ 1.7.1 or greater -* Six_ 1.3 or greater +.. check these against setup.cfg + +* Python_ 3.5.1 or greater +* NumPy_ 1.13 or greater +* Packaging_ 14.3 or greater * SciPy_ (optional, for full SPM-ANALYZE support) +* h5py_ (optional, for MINC2 support) * PyDICOM_ 0.9.9 or greater (optional, for DICOM support) * `Python Imaging Library`_ (optional, for PNG conversion in DICOMFS) -* nose_ 0.11 or greater (optional, to run the tests) -* mock_ (optional, to run the tests) +* pytest_ (optional, to run the tests) * sphinx_ (optional, to build the documentation) Get the development sources @@ -131,7 +128,7 @@ module to see if everything is fine. It should look something like this:: >>> -To run the nibabel test suite, from the terminal run ``nosetests nibabel`` or +To run the nibabel test suite, from the terminal run ``pytest nibabel`` or ``python -c "import nibabel; nibabel.test()``. To run an extended test suite that validates ``nibabel`` for long-running and diff --git a/doc/source/links_names.txt b/doc/source/links_names.txt index 7082b812fd..1a1b688cd4 100644 --- a/doc/source/links_names.txt +++ b/doc/source/links_names.txt @@ -83,7 +83,7 @@ .. _emacs_python_mode: http://www.emacswiki.org/cgi-bin/wiki/PythonMode .. _doctest-mode: http://ed.loper.org/projects/doctestmode/ .. _nose: http://somethingaboutorange.com/mrl/projects/nose -.. _mock: https://github.com/testing-cabal/mock +.. _pytest: https://docs.pytest.org/ .. _`python coverage tester`: http://nedbatchelder.com/code/coverage/ .. _bitbucket: https://bitbucket.org .. _six: http://pythonhosted.org/six @@ -112,6 +112,8 @@ .. _twine: https://pypi.python.org/pypi/twine .. _datapkg: https://pythonhosted.org/datapkg/ .. _python imaging library: https://pypi.python.org/pypi/Pillow +.. _h5py: https://www.h5py.org/ +.. _packaging: https://packaging.pypa.io .. Python imaging projects .. _PyMVPA: http://www.pymvpa.org @@ -223,6 +225,7 @@ .. _`wikipedia shear matrix`: https://en.wikipedia.org/wiki/Shear_matrix .. _`wikipedia reflection`: https://en.wikipedia.org/wiki/Reflection_(mathematics) .. _`wikipedia direction cosine`: https://en.wikipedia.org/wiki/Direction_cosine +.. 
_`wikipedia aliasing`: https://en.wikipedia.org/wiki/Aliasing .. Programming ideas .. _proxy: https://en.wikipedia.org/wiki/Proxy_pattern diff --git a/doc/source/nibabel_images.rst b/doc/source/nibabel_images.rst index f14debcc93..2c62fea478 100644 --- a/doc/source/nibabel_images.rst +++ b/doc/source/nibabel_images.rst @@ -282,6 +282,80 @@ True See :doc:`images_and_memory` for more details on managing image memory and controlling the image cache. +.. _image-slicing: + +Image slicing +============= + +At times it is useful to manipulate an image's shape while keeping it in the +same coordinate system. +The ``slicer`` attribute provides an array-slicing interface to produce new +images with an appropriately adjusted header, such that the data at a given +RAS+ location is unchanged. + +>>> cropped_img = img.slicer[32:-32, ...] +>>> cropped_img.shape +(64, 96, 24, 2) + +The data is identical to cropping the data block directly: + +>>> np.array_equal(cropped_img.get_fdata(), img.get_fdata()[32:-32, ...]) +True + +However, unused data did not need to be loaded into memory or scaled. +Additionally, the image affine was adjusted so that the X-translation is +32 voxels (64mm) less: + +>>> cropped_img.affine +array([[ -2. , 0. , 0. , 53.86], + [ -0. , 1.97, -0.36, -35.72], + [ 0. , 0.32, 2.17, -7.25], + [ 0. , 0. , 0. , 1. ]]) + +>>> img.affine - cropped_img.affine +array([[ 0., 0., 0., 64.], + [ 0., 0., 0., 0.], + [ 0., 0., 0., 0.], + [ 0., 0., 0., 0.]]) + +Another use for the slicer object is to choose specific volumes from a +time series: + +>>> vol0 = img.slicer[..., 0] +>>> vol0.shape +(128, 96, 24) + +Or a selection of volumes: + +>>> img.slicer[..., :1].shape +(128, 96, 24, 1) +>>> img.slicer[..., :2].shape +(128, 96, 24, 2) + +It is also possible to use an integer step when slicing, downsampling +the image without filtering. +Note that this *will induce artifacts* in the frequency spectrum +(`aliasing `_) along any axis that is down-sampled. + +>>> downsampled = vol0.slicer[::2, ::2, ::2] +>>> downsampled.header.get_zooms() +(4.0, 4.0, 4.399998) + +Finally, an image can be flipped along an axis, maintaining an appropriate +affine matrix: + +>>> nib.orientations.aff2axcodes(img.affine) +('L', 'A', 'S') +>>> ras = img.slicer[::-1] +>>> nib.orientations.aff2axcodes(ras.affine) +('R', 'A', 'S') +>>> ras.affine +array([[ 2. , 0. , 0. , 117.86], + [ 0. , 1.97, -0.36, -35.72], + [ -0. , 0.32, 2.17, -7.25], + [ 0. , 0. , 0. , 1. ]]) + + ****************** Loading and saving ****************** diff --git a/doc/source/scripts/make_coord_examples.py b/doc/source/scripts/make_coord_examples.py index 9079cea141..f763b28c28 100644 --- a/doc/source/scripts/make_coord_examples.py +++ b/doc/source/scripts/make_coord_examples.py @@ -15,7 +15,6 @@ * someones_epi.nii.gz (pretend single EPI volume) * someones_anatomy.nii.gz (pretend single subject structural) """ -from __future__ import division, print_function import math diff --git a/doc/tools/build_modref_templates.py b/doc/tools/build_modref_templates.py index 53a8be6ec4..da752b6c42 100755 --- a/doc/tools/build_modref_templates.py +++ b/doc/tools/build_modref_templates.py @@ -1,11 +1,11 @@ #!/usr/bin/env python """Script to auto-generate our API docs. 
""" -from __future__ import print_function, division # stdlib imports import sys import re +import os from os.path import join as pjoin # local imports @@ -49,12 +49,25 @@ def abort(error): installed_version = V(module.__version__) - info_file = pjoin('..', package, 'info.py') - info_lines = open(info_file).readlines() - source_version = '.'.join([v.split('=')[1].strip(" '\n.") - for v in info_lines if re.match( - '^_version_(major|minor|micro|extra)', v - )]) + version_file = pjoin('..', package, '_version.py') + source_version = None + if os.path.exists(version_file): + # Versioneer + from runpy import run_path + try: + source_version = run_path(version_file)['get_versions']()['version'] + except (FileNotFoundError, KeyError): + pass + if source_version == '0+unknown': + source_version = None + if source_version is None: + # Legacy fall-back + info_file = pjoin('..', package, 'info.py') + info_lines = open(info_file).readlines() + source_version = '.'.join([v.split('=')[1].strip(" '\n.") + for v in info_lines if re.match( + '^_version_(major|minor|micro|extra)', v + )]) print('***', source_version) if source_version != installed_version: @@ -69,6 +82,7 @@ def abort(error): r'.*test.*$', r'\.info.*$', r'\.pkg_info.*$', + r'\.py3k.*$', ] docwriter.write_api_docs(outdir) docwriter.write_index(outdir, 'index', relative_to=outdir) diff --git a/min-requirements.txt b/min-requirements.txt new file mode 100644 index 0000000000..0d749072bf --- /dev/null +++ b/min-requirements.txt @@ -0,0 +1,3 @@ +# Auto-generated by tools/update_requirements.py +numpy ==1.13 +packaging ==14.3 diff --git a/nibabel-data/nipy-ecattest b/nibabel-data/nipy-ecattest index 12c9ee6d18..9a0a592057 160000 --- a/nibabel-data/nipy-ecattest +++ b/nibabel-data/nipy-ecattest @@ -1 +1 @@ -Subproject commit 12c9ee6d18d50235e3453897a4be60c19bf126c0 +Subproject commit 9a0a592057bc16894c20c77b03ea1ebb5f8ca8f9 diff --git a/nibabel-data/nitest-dicom b/nibabel-data/nitest-dicom new file mode 160000 index 0000000000..2246c92726 --- /dev/null +++ b/nibabel-data/nitest-dicom @@ -0,0 +1 @@ +Subproject commit 2246c9272658693c02810836bdf820c1c6607624 diff --git a/nibabel/COMMIT_INFO.txt b/nibabel/COMMIT_INFO.txt deleted file mode 100644 index dcaee0b8ed..0000000000 --- a/nibabel/COMMIT_INFO.txt +++ /dev/null @@ -1,6 +0,0 @@ -# This is an ini file that may contain information about the code state -[commit hash] -# The line below may contain a valid hash if it has been substituted during 'git archive' -archive_subst_hash=$Format:%h$ -# This line may be modified by the install process -install_hash= diff --git a/nibabel/__init__.py b/nibabel/__init__.py index fca22ccc99..f99e9e0b06 100644 --- a/nibabel/__init__.py +++ b/nibabel/__init__.py @@ -9,7 +9,8 @@ import os -from .info import __version__, long_description as __doc__ +from .pkg_info import __version__ +from .info import long_description as __doc__ __doc__ += """ Quickstart ========== @@ -22,7 +23,7 @@ img2 = nib.load('other_file.nii.gz') img3 = nib.load('spm_file.img') - data = img1.get_data() + data = img1.get_fdata() affine = img1.affine print(img1) @@ -35,18 +36,6 @@ For more detailed information see the :ref:`manual`. """ - -def setup_test(): - """ Set numpy print options to "legacy" for new versions of numpy - - If imported into a file, nosetest will run this before any doctests. - """ - import numpy - from distutils.version import LooseVersion - if LooseVersion(numpy.__version__) >= LooseVersion('1.14'): - numpy.set_printoptions(legacy="1.13") - - # module imports from . 
import analyze as ana from . import spm99analyze as spm99 @@ -65,9 +54,7 @@ def setup_test(): from .minc1 import Minc1Image from .minc2 import Minc2Image from .cifti2 import Cifti2Header, Cifti2Image -# Deprecated backwards compatiblity for MINC1 -from .deprecated import ModuleProxy as _ModuleProxy -minc = _ModuleProxy('nibabel.minc') +from .gifti import GiftiImage from .minc1 import MincImage from .freesurfer import MGHImage from .funcs import (squeeze_image, concat_images, four_to_three, @@ -76,26 +63,107 @@ def setup_test(): flip_axis, OrientationError, apply_orientation, aff2axcodes) from .imageclasses import class_map, ext_map, all_image_classes -from . import trackvis +from .deprecated import ModuleProxy as _ModuleProxy +trackvis = _ModuleProxy('nibabel.trackvis') from . import mriutils from . import streamlines from . import viewers -import pkgutil - -if not pkgutil.find_loader('mock'): - def test(*args, **kwargs): - raise RuntimeError('Need "mock" package for tests') -else: - from numpy.testing import Tester - test = Tester().test - bench = Tester().bench - del Tester - -del pkgutil - from .pkg_info import get_pkg_info as _get_pkg_info def get_info(): return _get_pkg_info(os.path.dirname(__file__)) + + +def test(label=None, verbose=1, extra_argv=None, + doctests=False, coverage=False, raise_warnings=None, + timer=False): + """ + Run tests for nibabel using pytest + + The protocol mimics the ``numpy.testing.NoseTester.test()``. + Not all features are currently implemented. + + Parameters + ---------- + label : None + Unused. + verbose: int, optional + Verbosity value for test outputs. Positive values increase verbosity, and + negative values decrease it. Default is 1. + extra_argv : list, optional + List with any extra arguments to pass to pytest. + doctests: bool, optional + If True, run doctests in module. Default is False. + coverage: bool, optional + If True, report coverage of NumPy code. Default is False. + (This requires the + `coverage module `_). + raise_warnings : None + Unused. + timer : False + Unused. + + Returns + ------- + code : ExitCode + Returns the result of running the tests as a ``pytest.ExitCode`` enum + """ + import pytest + args = [] + + if label is not None: + raise NotImplementedError("Labels cannot be set at present") + + verbose = int(verbose) + if verbose > 0: + args.append("-" + "v" * verbose) + elif verbose < 0: + args.append("-" + "q" * -verbose) + + if extra_argv: + args.extend(extra_argv) + if doctests: + args.append("--doctest-modules") + if coverage: + args.extend(["--cov", "nibabel"]) + if raise_warnings is not None: + raise NotImplementedError("Warning filters are not implemented") + if timer: + raise NotImplementedError("Timing is not implemented") + + args.extend(["--pyargs", "nibabel"]) + + return pytest.main(args=args) + + +def bench(label=None, verbose=1, extra_argv=None): + """ + Run benchmarks for nibabel using pytest + + The protocol mimics the ``numpy.testing.NoseTester.bench()``. + Not all features are currently implemented. + + Parameters + ---------- + label : None + Unused. + verbose: int, optional + Verbosity value for test outputs. Positive values increase verbosity, and + negative values decrease it. Default is 1. + extra_argv : list, optional + List with any extra arguments to pass to pytest. 
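As a rough illustration of the pytest-based runner defined above, the sketch below shows how the ``test()`` arguments map onto ``pytest`` options; the extra ``-x`` flag is purely illustrative::

    import nibabel as nib

    # Equivalent to running ``pytest -v --pyargs nibabel``.
    code = nib.test(verbose=1)

    # Add doctests and coverage, forwarding extra options to pytest.
    code = nib.test(doctests=True, coverage=True, extra_argv=['-x'])

    # The result is a ``pytest.ExitCode``; 0 means all selected tests passed.
    print(int(code))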
+ + Returns + ------- + code : ExitCode + Returns the result of running the tests as a ``pytest.ExitCode`` enum + """ + from pkg_resources import resource_filename + config = resource_filename("nibabel", "benchmarks/pytest.benchmark.ini") + args = [] + if extra_argv is not None: + args.extend(extra_argv) + args.extend(["-c", config]) + return test(label, verbose, extra_argv=args) diff --git a/nibabel/_h5py_compat.py b/nibabel/_h5py_compat.py new file mode 100644 index 0000000000..2c0b0eb2c0 --- /dev/null +++ b/nibabel/_h5py_compat.py @@ -0,0 +1,12 @@ +import sys +import os +from .optpkg import optional_package + +# PY35: A bug affected Windows installations of h5py in Python3 versions <3.6 +# due to random dictionary ordering, causing float64 data arrays to sometimes be +# loaded as longdouble (also 64 bit on Windows). This caused stochastic failures +# to correctly handle data caches, and possibly other subtle bugs we never +# caught. This was fixed in h5py 2.10. +# Please see https://github.com/nipy/nibabel/issues/665 for details. +min_h5py = '2.10' if os.name == 'nt' and (3,) <= sys.version_info < (3, 6) else None +h5py, have_h5py, setup_module = optional_package('h5py', min_version=min_h5py) diff --git a/nibabel/_version.py b/nibabel/_version.py new file mode 100644 index 0000000000..60031b4d17 --- /dev/null +++ b/nibabel/_version.py @@ -0,0 +1,534 @@ + +# This file helps to compute a version number in source trees obtained from +# git-archive tarball (such as those provided by githubs download-from-tag +# feature). Distribution tarballs (built by setup.py sdist) and build +# directories (produced by setup.py build) will contain a much shorter file +# that just contains the computed version number. + +# This file is released into the public domain. Generated by +# versioneer-0.18 (https://github.com/warner/python-versioneer) + +"""Git implementation of _version.py.""" + +import errno +import os +import re +import subprocess +import sys +import runpy + + +def get_keywords(): + """Get the keywords needed to look up the version information.""" + # these strings will be replaced by git during git-archive. + # setup.py/versioneer.py will grep for the variable names, so they must + # each be defined on a line of their own. _version.py will just call + # get_keywords(). 
+ git_refnames = "$Format:%d$" + git_full = "$Format:%H$" + git_date = "$Format:%ci$" + keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} + return keywords + + +class VersioneerConfig: + """Container for Versioneer configuration parameters.""" + + +def get_config(): + """Create, populate and return the VersioneerConfig() object.""" + # these strings are filled in when 'setup.py versioneer' creates + # _version.py + cfg = VersioneerConfig() + cfg.VCS = "git" + cfg.style = "pep440" + cfg.tag_prefix = "" + cfg.parentdir_prefix = "" + cfg.versionfile_source = "nibabel/_version.py" + cfg.verbose = False + return cfg + + +class NotThisMethod(Exception): + """Exception raised if a method is not valid for the current scenario.""" + + +LONG_VERSION_PY = {} +HANDLERS = {} + + +def register_vcs_handler(vcs, method): # decorator + """Decorator to mark a method as the handler for a particular VCS.""" + def decorate(f): + """Store f in HANDLERS[vcs][method].""" + if vcs not in HANDLERS: + HANDLERS[vcs] = {} + HANDLERS[vcs][method] = f + return f + return decorate + + +def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, + env=None): + """Call the given command(s).""" + assert isinstance(commands, list) + p = None + for c in commands: + try: + dispcmd = str([c] + args) + # remember shell=False, so use git.cmd on windows, not just git + p = subprocess.Popen([c] + args, cwd=cwd, env=env, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr + else None)) + break + except EnvironmentError: + e = sys.exc_info()[1] + if e.errno == errno.ENOENT: + continue + if verbose: + print("unable to run %s" % dispcmd) + print(e) + return None, None + else: + if verbose: + print("unable to find command, tried %s" % (commands,)) + return None, None + stdout = p.communicate()[0].strip() + if sys.version_info[0] >= 3: + stdout = stdout.decode() + if p.returncode != 0: + if verbose: + print("unable to run %s (error)" % dispcmd) + print("stdout was %s" % stdout) + return None, p.returncode + return stdout, p.returncode + + +def versions_from_parentdir(parentdir_prefix, root, verbose): + """Try to determine the version from the parent directory name. + + Source tarballs conventionally unpack into a directory that includes both + the project name and a version string. We will also support searching up + two directory levels for an appropriately named parent directory + """ + rootdirs = [] + + for i in range(3): + dirname = os.path.basename(root) + if dirname.startswith(parentdir_prefix): + return {"version": dirname[len(parentdir_prefix):], + "full-revisionid": None, + "dirty": False, "error": None, "date": None} + else: + rootdirs.append(root) + root = os.path.dirname(root) # up a level + + if verbose: + print("Tried directories %s but none started with prefix %s" % + (str(rootdirs), parentdir_prefix)) + raise NotThisMethod("rootdir doesn't start with parentdir_prefix") + + +@register_vcs_handler("git", "get_keywords") +def git_get_keywords(versionfile_abs): + """Extract version information from the given file.""" + # the code embedded in _version.py can just fetch the value of these + # keywords. When used from setup.py, we don't want to import _version.py, + # so we do it with a regexp instead. This function is not used from + # _version.py. 
+ keywords = {} + try: + f = open(versionfile_abs, "r") + for line in f.readlines(): + if line.strip().startswith("git_refnames ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["refnames"] = mo.group(1) + if line.strip().startswith("git_full ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["full"] = mo.group(1) + if line.strip().startswith("git_date ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["date"] = mo.group(1) + f.close() + except EnvironmentError: + pass + # CJM: Nibabel hack to ensure we can git-archive off-release versions and + # revert to old X.Y.Zdev versions + githash + try: + rel = runpy.run_path(os.path.join(os.path.dirname(versionfile_abs), "info.py")) + keywords["fallback"] = rel["VERSION"] + except (FileNotFoundError, KeyError): + pass + return keywords + + +@register_vcs_handler("git", "keywords") +def git_versions_from_keywords(keywords, tag_prefix, verbose): + """Get version information from git keywords.""" + # CJM: Nibabel fix to avoid hitting unguarded dictionary lookup, better explanation + if "refnames" not in keywords: + raise NotThisMethod("Short version file found") + date = keywords.get("date") + if date is not None: + # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant + # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 + # -like" string, which we must then edit to make compliant), because + # it's been around since git-1.5.3, and it's too difficult to + # discover which version we're using, or to work around using an + # older one. + date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) + refnames = keywords["refnames"].strip() + if refnames.startswith("$Format"): + if verbose: + print("keywords are unexpanded, not using") + raise NotThisMethod("unexpanded keywords, not a git-archive tarball") + refs = set([r.strip() for r in refnames.strip("()").split(",")]) + # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of + # just "foo-1.0". If we see a "tag: " prefix, prefer those. + TAG = "tag: " + tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) + if not tags: + # Either we're using git < 1.8.3, or there really are no tags. We use + # a heuristic: assume all version tags have a digit. The old git %d + # expansion behaves like git log --decorate=short and strips out the + # refs/heads/ and refs/tags/ prefixes that would let us distinguish + # between branches and tags. By ignoring refnames without digits, we + # filter out many common branch names like "release" and + # "stabilization", as well as "HEAD" and "master". + tags = set([r for r in refs if re.search(r'\d', r)]) + if verbose: + print("discarding '%s', no digits" % ",".join(refs - tags)) + if verbose: + print("likely tags: %s" % ",".join(sorted(tags))) + for ref in sorted(tags): + # sorting will prefer e.g. 
"2.0" over "2.0rc1" + if ref.startswith(tag_prefix): + r = ref[len(tag_prefix):] + # CJM: Nibabel fix to filter out refs that exactly match prefix + # or that don't start with a number once the prefix is stripped + # (Mostly a concern when prefix is '') + if not re.match(r'\d', r): + continue + if verbose: + print("picking %s" % r) + return {"version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, "error": None, + "date": date} + # no suitable tags, so inspect ./info.py + if verbose: + print("no suitable tags, falling back to info.VERSION or 0+unknown") + return {"version": keywords.get("fallback", "0+unknown"), + "full-revisionid": keywords["full"].strip(), + "dirty": False, "error": "no suitable tags", "date": None} + + +@register_vcs_handler("git", "pieces_from_vcs") +def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): + """Get version from 'git describe' in the root of the source tree. + + This only gets called if the git-archive 'subst' keywords were *not* + expanded, and _version.py hasn't already been rewritten with a short + version string, meaning we're inside a checked out source tree. + """ + GITS = ["git"] + if sys.platform == "win32": + GITS = ["git.cmd", "git.exe"] + + out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, + hide_stderr=True) + if rc != 0: + if verbose: + print("Directory %s not under git control" % root) + raise NotThisMethod("'git rev-parse --git-dir' returned error") + + # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] + # if there isn't one, this yields HEX[-dirty] (no NUM) + describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", + "--always", "--long", + "--match", "%s*" % tag_prefix], + cwd=root) + # --long was added in git-1.5.5 + if describe_out is None: + raise NotThisMethod("'git describe' failed") + describe_out = describe_out.strip() + full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) + if full_out is None: + raise NotThisMethod("'git rev-parse' failed") + full_out = full_out.strip() + + pieces = {} + pieces["long"] = full_out + pieces["short"] = full_out[:7] # maybe improved later + pieces["error"] = None + + # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] + # TAG might have hyphens. + git_describe = describe_out + + # look for -dirty suffix + dirty = git_describe.endswith("-dirty") + pieces["dirty"] = dirty + if dirty: + git_describe = git_describe[:git_describe.rindex("-dirty")] + + # now we have TAG-NUM-gHEX or HEX + + if "-" in git_describe: + # TAG-NUM-gHEX + mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) + if not mo: + # unparseable. Maybe git-describe is misbehaving? 
+ pieces["error"] = ("unable to parse git-describe output: '%s'" + % describe_out) + return pieces + + # tag + full_tag = mo.group(1) + if not full_tag.startswith(tag_prefix): + if verbose: + fmt = "tag '%s' doesn't start with prefix '%s'" + print(fmt % (full_tag, tag_prefix)) + pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" + % (full_tag, tag_prefix)) + return pieces + pieces["closest-tag"] = full_tag[len(tag_prefix):] + + # distance: number of commits since tag + pieces["distance"] = int(mo.group(2)) + + # commit: short hex revision ID + pieces["short"] = mo.group(3) + + else: + # HEX: no tags + pieces["closest-tag"] = None + count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], + cwd=root) + pieces["distance"] = int(count_out) # total number of commits + + # commit date: see ISO-8601 comment in git_versions_from_keywords() + date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], + cwd=root)[0].strip() + pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) + + return pieces + + +def plus_or_dot(pieces): + """Return a + if we don't already have one, else return a .""" + if "+" in pieces.get("closest-tag", ""): + return "." + return "+" + + +def render_pep440(pieces): + """Build up version string, with post-release "local version identifier". + + Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you + get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty + + Exceptions: + 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += plus_or_dot(pieces) + rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0+untagged.%d.g%s" % (pieces["distance"], + pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_pre(pieces): + """TAG[.post.devDISTANCE] -- No -dirty. + + Exceptions: + 1: no tags. 0.post.devDISTANCE + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"]: + rendered += ".post.dev%d" % pieces["distance"] + else: + # exception #1 + rendered = "0.post.dev%d" % pieces["distance"] + return rendered + + +def render_pep440_post(pieces): + """TAG[.postDISTANCE[.dev0]+gHEX] . + + The ".dev0" means dirty. Note that .dev0 sorts backwards + (a dirty tree will appear "older" than the corresponding clean one), + but you shouldn't be releasing software with -dirty anyways. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "g%s" % pieces["short"] + else: + # exception #1 + rendered = "0.post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += "+g%s" % pieces["short"] + return rendered + + +def render_pep440_old(pieces): + """TAG[.postDISTANCE[.dev0]] . + + The ".dev0" means dirty. + + Eexceptions: + 1: no tags. 
0.postDISTANCE[.dev0] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + else: + # exception #1 + rendered = "0.post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + return rendered + + +def render_git_describe(pieces): + """TAG[-DISTANCE-gHEX][-dirty]. + + Like 'git describe --tags --dirty --always'. + + Exceptions: + 1: no tags. HEX[-dirty] (note: no 'g' prefix) + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"]: + rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render_git_describe_long(pieces): + """TAG-DISTANCE-gHEX[-dirty]. + + Like 'git describe --tags --dirty --always -long'. + The distance/hash is unconditional. + + Exceptions: + 1: no tags. HEX[-dirty] (note: no 'g' prefix) + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render(pieces, style): + """Render the given version pieces into the requested style.""" + if pieces["error"]: + return {"version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"], + "date": None} + + if not style or style == "default": + style = "pep440" # the default + + if style == "pep440": + rendered = render_pep440(pieces) + elif style == "pep440-pre": + rendered = render_pep440_pre(pieces) + elif style == "pep440-post": + rendered = render_pep440_post(pieces) + elif style == "pep440-old": + rendered = render_pep440_old(pieces) + elif style == "git-describe": + rendered = render_git_describe(pieces) + elif style == "git-describe-long": + rendered = render_git_describe_long(pieces) + else: + raise ValueError("unknown style '%s'" % style) + + return {"version": rendered, "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], "error": None, + "date": pieces.get("date")} + + +def get_versions(): + """Get version information or return default if unable to do so.""" + # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have + # __file__, we can work backwards from there to the root. Some + # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which + # case we can only use expanded keywords. + + cfg = get_config() + verbose = cfg.verbose + + try: + return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, + verbose) + except NotThisMethod: + pass + + try: + root = os.path.realpath(__file__) + # versionfile_source is the relative path from the top of the source + # tree (where the .git directory might live) to this file. Invert + # this to find the root from __file__. 
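Putting the machinery above together, a minimal sketch of how a hypothetical ``git describe`` string becomes a PEP 440 version under the default ``pep440`` style (the tag, distance and hash are invented)::

    import re

    # Hypothetical 'git describe --tags --dirty --always --long' output.
    describe_out = '3.0.0-12-gabcdef1-dirty'

    # Strip the -dirty suffix, as in git_pieces_from_vcs().
    dirty = describe_out.endswith('-dirty')
    if dirty:
        describe_out = describe_out[:describe_out.rindex('-dirty')]

    # TAG-NUM-gHEX: tag, commits since the tag, short revision id.
    mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', describe_out)
    tag, distance, short = mo.group(1), int(mo.group(2)), mo.group(3)

    # Default 'pep440' rendering: TAG[+DISTANCE.gHEX[.dirty]].
    rendered = tag
    if distance or dirty:
        rendered += ('.' if '+' in tag else '+') + '%d.g%s' % (distance, short)
        if dirty:
            rendered += '.dirty'

    print(rendered)   # 3.0.0+12.gabcdef1.dirty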
+ for i in cfg.versionfile_source.split('/'): + root = os.path.dirname(root) + except NameError: + return {"version": "0+unknown", "full-revisionid": None, + "dirty": None, + "error": "unable to find root of source tree", + "date": None} + + try: + pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) + return render(pieces, cfg.style) + except NotThisMethod: + pass + + try: + if cfg.parentdir_prefix: + return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) + except NotThisMethod: + pass + + return {"version": "0+unknown", "full-revisionid": None, + "dirty": None, + "error": "unable to compute version", "date": None} diff --git a/nibabel/affines.py b/nibabel/affines.py index 057233e454..c2b2a3b1d0 100644 --- a/nibabel/affines.py +++ b/nibabel/affines.py @@ -2,11 +2,9 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """ Utility routines for working with points and affine transforms """ - import numpy as np -from six.moves import reduce -from . import setup_test # noqa +from functools import reduce class AffineError(ValueError): @@ -297,3 +295,31 @@ def voxel_sizes(affine): """ top_left = affine[:-1, :-1] return np.sqrt(np.sum(top_left ** 2, axis=0)) + + +def obliquity(affine): + r""" + Estimate the *obliquity* an affine's axes represent. + + The term *obliquity* is defined here as the rotation of those axes with + respect to the cardinal axes. + This implementation is inspired by `AFNI's implementation + `_. + For further details about *obliquity*, check `AFNI's documentation + `_. + + Parameters + ---------- + affine : 2D array-like + Affine transformation array. Usually shape (4, 4), but can be any 2D + array. + + Returns + ------- + angles : 1D array-like + The *obliquity* of each axis with respect to the cardinal axes, in radians. + + """ + vs = voxel_sizes(affine) + best_cosines = np.abs(affine[:-1, :-1] / vs).max(axis=1) + return np.arccos(best_cosines) diff --git a/nibabel/analyze.py b/nibabel/analyze.py index ae7c8f69e6..f14e2c2a22 100644 --- a/nibabel/analyze.py +++ b/nibabel/analyze.py @@ -95,7 +95,6 @@ from .fileholders import copy_file_map from .batteryrunners import Report from .arrayproxy import ArrayProxy -from .keywordonly import kw_only_meth # Sub-parts of standard analyze header from # Mayo dbh.h file @@ -933,9 +932,12 @@ def set_data_dtype(self, dtype): self._header.set_data_dtype(dtype) @classmethod - @kw_only_meth(1) - def from_file_map(klass, file_map, mmap=True, keep_file_open=None): - '''class method to create image from mapping in `file_map `` + def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): + ''' Class method to create image from mapping in ``file_map`` + + .. deprecated:: 2.4.1 + ``keep_file_open='auto'`` is redundant with `False` and has + been deprecated. It raises an error as of nibabel 3.0. Parameters ---------- @@ -950,18 +952,14 @@ def from_file_map(klass, file_map, mmap=True, keep_file_open=None): `mmap` value of True gives the same behavior as ``mmap='c'``. If image data file cannot be memory-mapped, ignore `mmap` value and read array from file. - keep_file_open : { None, 'auto', True, False }, optional, keyword only + keep_file_open : { None, True, False }, optional, keyword only `keep_file_open` controls whether a new file handle is created every time the image is accessed, or a single file handle is created and used for the lifetime of this ``ArrayProxy``. If ``True``, a single file handle is created and used. If ``False``, - a new file handle is created every time the image is accessed. 
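The new ``obliquity()`` helper added to ``nibabel.affines`` above can be tried on a toy affine; in this sketch the 2 mm spacing and 10 degree rotation are arbitrary, and the printed values are approximate::

    import numpy as np
    from nibabel.affines import obliquity

    # Axis-aligned 2 mm isotropic affine: zero obliquity on every axis.
    plumb = np.diag([2., 2., 2., 1.])
    print(np.degrees(obliquity(plumb)))        # [ 0.  0.  0.]

    # Rotate 10 degrees about the z-axis: the x and y axes become oblique.
    theta = np.radians(10)
    rot = np.eye(4)
    rot[:2, :2] = [[np.cos(theta), -np.sin(theta)],
                   [np.sin(theta),  np.cos(theta)]]
    print(np.degrees(obliquity(rot @ plumb)))  # roughly [10. 10.  0.]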
If - ``'auto'``, and the optional ``indexed_gzip`` dependency is - present, a single file handle is created and persisted. If - ``indexed_gzip`` is not available, behaviour is the same as if - ``keep_file_open is False``. If ``file_map`` refers to an open - file handle, this setting has no effect. The default value - (``None``) will result in the value of + a new file handle is created every time the image is accessed. + If ``file_map`` refers to an open file handle, this setting has no + effect. The default value (``None``) will result in the value of ``nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT`` being used. Returns @@ -988,47 +986,6 @@ def from_file_map(klass, file_map, mmap=True, keep_file_open=None): 'file_map': copy_file_map(file_map)} return img - @classmethod - @kw_only_meth(1) - def from_filename(klass, filename, mmap=True, keep_file_open=None): - '''class method to create image from filename `filename` - - Parameters - ---------- - filename : str - Filename of image to load - mmap : {True, False, 'c', 'r'}, optional, keyword only - `mmap` controls the use of numpy memory mapping for reading image - array data. If False, do not try numpy ``memmap`` for data array. - If one of {'c', 'r'}, try numpy memmap with ``mode=mmap``. A - `mmap` value of True gives the same behavior as ``mmap='c'``. If - image data file cannot be memory-mapped, ignore `mmap` value and - read array from file. - keep_file_open : { None, 'auto', True, False }, optional, keyword only - `keep_file_open` controls whether a new file handle is created - every time the image is accessed, or a single file handle is - created and used for the lifetime of this ``ArrayProxy``. If - ``True``, a single file handle is created and used. If ``False``, - a new file handle is created every time the image is accessed. If - ``'auto'``, and the optional ``indexed_gzip`` dependency is - present, a single file handle is created and persisted. If - ``indexed_gzip`` is not available, behaviour is the same as if - ``keep_file_open is False``. The default value (``None``) will - result in the value of - ``nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT`` being used. - - Returns - ------- - img : Analyze Image instance - ''' - if mmap not in (True, False, 'c', 'r'): - raise ValueError("mmap should be one of {True, False, 'c', 'r'}") - file_map = klass.filespec_to_file_map(filename) - return klass.from_file_map(file_map, mmap=mmap, - keep_file_open=keep_file_open) - - load = from_filename - @staticmethod def _get_fileholders(file_map): """ Return fileholder for header and image @@ -1050,7 +1007,7 @@ def to_file_map(self, file_map=None): ''' if file_map is None: file_map = self.file_map - data = self.get_data() + data = np.asanyarray(self.dataobj) self.update_header() hdr = self._header out_dtype = self.get_data_dtype() diff --git a/nibabel/arrayproxy.py b/nibabel/arrayproxy.py index c74386b0ac..b45405b6fb 100644 --- a/nibabel/arrayproxy.py +++ b/nibabel/arrayproxy.py @@ -32,31 +32,27 @@ from .deprecated import deprecate_with_version from .volumeutils import array_from_file, apply_read_scaling -from .fileslice import fileslice -from .keywordonly import kw_only_meth +from .fileslice import fileslice, canonical_slicers from . import openers """This flag controls whether a new file handle is created every time an image is accessed through an ``ArrayProxy``, or a single file handle is created and used for the lifetime of the ``ArrayProxy``. It should be set to one of -``True``, ``False``, or ``'auto'``. +``True`` or ``False``. 
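With ``'auto'`` removed, ``keep_file_open`` is a plain boolean keyword-only option. A hedged sketch of how it is passed through the high-level API, assuming a hypothetical ``example.nii.gz`` and that keyword arguments are forwarded to the image class::

    import nibabel as nib

    # Keep a single file handle open for the lifetime of the array proxy.
    img = nib.load('example.nii.gz', keep_file_open=True)

    # The previously accepted 'auto' value now raises a ValueError.
    try:
        nib.load('example.nii.gz', keep_file_open='auto')
    except ValueError as err:
        print(err)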
Management of file handles will be performed either by ``ArrayProxy`` objects, or by the ``indexed_gzip`` package if it is used. If this flag is set to ``True``, a single file handle is created and used. If -``False``, a new file handle is created every time the image is accessed. For -gzip files, if ``'auto'``, and the optional ``indexed_gzip`` dependency is -present, a single file handle is created and persisted. If ``indexed_gzip`` is -not available, behaviour is the same as if ``keep_file_open is False``. +``False``, a new file handle is created every time the image is accessed. If this is set to any other value, attempts to create an ``ArrayProxy`` without specifying the ``keep_file_open`` flag will result in a ``ValueError`` being raised. -.. warning:: Setting this flag to a value of ``'auto'`` will become deprecated - behaviour in version 2.4.0. Support for ``'auto'`` will be removed +.. warning:: Setting this flag to a value of ``'auto'`` became deprecated + behaviour in version 2.4.1. Support for ``'auto'`` was removed in version 3.0.0. """ KEEP_FILE_OPEN_DEFAULT = False @@ -96,10 +92,13 @@ class ArrayProxy(object): order = 'F' _header = None - @kw_only_meth(2) - def __init__(self, file_like, spec, mmap=True, keep_file_open=None): + def __init__(self, file_like, spec, *, mmap=True, keep_file_open=None): """Initialize array proxy instance + .. deprecated:: 2.4.1 + ``keep_file_open='auto'`` is redundant with `False` and has + been deprecated. It raises an error as of nibabel 3.0. + Parameters ---------- file_like : object @@ -127,18 +126,15 @@ def __init__(self, file_like, spec, mmap=True, keep_file_open=None): True gives the same behavior as ``mmap='c'``. If `file_like` cannot be memory-mapped, ignore `mmap` value and read array from file. - keep_file_open : { None, 'auto', True, False }, optional, keyword only + keep_file_open : { None, True, False }, optional, keyword only `keep_file_open` controls whether a new file handle is created every time the image is accessed, or a single file handle is created and used for the lifetime of this ``ArrayProxy``. If ``True``, a single file handle is created and used. If ``False``, - a new file handle is created every time the image is accessed. If - ``'auto'``, and the optional ``indexed_gzip`` dependency is - present, a single file handle is created and persisted. If - ``indexed_gzip`` is not available, behaviour is the same as if - ``keep_file_open is False``. If ``file_like`` is an open file - handle, this setting has no effect. The default value (``None``) - will result in the value of ``KEEP_FILE_OPEN_DEFAULT`` being used. + a new file handle is created every time the image is accessed. + If ``file_like`` is an open file handle, this setting has no + effect. The default value (``None``) will result in the value of + ``KEEP_FILE_OPEN_DEFAULT`` being used. """ if mmap not in (True, False, 'c', 'r'): raise ValueError("mmap should be one of {True, False, 'c', 'r'}") @@ -236,24 +232,16 @@ def _should_keep_file_open(self, file_like, keep_file_open): In this case, file handle management is delegated to the ``indexed_gzip`` library. - 5. If ``keep_file_open`` is ``'auto'``, ``file_like`` is a path to a - ``.gz`` file, and ``indexed_gzip`` is present, both internal flags - are set to ``True``. - - 6. If ``keep_file_open`` is ``'auto'``, and ``file_like`` is not a - path to a ``.gz`` file, or ``indexed_gzip`` is not present, both - internal flags are set to ``False``. 
- - Note that a value of ``'auto'`` for ``keep_file_open`` will become - deprecated behaviour in version 2.4.0, and support for ``'auto'`` will - be removed in version 3.0.0. + .. deprecated:: 2.4.1 + ``keep_file_open='auto'`` is redundant with `False` and has + been deprecated. It raises an error as of nibabel 3.0. Parameters ---------- file_like : object File-like object or filename, as passed to ``__init__``. - keep_file_open : { 'auto', True, False } + keep_file_open : { True, False } Flag as passed to ``__init__``. Returns @@ -266,20 +254,20 @@ def _should_keep_file_open(self, file_like, keep_file_open): """ if keep_file_open is None: keep_file_open = KEEP_FILE_OPEN_DEFAULT - if keep_file_open not in ('auto', True, False): - raise ValueError('keep_file_open should be one of {None, ' - '\'auto\', True, False}') + if keep_file_open not in (True, False): + raise ValueError("nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT must be boolean. " + "Found: {}".format(keep_file_open)) + elif keep_file_open not in (True, False): + raise ValueError('keep_file_open must be one of {None, True, False}') + # file_like is a handle - keep_file_open is irrelevant if hasattr(file_like, 'read') and hasattr(file_like, 'seek'): return False, False # if the file is a gzip file, and we have_indexed_gzip, have_igzip = openers.HAVE_INDEXED_GZIP and file_like.endswith('.gz') - if keep_file_open == 'auto': - return have_igzip, have_igzip - elif keep_file_open: - return True, True - else: - return False, have_igzip + + persist_opener = keep_file_open or have_igzip + return keep_file_open, persist_opener @property @deprecate_with_version('ArrayProxy.header deprecated', '2.2', '3.0') @@ -337,36 +325,76 @@ def _get_fileobj(self): self.file_like, keep_open=False) as opener: yield opener - def get_unscaled(self): - """ Read of data from file - - This is an optional part of the proxy API - """ - with self._get_fileobj() as fileobj, self._lock: - raw_data = array_from_file(self._shape, + def _get_unscaled(self, slicer): + if canonical_slicers(slicer, self._shape, False) == \ + canonical_slicers((), self._shape, False): + with self._get_fileobj() as fileobj, self._lock: + return array_from_file(self._shape, self._dtype, fileobj, offset=self._offset, order=self.order, mmap=self._mmap) - return raw_data + with self._get_fileobj() as fileobj: + return fileslice(fileobj, + slicer, + self._shape, + self._dtype, + self._offset, + order=self.order, + lock=self._lock) + + def _get_scaled(self, dtype, slicer): + # Ensure scale factors have dtypes + scl_slope = np.asanyarray(self._slope) + scl_inter = np.asanyarray(self._inter) + use_dtype = scl_slope.dtype if dtype is None else dtype + + if np.can_cast(scl_slope, use_dtype): + scl_slope = scl_slope.astype(use_dtype) + if np.can_cast(scl_inter, use_dtype): + scl_inter = scl_inter.astype(use_dtype) + # Read array and upcast as necessary for big slopes, intercepts + scaled = apply_read_scaling(self._get_unscaled(slicer=slicer), scl_slope, scl_inter) + if dtype is not None: + scaled = scaled.astype(np.promote_types(scaled.dtype, dtype), copy=False) + return scaled + + def get_unscaled(self): + """ Read data from file - def __array__(self): - # Read array and scale - raw_data = self.get_unscaled() - return apply_read_scaling(raw_data, self._slope, self._inter) + This is an optional part of the proxy API + """ + return self._get_unscaled(slicer=()) + + def __array__(self, dtype=None): + """ Read data from file and apply scaling, casting to ``dtype`` + + If ``dtype`` is unspecified, the dtype 
of the returned array is the + narrowest dtype that can represent the data without overflow. + Generally, it is the wider of the dtypes of the slopes or intercepts. + + The types of the scale factors will generally be determined by the + parameter size in the image header, and so should be consistent for a + given image format, but may vary across formats. + + Parameters + ---------- + dtype : numpy dtype specifier, optional + A numpy dtype specifier specifying the type of the returned array. + + Returns + ------- + array + Scaled image data with type `dtype`. + """ + arr = self._get_scaled(dtype=dtype, slicer=()) + if dtype is not None: + arr = arr.astype(dtype, copy=False) + return arr def __getitem__(self, slicer): - with self._get_fileobj() as fileobj: - raw_data = fileslice(fileobj, - slicer, - self._shape, - self._dtype, - self._offset, - order=self.order, - lock=self._lock) - # Upcast as necessary for big slopes, intercepts - return apply_read_scaling(raw_data, self._slope, self._inter) + return self._get_scaled(dtype=None, slicer=slicer) def reshape(self, shape): """ Return an ArrayProxy with a new shape, without modifying data """ diff --git a/nibabel/arraywriters.py b/nibabel/arraywriters.py index 2bd29e4ca4..c5c0efb706 100644 --- a/nibabel/arraywriters.py +++ b/nibabel/arraywriters.py @@ -28,7 +28,6 @@ def __init__(self, array, out_dtype=None) something else to make sense of conversions between float and int, or between larger ints and smaller. """ -from __future__ import division, absolute_import import warnings @@ -50,7 +49,7 @@ class ScalingError(WriterError): class ArrayWriter(object): def __init__(self, array, out_dtype=None, **kwargs): - """ Initialize array writer + r""" Initialize array writer Parameters ---------- @@ -247,7 +246,7 @@ class SlopeArrayWriter(ArrayWriter): def __init__(self, array, out_dtype=None, calc_scale=True, scaler_dtype=np.float32, **kwargs): - """ Initialize array writer + r""" Initialize array writer Parameters ---------- @@ -478,7 +477,7 @@ class SlopeInterArrayWriter(SlopeArrayWriter): def __init__(self, array, out_dtype=None, calc_scale=True, scaler_dtype=np.float32, **kwargs): - """ Initialize array writer + r""" Initialize array writer Parameters ---------- @@ -751,7 +750,7 @@ def get_slope_inter(writer): def make_array_writer(data, out_type, has_slope=True, has_intercept=True, **kwargs): - """ Make array writer instance for array `data` and output type `out_type` + r""" Make array writer instance for array `data` and output type `out_type` Parameters ---------- diff --git a/nibabel/batteryrunners.py b/nibabel/batteryrunners.py index be3977111a..b77c8b8858 100644 --- a/nibabel/batteryrunners.py +++ b/nibabel/batteryrunners.py @@ -141,7 +141,7 @@ def check_only(self, obj): ------- reports : sequence sequence of report objects reporting on result of running - checks (withou fixes) on `obj` + checks (without fixes) on `obj` ''' reports = [] for check in self._checks: diff --git a/nibabel/benchmarks/bench_array_to_file.py b/nibabel/benchmarks/bench_array_to_file.py index 36921a106a..ee0d25044d 100644 --- a/nibabel/benchmarks/bench_array_to_file.py +++ b/nibabel/benchmarks/bench_array_to_file.py @@ -5,15 +5,10 @@ import nibabel as nib nib.bench() -If you have doctests enabled by default in nose (with a noserc file or -environment variable), and you have a numpy version <= 1.6.1, this will also -run the doctests, let's hope they pass. 
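The dtype-aware scaled reads above are normally reached through the high-level image API rather than by calling ``__array__`` directly; a minimal sketch, assuming a hypothetical ``example.nii.gz``::

    import numpy as np
    import nibabel as nib

    img = nib.load('example.nii.gz')

    # Scaled data at the narrowest dtype that can hold data and scale factors.
    data = np.asanyarray(img.dataobj)

    # Ask for float32 output; scaling is applied, then the result is cast.
    small = img.get_fdata(dtype=np.float32)

    print(data.dtype, small.dtype)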
+Run this benchmark with:: -Run this benchmark with: - - nosetests -s --match '(?:^|[\\b_\\.//-])[Bb]ench' /path/to/bench_load_save.py + pytest -c /benchmarks/pytest.benchmark.ini /benchmarks/bench_array_to_file.py """ -from __future__ import division, print_function import sys from io import BytesIO # NOQA diff --git a/nibabel/benchmarks/bench_arrayproxy_slicing.py b/nibabel/benchmarks/bench_arrayproxy_slicing.py index c880aa0700..2ed9ec9ccd 100644 --- a/nibabel/benchmarks/bench_arrayproxy_slicing.py +++ b/nibabel/benchmarks/bench_arrayproxy_slicing.py @@ -5,20 +5,16 @@ import nibabel as nib nib.bench() -If you have doctests enabled by default in nose (with a noserc file or -environment variable), and you have a numpy version <= 1.6.1, this will also -run the doctests, let's hope they pass. +Run this benchmark with:: -Run this benchmark with: - - nosetests -s --match '(?:^|[\\b_\\.//-])[Bb]ench' /path/to/bench_arrayproxy_slicing.py + pytest -c /benchmarks/pytest.benchmark.ini /benchmarks/bench_arrayproxy_slicing.py """ from timeit import timeit import gc import itertools as it import numpy as np -import mock +from unittest import mock import nibabel as nib from nibabel.tmpdirs import InTemporaryDirectory diff --git a/nibabel/benchmarks/bench_fileslice.py b/nibabel/benchmarks/bench_fileslice.py index b9568c65a0..8763784dc6 100644 --- a/nibabel/benchmarks/bench_fileslice.py +++ b/nibabel/benchmarks/bench_fileslice.py @@ -3,15 +3,10 @@ import nibabel as nib nib.bench() -If you have doctests enabled by default in nose (with a noserc file or -environment variable), and you have a numpy version <= 1.6.1, this will also -run the doctests, let's hope they pass. +Run this benchmark with:: -Run this benchmark with: - - nosetests -s --match '(?:^|[\\b_\\.//-])[Bb]ench' /path/to/bench_fileslice.py + pytest -c /benchmarks/pytest.benchmark.ini /benchmarks/bench_fileslice.py """ -from __future__ import division, print_function import sys from timeit import timeit diff --git a/nibabel/benchmarks/bench_finite_range.py b/nibabel/benchmarks/bench_finite_range.py index 5f268eb285..1ca2bf95d0 100644 --- a/nibabel/benchmarks/bench_finite_range.py +++ b/nibabel/benchmarks/bench_finite_range.py @@ -5,15 +5,10 @@ import nibabel as nib nib.bench() -If you have doctests enabled by default in nose (with a noserc file or -environment variable), and you have a numpy version <= 1.6.1, this will also -run the doctests, let's hope they pass. +Run this benchmark with:: -Run this benchmark with: - - nosetests -s --match '(?:^|[\\b_\\.//-])[Bb]ench' /path/to/bench_finite_range + pytest -c /benchmarks/pytest.benchmark.ini /benchmarks/bench_finite_range.py """ -from __future__ import division, print_function import sys diff --git a/nibabel/benchmarks/bench_load_save.py b/nibabel/benchmarks/bench_load_save.py index c2ee68578a..46118df43e 100644 --- a/nibabel/benchmarks/bench_load_save.py +++ b/nibabel/benchmarks/bench_load_save.py @@ -5,15 +5,10 @@ import nibabel as nib nib.bench() -If you have doctests enabled by default in nose (with a noserc file or -environment variable), and you have a numpy version <= 1.6.1, this will also -run the doctests, let's hope they pass. 
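From Python, the same benchmarks can be run through the ``bench()`` wrapper added to the top-level package; in this sketch the ``-k`` selection is illustrative only::

    import nibabel as nib

    # Runs pytest with nibabel/benchmarks/pytest.benchmark.ini, which collects
    # bench_*.py files and bench_* functions; -k narrows the selection.
    code = nib.bench(extra_argv=['-k', 'load_save'])
    print(int(code))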
+Run this benchmark with:: -Run this benchmark with: - - nosetests -s --match '(?:^|[\\b_\\.//-])[Bb]ench' /path/to/bench_load_save.py + pytest -c /benchmarks/pytest.benchmark.ini /benchmarks/bench_load_save.py """ -from __future__ import division, print_function import sys diff --git a/nibabel/benchmarks/bench_streamlines.py b/nibabel/benchmarks/bench_streamlines.py index c076657d27..5c49c9e177 100644 --- a/nibabel/benchmarks/bench_streamlines.py +++ b/nibabel/benchmarks/bench_streamlines.py @@ -5,19 +5,13 @@ import nibabel as nib nib.bench() -If you have doctests enabled by default in nose (with a noserc file or -environment variable), and you have a numpy version <= 1.6.1, this will also run -the doctests, let's hope they pass. +Run this benchmark with:: -Run this benchmark with: - - nosetests -s --match '(?:^|[\\b_\\.//-])[Bb]ench' /path/to/bench_streamlines.py + pytest -c /benchmarks/pytest.benchmark.ini /benchmarks/bench_streamlines.py """ -from __future__ import division, print_function import numpy as np -from six.moves import zip from nibabel.tmpdirs import InTemporaryDirectory from numpy.testing import assert_array_equal diff --git a/nibabel/benchmarks/butils.py b/nibabel/benchmarks/butils.py index 36e42f270d..bea5872272 100644 --- a/nibabel/benchmarks/butils.py +++ b/nibabel/benchmarks/butils.py @@ -1,6 +1,5 @@ """ Benchmarking utilities """ -from __future__ import print_function, division from .. import get_info diff --git a/nibabel/benchmarks/pytest.benchmark.ini b/nibabel/benchmarks/pytest.benchmark.ini new file mode 100644 index 0000000000..734e6c7d4c --- /dev/null +++ b/nibabel/benchmarks/pytest.benchmark.ini @@ -0,0 +1,4 @@ +[pytest] +python_files = bench_*.py +python_functions = bench_* +addopts = --capture=no diff --git a/nibabel/brikhead.py b/nibabel/brikhead.py index 9e521e61b6..2afd5b2c89 100644 --- a/nibabel/brikhead.py +++ b/nibabel/brikhead.py @@ -27,18 +27,15 @@ am aware) always be >= 1. This permits sub-brick indexing common in AFNI programs (e.g., example4d+orig'[0]'). """ -from __future__ import print_function, division from copy import deepcopy import os import re import numpy as np -from six import string_types from .arrayproxy import ArrayProxy from .fileslice import strided_scalar -from .keywordonly import kw_only_meth from .spatialimages import ( SpatialImage, SpatialHeader, @@ -85,8 +82,8 @@ class AFNIHeaderError(HeaderDataError): DATA_OFFSET = 0 -TYPE_RE = re.compile('type\s*=\s*(string|integer|float)-attribute\s*\n') -NAME_RE = re.compile('name\s*=\s*(\w+)\s*\n') +TYPE_RE = re.compile(r'type\s*=\s*(string|integer|float)-attribute\s*\n') +NAME_RE = re.compile(r'name\s*=\s*(\w+)\s*\n') def _unpack_var(var): @@ -204,7 +201,7 @@ def parse_AFNI_header(fobj): [1, 1, 1] """ # edge case for being fed a filename instead of a file object - if isinstance(fobj, string_types): + if isinstance(fobj, str): with open(fobj, 'rt') as src: return parse_AFNI_header(src) # unpack variables in HEAD file @@ -222,11 +219,14 @@ class AFNIArrayProxy(ArrayProxy): None """ - @kw_only_meth(2) - def __init__(self, file_like, header, mmap=True, keep_file_open=None): + def __init__(self, file_like, header, *, mmap=True, keep_file_open=None): """ Initialize AFNI array proxy + .. deprecated:: 2.4.1 + ``keep_file_open='auto'`` is redundant with `False` and has + been deprecated. It raises an error as of nibabel 3.0. 
+ Parameters ---------- file_like : file-like object @@ -240,19 +240,15 @@ def __init__(self, file_like, header, mmap=True, keep_file_open=None): True gives the same behavior as ``mmap='c'``. If `file_like` cannot be memory-mapped, ignore `mmap` value and read array from file. - keep_file_open : { None, 'auto', True, False }, optional, keyword only + keep_file_open : { None, True, False }, optional, keyword only `keep_file_open` controls whether a new file handle is created every time the image is accessed, or a single file handle is created and used for the lifetime of this ``ArrayProxy``. If ``True``, a single file handle is created and used. If ``False``, - a new file handle is created every time the image is accessed. If - ``'auto'``, and the optional ``indexed_gzip`` dependency is - present, a single file handle is created and persisted. If - ``indexed_gzip`` is not available, behavior is the same as if - ``keep_file_open is False``. If ``file_like`` refers to an open - file handle, this setting has no effect. The default value - (``None``) will result in the value of - ``nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT` being used. + a new file handle is created every time the image is accessed. + If ``file_like`` refers to an open file handle, this setting has no + effect. The default value (``None``) will result in the value of + ``nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT`` being used. """ super(AFNIArrayProxy, self).__init__(file_like, header, @@ -264,19 +260,24 @@ def __init__(self, file_like, header, mmap=True, keep_file_open=None): def scaling(self): return self._scaling - def __array__(self): - raw_data = self.get_unscaled() - # datatype may change if applying self._scaling - return raw_data if self.scaling is None else raw_data * self.scaling + def _get_scaled(self, dtype, slicer): + raw_data = self._get_unscaled(slicer=slicer) + if self.scaling is None: + if dtype is None: + return raw_data + final_type = np.promote_types(raw_data.dtype, dtype) + return raw_data.astype(final_type, copy=False) + + # Broadcast scaling to shape of original data + fake_data = strided_scalar(self._shape) + _, scaling = np.broadcast_arrays(fake_data, self.scaling) + + final_type = np.result_type(raw_data, scaling) + if dtype is not None: + final_type = np.promote_types(final_type, dtype) - def __getitem__(self, slicer): - raw_data = super(AFNIArrayProxy, self).__getitem__(slicer) - # apply volume specific scaling (may change datatype!) - if self.scaling is not None: - fake_data = strided_scalar(self._shape) - _, scaling = np.broadcast_arrays(fake_data, self.scaling) - raw_data = raw_data * scaling[slicer] - return raw_data + # Slice scaling to give output shape + return raw_data * scaling[slicer].astype(final_type) class AFNIHeader(SpatialHeader): @@ -295,8 +296,8 @@ def __init__(self, info): -------- >>> fname = os.path.join(datadir, 'example4d+orig.HEAD') >>> header = AFNIHeader(parse_AFNI_header(fname)) - >>> header.get_data_dtype() - dtype('int16') + >>> header.get_data_dtype().str + '>> header.get_zooms() (3.0, 3.0, 3.0, 3.0) >>> header.get_data_shape() @@ -488,7 +489,7 @@ class AFNIImage(SpatialImage): [ 0. , 0. , 3. , -52.3511], [ 0. , 0. , 0. , 1. 
]]) >>> head = load(os.path.join(datadir, 'example4d+orig.HEAD')) - >>> np.array_equal(head.get_data(), brik.get_data()) + >>> np.array_equal(head.get_fdata(), brik.get_fdata()) True """ @@ -501,11 +502,14 @@ class AFNIImage(SpatialImage): ImageArrayProxy = AFNIArrayProxy @classmethod - @kw_only_meth(1) - def from_file_map(klass, file_map, mmap=True, keep_file_open=None): + def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): """ Creates an AFNIImage instance from `file_map` + .. deprecated:: 2.4.1 + ``keep_file_open='auto'`` is redundant with `False` and has + been deprecated. It raises an error as of nibabel 3.0. + Parameters ---------- file_map : dict @@ -518,19 +522,15 @@ def from_file_map(klass, file_map, mmap=True, keep_file_open=None): `mmap` value of True gives the same behavior as ``mmap='c'``. If image data file cannot be memory-mapped, ignore `mmap` value and read array from file. - keep_file_open : {None, 'auto', True, False}, optional, keyword only + keep_file_open : {None, True, False}, optional, keyword only `keep_file_open` controls whether a new file handle is created every time the image is accessed, or a single file handle is created and used for the lifetime of this ``ArrayProxy``. If ``True``, a single file handle is created and used. If ``False``, - a new file handle is created every time the image is accessed. If - ``'auto'``, and the optional ``indexed_gzip`` dependency is - present, a single file handle is created and persisted. If - ``indexed_gzip`` is not available, behavior is the same as if - ``keep_file_open is False``. If ``file_like`` refers to an open - file handle, this setting has no effect. The default value - (``None``) will result in the value of - ``nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT` being used. + a new file handle is created every time the image is accessed. + If ``file_like`` refers to an open file handle, this setting has no + effect. The default value (``None``) will result in the value of + ``nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT`` being used. """ with file_map['header'].get_prepare_fileobj('rt') as hdr_fobj: hdr = klass.header_class.from_fileobj(hdr_fobj) @@ -541,41 +541,6 @@ def from_file_map(klass, file_map, mmap=True, keep_file_open=None): return klass(data, hdr.get_affine(), header=hdr, extra=None, file_map=file_map) - @classmethod - @kw_only_meth(1) - def from_filename(klass, filename, mmap=True, keep_file_open=None): - """ - Creates an AFNIImage instance from `filename` - - Parameters - ---------- - filename : str - Path to BRIK or HEAD file to be loaded - mmap : {True, False, 'c', 'r'}, optional, keyword only - `mmap` controls the use of numpy memory mapping for reading image - array data. If False, do not try numpy ``memmap`` for data array. - If one of {'c', 'r'}, try numpy memmap with ``mode=mmap``. A - `mmap` value of True gives the same behavior as ``mmap='c'``. If - image data file cannot be memory-mapped, ignore `mmap` value and - read array from file. - keep_file_open : {None, 'auto', True, False}, optional, keyword only - `keep_file_open` controls whether a new file handle is created - every time the image is accessed, or a single file handle is - created and used for the lifetime of this ``ArrayProxy``. If - ``True``, a single file handle is created and used. If ``False``, - a new file handle is created every time the image is accessed. If - ``'auto'``, and the optional ``indexed_gzip`` dependency is - present, a single file handle is created and persisted. 
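As the ``filespec_to_file_map`` docstring below describes, the HEAD name, the uncompressed BRIK name, or the full compressed BRIK name all select the same pair; a hedged sketch with hypothetical file names::

    import nibabel as nib

    # Both of these hypothetical names resolve to the same BRIK/HEAD pair.
    img = nib.load('my_image.HEAD')
    img = nib.load('my_image.BRIK.gz')

    data = img.get_fdata()
    print(img.shape, img.header.get_zooms())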
If - ``indexed_gzip`` is not available, behavior is the same as if - ``keep_file_open is False``. If ``file_like`` refers to an open - file handle, this setting has no effect. The default value - (``None``) will result in the value of - ``nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT` being used. - """ - file_map = klass.filespec_to_file_map(filename) - return klass.from_file_map(file_map, mmap=mmap, - keep_file_open=keep_file_open) - @classmethod def filespec_to_file_map(klass, filespec): """ @@ -585,6 +550,7 @@ def filespec_to_file_map(klass, filespec): afni.nimh.nih.gov/pub/dist/doc/program_help/README.compression.html. Thus, if you have AFNI files my_image.HEAD and my_image.BRIK.gz and you want to load the AFNI BRIK / HEAD pair, you can specify: + * The HEAD filename - e.g., my_image.HEAD * The BRIK filename w/o compressed extension - e.g., my_image.BRIK * The full BRIK filename - e.g., my_image.BRIK.gz @@ -621,7 +587,5 @@ def filespec_to_file_map(klass, filespec): file_map[key].filename = fname return file_map - load = from_filename - load = AFNIImage.load diff --git a/nibabel/casting.py b/nibabel/casting.py index 3709ee1dea..8406824dbe 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -8,7 +8,6 @@ from platform import processor, machine import numpy as np -from . import setup_test # noqa class CastingError(Exception): @@ -172,12 +171,6 @@ def shared_range(flt_type, int_type): # types. # ---------------------------------------------------------------------------- -try: - _float16 = np.float16 -except AttributeError: # float16 not present in np < 1.6 - _float16 = None - - class FloatingError(Exception): pass @@ -243,7 +236,7 @@ def type_info(np_type): minexp=info.minexp, maxexp=info.maxexp, width=width) - if np_type in (_float16, np.float32, np.float64, + if np_type in (np.float16, np.float32, np.float64, np.complex64, np.complex128): return ret info_64 = np.finfo(np.float64) diff --git a/nibabel/checkwarns.py b/nibabel/checkwarns.py deleted file mode 100644 index 01ef8fd10c..0000000000 --- a/nibabel/checkwarns.py +++ /dev/null @@ -1,37 +0,0 @@ -# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -# -# See COPYING file distributed along with the NiBabel package for the -# copyright and license terms. 
-# -### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Contexts for *with* statement allowing checks for warnings -''' -from __future__ import division, print_function - -import warnings - -from .testing import (error_warnings, suppress_warnings) - - -warnings.warn('The checkwarns module is deprecated and will be removed ' - 'in nibabel v3.0', FutureWarning) - - -class ErrorWarnings(error_warnings): - - def __init__(self, *args, **kwargs): - warnings.warn('ErrorWarnings is deprecated and will be removed in ' - 'nibabel v3.0; use nibabel.testing.error_warnings.', - FutureWarning) - super(ErrorWarnings, self).__init__(*args, **kwargs) - - -class IgnoreWarnings(suppress_warnings): - - def __init__(self, *args, **kwargs): - warnings.warn('IgnoreWarnings is deprecated and will be removed in ' - 'nibabel v3.0; use nibabel.testing.suppress_warnings.', - FutureWarning) - super(IgnoreWarnings, self).__init__(*args, **kwargs) diff --git a/nibabel/cifti2/__init__.py b/nibabel/cifti2/__init__.py index 3025a6f991..c0933c9041 100644 --- a/nibabel/cifti2/__init__.py +++ b/nibabel/cifti2/__init__.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -"""CIfTI format IO +"""CIFTI-2 format IO .. currentmodule:: nibabel.cifti2 @@ -14,6 +14,7 @@ :toctree: ../generated cifti2 + cifti2_axes """ from .parse_cifti2 import Cifti2Extension @@ -25,3 +26,4 @@ Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ, Cifti2Vertices, Cifti2Volume, CIFTI_BRAIN_STRUCTURES, Cifti2HeaderError, CIFTI_MODEL_TYPES, load, save) +from .cifti2_axes import (Axis, BrainModelAxis, ParcelsAxis, SeriesAxis, LabelAxis, ScalarAxis) diff --git a/nibabel/cifti2/cifti2.py b/nibabel/cifti2/cifti2.py index 67dab1d0c2..f1886d4066 100644 --- a/nibabel/cifti2/cifti2.py +++ b/nibabel/cifti2/cifti2.py @@ -6,32 +6,25 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -''' Read / write access to CIfTI2 image format +''' Read / write access to CIFTI-2 image format Format of the NIFTI2 container format described here: http://www.nitrc.org/forum/message.php?msg_id=3738 -Definition of the CIFTI2 header format and file extensions attached to this -email: +Definition of the CIFTI-2 header format and file extensions can be found at: - http://www.nitrc.org/forum/forum.php?thread_id=4380&forum_id=1955 - -Filename is ``CIFTI-2_Main_FINAL_1March2014.pdf``. + http://www.nitrc.org/projects/cifti ''' -from __future__ import division, print_function, absolute_import import re -try: - from collections.abc import MutableSequence, MutableMapping, Iterable -except ImportError: - # PY2 compatibility - from collections import MutableSequence, MutableMapping, Iterable +from collections.abc import MutableSequence, MutableMapping, Iterable from collections import OrderedDict from .. 
import xmlutils as xml from ..filebasedimages import FileBasedHeader from ..dataobj_images import DataobjImage from ..nifti2 import Nifti2Image, Nifti2Header from ..arrayproxy import reshape_dataobj +from warnings import warn def _float_01(val): @@ -42,7 +35,7 @@ def _float_01(val): class Cifti2HeaderError(Exception): - """ Error in CIFTI2 header + """ Error in CIFTI-2 header """ @@ -178,7 +171,7 @@ def _to_xml_element(self): class Cifti2LabelTable(xml.XmlSerializable, MutableMapping): - """ CIFTI2 label table: a sequence of ``Cifti2Label``s + """ CIFTI-2 label table: a sequence of ``Cifti2Label``\s * Description - Used by NamedMap when IndicesMapToDataType is "CIFTI_INDEX_TYPE_LABELS" in order to associate names and display colors @@ -236,7 +229,7 @@ def _to_xml_element(self): class Cifti2Label(xml.XmlSerializable): - """ CIFTI2 label: association of integer key with a name and RGBA values + """ CIFTI-2 label: association of integer key with a name and RGBA values For all color components, value is floating point with range 0.0 to 1.0. @@ -314,7 +307,7 @@ def _to_xml_element(self): class Cifti2NamedMap(xml.XmlSerializable): - """CIFTI2 named map: association of name and optional data with a map index + """CIFTI-2 named map: association of name and optional data with a map index Associates a name, optional metadata, and possibly a LabelTable with an index in a map. @@ -432,7 +425,7 @@ def _to_xml_element(self): class Cifti2VoxelIndicesIJK(xml.XmlSerializable, MutableSequence): - """CIFTI2 VoxelIndicesIJK: Set of voxel indices contained in a structure + """CIFTI-2 VoxelIndicesIJK: Set of voxel indices contained in a structure * Description - Identifies the voxels that model a brain structure, or participate in a parcel. Note that when this is a child of BrainModel, @@ -514,7 +507,7 @@ def _to_xml_element(self): class Cifti2Vertices(xml.XmlSerializable, MutableSequence): - """CIFTI2 vertices - association of brain structure and a list of vertices + """CIFTI-2 vertices - association of brain structure and a list of vertices * Description - Contains a BrainStructure type and a list of vertex indices within a Parcel. @@ -580,7 +573,7 @@ def _to_xml_element(self): class Cifti2Parcel(xml.XmlSerializable): - """CIFTI2 parcel: association of a name with vertices and/or voxels + """CIFTI-2 parcel: association of a name with vertices and/or voxels * Description - Associates a name, plus vertices and/or voxels, with an index. @@ -695,7 +688,7 @@ def _to_xml_element(self): class Cifti2Volume(xml.XmlSerializable): - """CIFTI2 volume: information about a volume for mappings that use voxels + """CIFTI-2 volume: information about a volume for mappings that use voxels * Description - Provides information about the volume for any mappings that use voxels. @@ -738,7 +731,7 @@ def _to_xml_element(self): class Cifti2VertexIndices(xml.XmlSerializable, MutableSequence): - """CIFTI2 vertex indices: vertex indices for an associated brain model + """CIFTI-2 vertex indices: vertex indices for an associated brain model The vertex indices (which are independent for each surface, and zero-based) that are used in this brain model[.] 
The parent @@ -933,8 +926,8 @@ class Cifti2MatrixIndicesMap(xml.XmlSerializable, MutableSequence): * Text Content: [NA] * Parent Element - Matrix - Attribute - --------- + Attributes + ---------- applies_to_matrix_dimension : list of ints Dimensions of this matrix that follow this mapping indices_map_to_data_type : str one of CIFTI_MAP_TYPES @@ -1081,7 +1074,7 @@ def _to_xml_element(self): class Cifti2Matrix(xml.XmlSerializable, MutableSequence): - """ CIFTI2 Matrix object + """ CIFTI-2 Matrix object This is a list-like container where the elements are instances of :class:`Cifti2MatrixIndicesMap`. @@ -1211,9 +1204,41 @@ def _to_xml_element(self): mat.append(mim._to_xml_element()) return mat + def get_axis(self, index): + ''' + Generates the Cifti2 axis for a given dimension + + Parameters + ---------- + index : int + Dimension for which we want to obtain the mapping. + + Returns + ------- + axis : :class:`.cifti2_axes.Axis` + ''' + from . import cifti2_axes + return cifti2_axes.from_index_mapping(self.get_index_map(index)) + + def get_data_shape(self): + """ + Returns data shape expected based on the CIFTI-2 header + + Any dimensions omitted in the CIFTI-2 header will be given a default size of None. + """ + from . import cifti2_axes + if len(self.mapped_indices) == 0: + return () + base_shape = [None] * (max(self.mapped_indices) + 1) + for mim in self: + size = len(cifti2_axes.from_index_mapping(mim)) + for idx in mim.applies_to_matrix_dimension: + base_shape[idx] = size + return tuple(base_shape) + class Cifti2Header(FileBasedHeader, xml.XmlSerializable): - ''' Class for CIFTI2 header extension ''' + ''' Class for CIFTI-2 header extension ''' def __init__(self, matrix=None, version="2.0"): FileBasedHeader.__init__(self) @@ -1268,9 +1293,42 @@ def get_index_map(self, index): ''' return self.matrix.get_index_map(index) + def get_axis(self, index): + ''' + Generates the Cifti2 axis for a given dimension + + Parameters + ---------- + index : int + Dimension for which we want to obtain the mapping. + + Returns + ------- + axis : :class:`.cifti2_axes.Axis` + ''' + return self.matrix.get_axis(index) + + @classmethod + def from_axes(cls, axes): + ''' + Creates a new Cifti2 header based on the Cifti2 axes + + Parameters + ---------- + axes : tuple of :class`.cifti2_axes.Axis` + sequence of Cifti2 axes describing each row/column of the matrix to be stored + + Returns + ------- + header : Cifti2Header + new header describing the rows/columns in a format consistent with Cifti2 + ''' + from . import cifti2_axes + return cifti2_axes.to_header(axes) + class Cifti2Image(DataobjImage): - """ Class for single file CIFTI2 format image + """ Class for single file CIFTI-2 format image """ header_class = Cifti2Header valid_exts = Nifti2Image.valid_exts @@ -1297,8 +1355,10 @@ def __init__(self, Object containing image data. It should be some object that returns an array from ``np.asanyarray``. It should have a ``shape`` attribute or property. - header : Cifti2Header instance - Header with data for / from XML part of CIFTI2 format. + header : Cifti2Header instance or sequence of :class:`cifti2_axes.Axis` + Header with data for / from XML part of CIFTI-2 format. + Alternatively a sequence of cifti2_axes.Axis objects can be provided + describing each dimension of the array. nifti_header : None or mapping or NIfTI2 header instance, optional Metadata for NIfTI2 component of this format. 
extra : None or mapping @@ -1306,22 +1366,30 @@ def __init__(self, file_map : mapping, optional Mapping giving file information for this image format. ''' + if not isinstance(header, Cifti2Header) and header: + header = Cifti2Header.from_axes(header) super(Cifti2Image, self).__init__(dataobj, header=header, extra=extra, file_map=file_map) self._nifti_header = Nifti2Header.from_header(nifti_header) + # if NIfTI header not specified, get data type from input array if nifti_header is None: if hasattr(dataobj, 'dtype'): self._nifti_header.set_data_dtype(dataobj.dtype) self.update_headers() + if self._dataobj.shape != self.header.matrix.get_data_shape(): + warn("Dataobj shape {} does not match shape expected from CIFTI-2 header {}".format( + self._dataobj.shape, self.header.matrix.get_data_shape() + )) + @property def nifti_header(self): return self._nifti_header @classmethod - def from_file_map(klass, file_map): - """ Load a CIFTI2 image from a file_map + def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): + """ Load a CIFTI-2 image from a file_map Parameters ---------- @@ -1333,7 +1401,8 @@ def from_file_map(klass, file_map): Returns a Cifti2Image """ from .parse_cifti2 import _Cifti2AsNiftiImage, Cifti2Extension - nifti_img = _Cifti2AsNiftiImage.from_file_map(file_map) + nifti_img = _Cifti2AsNiftiImage.from_file_map(file_map, mmap=mmap, + keep_file_open=keep_file_open) # Get cifti2 header for item in nifti_img.header.extensions: @@ -1341,11 +1410,11 @@ def from_file_map(klass, file_map): cifti_header = item.get_content() break else: - raise ValueError('NIfTI2 header does not contain a CIFTI2 ' + raise ValueError('NIfTI2 header does not contain a CIFTI-2 ' 'extension') # Construct cifti image. - # User array proxy object where possible + # Use array proxy object where possible dataobj = nifti_img.dataobj return Cifti2Image(reshape_dataobj(dataobj, dataobj.shape[4:]), header=cifti_header, @@ -1388,6 +1457,11 @@ def to_file_map(self, file_map=None): header = self._nifti_header extension = Cifti2Extension(content=self.header.to_xml()) header.extensions.append(extension) + if self._dataobj.shape != self.header.matrix.get_data_shape(): + raise ValueError( + "Dataobj shape {} does not match shape expected from CIFTI-2 header {}".format( + self._dataobj.shape, self.header.matrix.get_data_shape() + )) # if intent code is not set, default to unknown CIFTI if header.get_intent()[0] == 'none': header.set_intent('NIFTI_INTENT_CONNECTIVITY_UNKNOWN') @@ -1400,7 +1474,7 @@ def to_file_map(self, file_map=None): img.to_file_map(file_map or self.file_map) def update_headers(self): - ''' Harmonize CIFTI2 and NIfTI headers with image data + ''' Harmonize NIfTI headers with image data >>> import numpy as np >>> data = np.zeros((2,3,4)) @@ -1420,33 +1494,5 @@ def set_data_dtype(self, dtype): self._nifti_header.set_data_dtype(dtype) -def load(filename): - """ Load cifti2 from `filename` - - Parameters - ---------- - filename : str - filename of image to be loaded - - Returns - ------- - img : Cifti2Image - cifti image instance - - Raises - ------ - ImageFileError: if `filename` doesn't look like cifti - IOError : if `filename` does not exist - """ - return Cifti2Image.from_filename(filename) - - -def save(img, filename): - """ Save cifti to `filename` - - Parameters - ---------- - filename : str - filename to which to save image - """ - Cifti2Image.instance_to_filename(img, filename) +load = Cifti2Image.from_filename +save = Cifti2Image.instance_to_filename diff --git 
a/nibabel/cifti2/cifti2_axes.py b/nibabel/cifti2/cifti2_axes.py new file mode 100644 index 0000000000..c4c47007db --- /dev/null +++ b/nibabel/cifti2/cifti2_axes.py @@ -0,0 +1,1462 @@ +""" +Defines :class:`Axis` objects to create, read, and manipulate CIFTI-2 files + +These axes provide an alternative interface to the information in the CIFTI-2 header. +Each type of CIFTI-2 axes describing the rows/columns in a CIFTI-2 matrix is given a unique class: + +* :class:`BrainModelAxis`: each row/column is a voxel or vertex +* :class:`ParcelsAxis`: each row/column is a group of voxels and/or vertices +* :class:`ScalarAxis`: each row/column has a unique name (with optional meta-data) +* :class:`LabelAxis`: each row/column has a unique name and label table (with optional meta-data) +* :class:`SeriesAxis`: each row/column is a timepoint, which increases monotonically + +All of these classes are derived from the Axis class. + +After loading a CIFTI-2 file a tuple of axes describing the rows and columns can be obtained +from the :meth:`.cifti2.Cifti2Header.get_axis` method on the header object +(e.g. ``nibabel.load().header.get_axis()``). Inversely, a new +:class:`.cifti2.Cifti2Header` object can be created from existing Axis objects +using the :meth:`.cifti2.Cifti2Header.from_axes` factory method. + +CIFTI-2 Axis objects of the same type can be concatenated using the '+'-operator. +Numpy indexing also works on axes +(except for SeriesAxis objects, which have to remain monotonically increasing or decreasing). + +Creating new CIFTI-2 axes +------------------------- +New Axis objects can be constructed by providing a description for what is contained +in each row/column of the described tensor. For each Axis sub-class this descriptor is: + +* :class:`BrainModelAxis`: a CIFTI-2 structure name and a voxel or vertex index +* :class:`ParcelsAxis`: a name and a sequence of voxel and vertex indices +* :class:`ScalarAxis`: a name and optionally a dict of meta-data +* :class:`LabelAxis`: a name, dict of label index to name and colour, + and optionally a dict of meta-data +* :class:`SeriesAxis`: the time-point of each row/column is set by setting the start, stop, size, + and unit of the time-series + +Several helper functions exist to create new :class:`BrainModelAxis` axes: + +* :meth:`BrainModelAxis.from_mask` creates a new BrainModelAxis volume covering the + non-zero values of a mask +* :meth:`BrainModelAxis.from_surface` creates a new BrainModelAxis surface covering the provided + indices of a surface + +A :class:`ParcelsAxis` axis can be created from a sequence of :class:`BrainModelAxis` axes using +:meth:`ParcelsAxis.from_brain_models`. + +Examples +-------- +We can create brain models covering the left cortex and left thalamus using: + +>>> from nibabel import cifti2 +>>> import numpy as np +>>> bm_cortex = cifti2.BrainModelAxis.from_mask([True, False, True, True], +... name='cortex_left') +>>> bm_thal = cifti2.BrainModelAxis.from_mask(np.ones((2, 2, 2)), affine=np.eye(4), +... name='thalamus_left') + +In this very simple case ``bm_cortex`` describes a left cortical surface skipping the second +out of four vertices. ``bm_thal`` contains all voxels in a 2x2x2 volume. + +Brain structure names automatically get converted to valid CIFTI-2 indentifiers using +:meth:`BrainModelAxis.to_cifti_brain_structure_name`. +A 1-dimensional mask will be automatically interpreted as a surface element and a 3-dimensional +mask as a volume element. 
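+
+As a small illustrative check, the name conversion described above can also be
+called directly (the same mechanism is used when constructing ``bm_cortex``):
+
+>>> cifti2.BrainModelAxis.to_cifti_brain_structure_name('cortex_left')
+'CIFTI_STRUCTURE_CORTEX_LEFT'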
+ +These can be concatenated in a single brain model covering the left cortex and thalamus by +simply adding them together + +>>> bm_full = bm_cortex + bm_thal + +Brain models covering the full HCP grayordinate space can be constructed by adding all the +volumetric and surface brain models together like this (or by reading one from an already +existing HCP file). + +Getting a specific brain region from the full brain model is as simple as: + +>>> assert bm_full[bm_full.name == 'CIFTI_STRUCTURE_CORTEX_LEFT'] == bm_cortex +>>> assert bm_full[bm_full.name == 'CIFTI_STRUCTURE_THALAMUS_LEFT'] == bm_thal + +You can also iterate over all brain structures in a brain model: + +>>> for idx, (name, slc, bm) in enumerate(bm_full.iter_structures()): +... print((str(name), slc)) +... assert bm == bm_full[slc] +... assert bm == bm_cortex if idx == 0 else bm_thal +('CIFTI_STRUCTURE_CORTEX_LEFT', slice(0, 3, None)) +('CIFTI_STRUCTURE_THALAMUS_LEFT', slice(3, None, None)) + +In this case there will be two iterations, namely: +('CIFTI_STRUCTURE_CORTEX_LEFT', slice(0, ), bm_cortex) +and +('CIFTI_STRUCTURE_THALAMUS_LEFT', slice(, None), bm_thal) + +ParcelsAxis can be constructed from selections of these brain models: + +>>> parcel = cifti2.ParcelsAxis.from_brain_models([ +... ('surface_parcel', bm_cortex[:2]), # contains first 2 cortical vertices +... ('volume_parcel', bm_thal), # contains thalamus +... ('combined_parcel', bm_full[[1, 8, 10]]), # contains selected voxels/vertices +... ]) + +Time series are represented by their starting time (typically 0), step size +(i.e. sampling time or TR), and number of elements: + +>>> series = cifti2.SeriesAxis(start=0, step=100, size=5000) + +So a header for fMRI data with a TR of 100 ms covering the left cortex and thalamus with +5000 timepoints could be created with + +>>> type(cifti2.Cifti2Header.from_axes((series, bm_cortex + bm_thal))) + + +Similarly the curvature and cortical thickness on the left cortex could be stored using a header +like: + +>>> type(cifti2.Cifti2Header.from_axes((cifti2.ScalarAxis(['curvature', 'thickness']), +... bm_cortex))) + +""" +import numpy as np +from . 
import cifti2 +from operator import xor +import abc + + +def from_index_mapping(mim): + """ + Parses the MatrixIndicesMap to find the appropriate CIFTI-2 axis describing the rows or columns + + Parameters + ---------- + mim : :class:`.cifti2.Cifti2MatrixIndicesMap` + + Returns + ------- + axis : subclass of :class:`Axis` + """ + return_type = {'CIFTI_INDEX_TYPE_SCALARS': ScalarAxis, + 'CIFTI_INDEX_TYPE_LABELS': LabelAxis, + 'CIFTI_INDEX_TYPE_SERIES': SeriesAxis, + 'CIFTI_INDEX_TYPE_BRAIN_MODELS': BrainModelAxis, + 'CIFTI_INDEX_TYPE_PARCELS': ParcelsAxis} + return return_type[mim.indices_map_to_data_type].from_index_mapping(mim) + + +def to_header(axes): + """ + Converts the axes describing the rows/columns of a CIFTI-2 vector/matrix to a Cifti2Header + + Parameters + ---------- + axes : iterable of :py:class:`Axis` objects + one or more axes describing each dimension in turn + + Returns + ------- + header : :class:`.cifti2.Cifti2Header` + """ + axes = tuple(axes) + mims_all = [] + matrix = cifti2.Cifti2Matrix() + for dim, ax in enumerate(axes): + if ax in axes[:dim]: + dim_prev = axes.index(ax) + mims_all[dim_prev].applies_to_matrix_dimension.append(dim) + mims_all.append(mims_all[dim_prev]) + else: + mim = ax.to_mapping(dim) + mims_all.append(mim) + matrix.append(mim) + return cifti2.Cifti2Header(matrix) + + +class Axis(abc.ABC): + """ + Abstract class for any object describing the rows or columns of a CIFTI-2 vector/matrix + + Mainly used for type checking. + + Base class for the following concrete CIFTI-2 axes: + + * :class:`BrainModelAxis`: each row/column is a voxel or vertex + * :class:`ParcelsAxis`: each row/column is a group of voxels and/or vertices + * :class:`ScalarAxis`: each row/column has a unique name with optional meta-data + * :class:`LabelAxis`: each row/column has a unique name and label table with optional meta-data + * :class:`SeriesAxis`: each row/column is a timepoint, which increases monotonically + """ + + @property + def size(self): + return len(self) + + @abc.abstractmethod + def __len__(self): + pass + + @abc.abstractmethod + def __eq__(self, other): + """ + Compares whether two Axes are equal + + Parameters + ---------- + other : Axis + other axis to compare to + + Returns + ------- + False if the axes don't have the same type or if their content differs + """ + pass + + @abc.abstractmethod + def __add__(self, other): + """ + Concatenates two Axes of the same type + + Parameters + ---------- + other : Axis + axis to be appended to the current one + + Returns + ------- + Axis of the same subtype as self and other + """ + pass + + @abc.abstractmethod + def __getitem__(self, item): + """ + Extracts definition of single row/column or new Axis describing a subset of the rows/columns + """ + pass + + +class BrainModelAxis(Axis): + """ + Each row/column in the CIFTI-2 vector/matrix represents a single vertex or voxel + + This Axis describes which vertex/voxel is represented by each row/column. 
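+
+    A minimal sketch with hypothetical vertex indices (three vertices picked from
+    a left cortical surface of 20 vertices in total):
+
+    >>> from nibabel.cifti2.cifti2_axes import BrainModelAxis
+    >>> bm = BrainModelAxis.from_surface([0, 5, 7], 20, name='CortexLeft')
+    >>> len(bm)
+    3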
+ """ + + def __init__(self, name, voxel=None, vertex=None, affine=None, + volume_shape=None, nvertices=None): + """ + New BrainModelAxis axes can be constructed by passing on the greyordinate brain-structure + names and voxel/vertex indices to the constructor or by one of the + factory methods: + + - :py:meth:`~BrainModelAxis.from_mask`: creates surface or volumetric BrainModelAxis axis + from respectively 1D or 3D masks + - :py:meth:`~BrainModelAxis.from_surface`: creates a surface BrainModelAxis axis + + The resulting BrainModelAxis axes can be concatenated by adding them together. + + Parameters + ---------- + name : array_like + brain structure name or (N, ) string array with the brain structure names + voxel : array_like, optional + (N, 3) array with the voxel indices (can be omitted for CIFTI-2 files only + covering the surface) + vertex : array_like, optional + (N, ) array with the vertex indices (can be omitted for volumetric CIFTI-2 files) + affine : array_like, optional + (4, 4) array mapping voxel indices to mm space (not needed for CIFTI-2 files only + covering the surface) + volume_shape : tuple of three integers, optional + shape of the volume in which the voxels were defined (not needed for CIFTI-2 files only + covering the surface) + nvertices : dict from string to integer, optional + maps names of surface elements to integers (not needed for volumetric CIFTI-2 files) + """ + if voxel is None: + if vertex is None: + raise ValueError("At least one of voxel or vertex indices should be defined") + nelements = len(vertex) + self.voxel = np.full((nelements, 3), fill_value=-1, dtype=int) + else: + nelements = len(voxel) + self.voxel = np.asanyarray(voxel, dtype=int) + + if vertex is None: + self.vertex = np.full(nelements, fill_value=-1, dtype=int) + else: + self.vertex = np.asanyarray(vertex, dtype=int) + + if isinstance(name, str): + name = [self.to_cifti_brain_structure_name(name)] * self.vertex.size + self.name = np.asanyarray(name, dtype='U') + + if nvertices is None: + self.nvertices = {} + else: + self.nvertices = {self.to_cifti_brain_structure_name(name): number + for name, number in nvertices.items()} + + for name in list(self.nvertices.keys()): + if name not in self.name: + del self.nvertices[name] + + surface_mask = self.surface_mask + if surface_mask.all(): + self.affine = None + self.volume_shape = None + else: + if affine is None or volume_shape is None: + raise ValueError("Affine and volume shape should be defined " + "for BrainModelAxis containing voxels") + self.affine = np.asanyarray(affine) + self.volume_shape = volume_shape + + if np.any(self.vertex[surface_mask] < 0): + raise ValueError('Undefined vertex indices found for surface elements') + if np.any(self.voxel[~surface_mask] < 0): + raise ValueError('Undefined voxel indices found for volumetric elements') + + for check_name in ('name', 'voxel', 'vertex'): + shape = (self.size, 3) if check_name == 'voxel' else (self.size, ) + if getattr(self, check_name).shape != shape: + raise ValueError("Input {} has incorrect shape ({}) for BrainModelAxis axis".format( + check_name, getattr(self, check_name).shape)) + + @classmethod + def from_mask(cls, mask, name='other', affine=None): + """ + Creates a new BrainModelAxis axis describing the provided mask + + Parameters + ---------- + mask : array_like + all non-zero voxels will be included in the BrainModelAxis axis + should be (Nx, Ny, Nz) array for volume mask or (Nvertex, ) array for surface mask + name : str, optional + Name of the brain structure (e.g. 
'CortexRight', 'thalamus_left' or 'brain_stem') + affine : array_like, optional + (4, 4) array with the voxel to mm transformation (defaults to identity matrix) + Argument will be ignored for surface masks + + Returns + ------- + BrainModelAxis which covers the provided mask + """ + if affine is None: + affine = np.eye(4) + else: + affine = np.asanyarray(affine) + if affine.shape != (4, 4): + raise ValueError("Affine transformation should be a 4x4 array or None, not %r" % affine) + + mask = np.asanyarray(mask) + if mask.ndim == 1: + return cls.from_surface(np.where(mask != 0)[0], mask.size, name=name) + elif mask.ndim == 3: + voxels = np.array(np.where(mask != 0)).T + return cls(name, voxel=voxels, affine=affine, volume_shape=mask.shape) + else: + raise ValueError("Mask should be either 1-dimensional (for surfaces) or " + "3-dimensional (for volumes), not %i-dimensional" % mask.ndim) + + @classmethod + def from_surface(cls, vertices, nvertex, name='Other'): + """ + Creates a new BrainModelAxis axis describing the vertices on a surface + + Parameters + ---------- + vertices : array_like + indices of the vertices on the surface + nvertex : int + total number of vertices on the surface + name : str + Name of the brain structure (e.g. 'CortexLeft' or 'CortexRight') + + Returns + ------- + BrainModelAxis which covers (part of) the surface + """ + cifti_name = cls.to_cifti_brain_structure_name(name) + return cls(cifti_name, vertex=vertices, + nvertices={cifti_name: nvertex}) + + @classmethod + def from_index_mapping(cls, mim): + """ + Creates a new BrainModel axis based on a CIFTI-2 dataset + + Parameters + ---------- + mim : :class:`.cifti2.Cifti2MatrixIndicesMap` + + Returns + ------- + BrainModelAxis + """ + nbm = sum(bm.index_count for bm in mim.brain_models) + voxel = np.full((nbm, 3), fill_value=-1, dtype=int) + vertex = np.full(nbm, fill_value=-1, dtype=int) + name = [] + + nvertices = {} + affine, shape = None, None + for bm in mim.brain_models: + index_end = bm.index_offset + bm.index_count + is_surface = bm.model_type == 'CIFTI_MODEL_TYPE_SURFACE' + name.extend([bm.brain_structure] * bm.index_count) + if is_surface: + vertex[bm.index_offset: index_end] = bm.vertex_indices + nvertices[bm.brain_structure] = bm.surface_number_of_vertices + else: + voxel[bm.index_offset: index_end, :] = bm.voxel_indices_ijk + if affine is None: + shape = mim.volume.volume_dimensions + affine = mim.volume.transformation_matrix_voxel_indices_ijk_to_xyz.matrix + return cls(name, voxel, vertex, affine, shape, nvertices) + + def to_mapping(self, dim): + """ + Converts the brain model axis to a MatrixIndicesMap for storage in CIFTI-2 format + + Parameters + ---------- + dim : int + which dimension of the CIFTI-2 vector/matrix is described by this dataset (zero-based) + + Returns + ------- + :class:`.cifti2.Cifti2MatrixIndicesMap` + """ + mim = cifti2.Cifti2MatrixIndicesMap([dim], 'CIFTI_INDEX_TYPE_BRAIN_MODELS') + for name, to_slice, bm in self.iter_structures(): + is_surface = name in self.nvertices.keys() + if is_surface: + voxels = None + vertices = cifti2.Cifti2VertexIndices(bm.vertex) + nvertex = self.nvertices[name] + else: + voxels = cifti2.Cifti2VoxelIndicesIJK(bm.voxel) + vertices = None + nvertex = None + if mim.volume is None: + affine = cifti2.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ(-3, self.affine) + mim.volume = cifti2.Cifti2Volume(self.volume_shape, affine) + cifti_bm = cifti2.Cifti2BrainModel( + to_slice.start, len(bm), + 'CIFTI_MODEL_TYPE_SURFACE' if is_surface else 
'CIFTI_MODEL_TYPE_VOXELS', + name, nvertex, voxels, vertices + ) + mim.append(cifti_bm) + return mim + + def iter_structures(self): + """ + Iterates over all brain structures in the order that they appear along the axis + + Yields + ------ + tuple with 3 elements: + - CIFTI-2 brain structure name + - slice to select the data associated with the brain structure from the tensor + - brain model covering that specific brain structure + """ + idx_start = 0 + start_name = self.name[idx_start] + for idx_current, name in enumerate(self.name): + if start_name != name: + yield start_name, slice(idx_start, idx_current), self[idx_start: idx_current] + idx_start = idx_current + start_name = self.name[idx_start] + yield start_name, slice(idx_start, None), self[idx_start:] + + @staticmethod + def to_cifti_brain_structure_name(name): + """ + Attempts to convert the name of an anatomical region in a format recognized by CIFTI-2 + + This function returns: + + - the name if it is in the CIFTI-2 format already + - if the name is a tuple the first element is assumed to be the structure name while + the second is assumed to be the hemisphere (left, right or both). The latter will default + to both. + - names like left_cortex, cortex_left, LeftCortex, or CortexLeft will be converted to + CIFTI_STRUCTURE_CORTEX_LEFT + + see :py:func:`nibabel.cifti2.tests.test_name` for examples of + which conversions are possible + + Parameters + ---------- + name: iterable of 2-element tuples of integer and string + input name of an anatomical region + + Returns + ------- + CIFTI-2 compatible name + + Raises + ------ + ValueError: raised if the input name does not match a known anatomical structure in CIFTI-2 + """ + if name in cifti2.CIFTI_BRAIN_STRUCTURES: + return name + if not isinstance(name, str): + if len(name) == 1: + structure = name[0] + orientation = 'both' + else: + structure, orientation = name + if structure.lower() in ('left', 'right', 'both'): + orientation, structure = name + else: + orient_names = ('left', 'right', 'both') + for poss_orient in orient_names: + idx = len(poss_orient) + if poss_orient == name.lower()[:idx]: + orientation = poss_orient + if name[idx] in '_ ': + structure = name[idx + 1:] + else: + structure = name[idx:] + break + if poss_orient == name.lower()[-idx:]: + orientation = poss_orient + if name[-idx - 1] in '_ ': + structure = name[:-idx - 1] + else: + structure = name[:-idx] + break + else: + orientation = 'both' + structure = name + if orientation.lower() == 'both': + proposed_name = 'CIFTI_STRUCTURE_%s' % structure.upper() + else: + proposed_name = 'CIFTI_STRUCTURE_%s_%s' % (structure.upper(), orientation.upper()) + if proposed_name not in cifti2.CIFTI_BRAIN_STRUCTURES: + raise ValueError('%s was interpreted as %s, which is not a valid CIFTI brain structure' + % (name, proposed_name)) + return proposed_name + + @property + def surface_mask(self): + """ + (N, ) boolean array which is true for any element on the surface + """ + return np.vectorize(lambda name: name in self.nvertices.keys())(self.name) + + @property + def volume_mask(self): + """ + (N, ) boolean array which is true for any element on the surface + """ + return np.vectorize(lambda name: name not in self.nvertices.keys())(self.name) + + _affine = None + + @property + def affine(self): + """ + Affine of the volumetric image in which the greyordinate voxels were defined + """ + return self._affine + + @affine.setter + def affine(self, value): + if value is not None: + value = np.asanyarray(value) + if value.shape != (4, 4): 
+ raise ValueError('Affine transformation should be a 4x4 array') + self._affine = value + + _volume_shape = None + + @property + def volume_shape(self): + """ + Shape of the volumetric image in which the greyordinate voxels were defined + """ + return self._volume_shape + + @volume_shape.setter + def volume_shape(self, value): + if value is not None: + value = tuple(value) + if len(value) != 3: + raise ValueError("Volume shape should be a tuple of length 3") + if not all(isinstance(v, int) for v in value): + raise ValueError("All elements of the volume shape should be integers") + self._volume_shape = value + + _name = None + + @property + def name(self): + """The brain structure to which the voxel/vertices of belong + """ + return self._name + + @name.setter + def name(self, values): + self._name = np.array([self.to_cifti_brain_structure_name(name) for name in values]) + + def __len__(self): + return self.name.size + + def __eq__(self, other): + if not isinstance(other, BrainModelAxis) or len(self) != len(other): + return False + if xor(self.affine is None, other.affine is None): + return False + return ( + (self.affine is None or + np.allclose(self.affine, other.affine) and + self.volume_shape == other.volume_shape) and + self.nvertices == other.nvertices and + np.array_equal(self.name, other.name) and + np.array_equal(self.voxel[self.volume_mask], other.voxel[other.volume_mask]) and + np.array_equal(self.vertex[self.surface_mask], other.vertex[other.surface_mask]) + ) + + def __add__(self, other): + """ + Concatenates two BrainModels + + Parameters + ---------- + other : BrainModelAxis + brain model to be appended to the current one + + Returns + ------- + BrainModelAxis + """ + if not isinstance(other, BrainModelAxis): + return NotImplemented + if self.affine is None: + affine, shape = other.affine, other.volume_shape + else: + affine, shape = self.affine, self.volume_shape + if other.affine is not None and ( + not np.allclose(other.affine, affine) or + other.volume_shape != shape + ): + raise ValueError("Trying to concatenate two BrainModels defined " + "in a different brain volume") + + nvertices = dict(self.nvertices) + for name, value in other.nvertices.items(): + if name in nvertices.keys() and nvertices[name] != value: + raise ValueError("Trying to concatenate two BrainModels with inconsistent " + "number of vertices for %s" % name) + nvertices[name] = value + return self.__class__( + np.append(self.name, other.name), + np.concatenate((self.voxel, other.voxel), 0), + np.append(self.vertex, other.vertex), + affine, shape, nvertices + ) + + def __getitem__(self, item): + """ + Extracts part of the brain structure + + Parameters + ---------- + item : anything that can index a 1D array + + Returns + ------- + If `item` is an integer returns a tuple with 3 elements: + - boolean, which is True if it is a surface element + - vertex index if it is a surface element, otherwise array with 3 voxel indices + - structure.BrainStructure object describing the brain structure the element was taken from + + Otherwise returns a new BrainModelAxis + """ + if isinstance(item, int): + return self.get_element(item) + if isinstance(item, str): + raise IndexError("Can not index an Axis with a string (except for ParcelsAxis)") + return self.__class__(self.name[item], self.voxel[item], self.vertex[item], + self.affine, self.volume_shape, self.nvertices) + + def get_element(self, index): + """ + Describes a single element from the axis + + Parameters + ---------- + index : int + Indexes the row/column of 
interest + + Returns + ------- + tuple with 3 elements + - str, 'CIFTI_MODEL_TYPE_SURFACE' for vertex or 'CIFTI_MODEL_TYPE_VOXELS' for voxel + - vertex index if it is a surface element, otherwise array with 3 voxel indices + - structure.BrainStructure object describing the brain structure the element was taken from + """ + element_type = 'CIFTI_MODEL_TYPE_' + ( + 'SURFACE' if self.name[index] in self.nvertices.keys() else 'VOXELS' + ) + struct = self.vertex if 'SURFACE' in element_type else self.voxel + return element_type, struct[index], self.name[index] + + +class ParcelsAxis(Axis): + """ + Each row/column in the CIFTI-2 vector/matrix represents a parcel of voxels/vertices + + This Axis describes which parcel is represented by each row/column. + + Individual parcels can be accessed based on their name, using + ``parcel = parcel_axis[name]`` + """ + + def __init__(self, name, voxels, vertices, affine=None, volume_shape=None, nvertices=None): + """ + Use of this constructor is not recommended. New ParcelsAxis axes can be constructed more + easily from a sequence of BrainModelAxis axes using + :py:meth:`~ParcelsAxis.from_brain_models` + + Parameters + ---------- + name : array_like + (N, ) string array with the parcel names + voxels : array_like + (N, ) object array each containing a sequence of voxels. + For each parcel the voxels are represented by a (M, 3) index array + vertices : array_like + (N, ) object array each containing a sequence of vertices. + For each parcel the vertices are represented by a mapping from brain structure name to + (M, ) index array + affine : array_like, optional + (4, 4) array mapping voxel indices to mm space (not needed for CIFTI-2 files only + covering the surface) + volume_shape : tuple of three integers, optional + shape of the volume in which the voxels were defined (not needed for CIFTI-2 files only + covering the surface) + nvertices : dict from string to integer, optional + maps names of surface elements to integers (not needed for volumetric CIFTI-2 files) + """ + self.name = np.asanyarray(name, dtype='U') + as_array = np.asanyarray(voxels) + if as_array.ndim == 1: + voxels = as_array.astype('object') + else: + voxels = np.empty(len(voxels), dtype='object') + for idx in range(len(voxels)): + voxels[idx] = as_array[idx] + self.voxels = np.asanyarray(voxels, dtype='object') + self.vertices = np.asanyarray(vertices, dtype='object') + self.affine = np.asanyarray(affine) if affine is not None else None + self.volume_shape = volume_shape + if nvertices is None: + self.nvertices = {} + else: + self.nvertices = {BrainModelAxis.to_cifti_brain_structure_name(name): number + for name, number in nvertices.items()} + + for check_name in ('name', 'voxels', 'vertices'): + if getattr(self, check_name).shape != (self.size, ): + raise ValueError("Input {} has incorrect shape ({}) for Parcel axis".format( + check_name, getattr(self, check_name).shape)) + + @classmethod + def from_brain_models(cls, named_brain_models): + """ + Creates a Parcel axis from a list of BrainModelAxis axes with names + + Parameters + ---------- + named_brain_models : iterable of 2-element tuples of string and BrainModelAxis + list of (parcel name, brain model representation) pairs defining each parcel + + Returns + ------- + ParcelsAxis + """ + nparcels = len(named_brain_models) + affine = None + volume_shape = None + all_names = [] + all_voxels = np.zeros(nparcels, dtype='object') + all_vertices = np.zeros(nparcels, dtype='object') + nvertices = {} + for idx_parcel, (parcel_name, bm) in 
enumerate(named_brain_models): + all_names.append(parcel_name) + + voxels = bm.voxel[bm.volume_mask] + if voxels.shape[0] != 0: + if affine is None: + affine = bm.affine + volume_shape = bm.volume_shape + elif not np.allclose(affine, bm.affine) or (volume_shape != bm.volume_shape): + raise ValueError("Can not combine brain models defined in different " + "volumes into a single Parcel axis") + all_voxels[idx_parcel] = voxels + + vertices = {} + for name, _, bm_part in bm.iter_structures(): + if name in bm.nvertices.keys(): + if name in nvertices.keys() and nvertices[name] != bm.nvertices[name]: + raise ValueError("Got multiple conflicting number of " + "vertices for surface structure %s" % name) + nvertices[name] = bm.nvertices[name] + vertices[name] = bm_part.vertex + all_vertices[idx_parcel] = vertices + return ParcelsAxis(all_names, all_voxels, all_vertices, affine, volume_shape, nvertices) + + @classmethod + def from_index_mapping(cls, mim): + """ + Creates a new Parcels axis based on a CIFTI-2 dataset + + Parameters + ---------- + mim : :class:`cifti2.Cifti2MatrixIndicesMap` + + Returns + ------- + ParcelsAxis + """ + nparcels = len(list(mim.parcels)) + all_names = [] + all_voxels = np.zeros(nparcels, dtype='object') + all_vertices = np.zeros(nparcels, dtype='object') + + volume_shape = None if mim.volume is None else mim.volume.volume_dimensions + affine = None + if mim.volume is not None: + affine = mim.volume.transformation_matrix_voxel_indices_ijk_to_xyz.matrix + nvertices = {} + for surface in mim.surfaces: + nvertices[surface.brain_structure] = surface.surface_number_of_vertices + for idx_parcel, parcel in enumerate(mim.parcels): + nvoxels = 0 if parcel.voxel_indices_ijk is None else len(parcel.voxel_indices_ijk) + voxels = np.zeros((nvoxels, 3), dtype='i4') + if nvoxels != 0: + voxels[:] = parcel.voxel_indices_ijk + vertices = {} + for vertex in parcel.vertices: + name = vertex.brain_structure + vertices[vertex.brain_structure] = np.array(vertex) + if name not in nvertices.keys(): + raise ValueError("Number of vertices for surface structure %s not defined" % + name) + all_voxels[idx_parcel] = voxels + all_vertices[idx_parcel] = vertices + all_names.append(parcel.name) + return cls(all_names, all_voxels, all_vertices, affine, volume_shape, nvertices) + + def to_mapping(self, dim): + """ + Converts the Parcel to a MatrixIndicesMap for storage in CIFTI-2 format + + Parameters + ---------- + dim : int + which dimension of the CIFTI-2 vector/matrix is described by this dataset (zero-based) + + Returns + ------- + :class:`cifti2.Cifti2MatrixIndicesMap` + """ + mim = cifti2.Cifti2MatrixIndicesMap([dim], 'CIFTI_INDEX_TYPE_PARCELS') + if self.affine is not None: + affine = cifti2.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ(-3, matrix=self.affine) + mim.volume = cifti2.Cifti2Volume(self.volume_shape, affine) + for name, nvertex in self.nvertices.items(): + mim.append(cifti2.Cifti2Surface(name, nvertex)) + for name, voxels, vertices in zip(self.name, self.voxels, self.vertices): + cifti_voxels = cifti2.Cifti2VoxelIndicesIJK(voxels) + element = cifti2.Cifti2Parcel(name, cifti_voxels) + for name_vertex, idx_vertices in vertices.items(): + element.vertices.append(cifti2.Cifti2Vertices(name_vertex, idx_vertices)) + mim.append(element) + return mim + + _affine = None + + @property + def affine(self): + """ + Affine of the volumetric image in which the greyordinate voxels were defined + """ + return self._affine + + @affine.setter + def affine(self, value): + if value is not None: + value = 
np.asanyarray(value) + if value.shape != (4, 4): + raise ValueError('Affine transformation should be a 4x4 array') + self._affine = value + + _volume_shape = None + + @property + def volume_shape(self): + """ + Shape of the volumetric image in which the greyordinate voxels were defined + """ + return self._volume_shape + + @volume_shape.setter + def volume_shape(self, value): + if value is not None: + value = tuple(value) + if len(value) != 3: + raise ValueError("Volume shape should be a tuple of length 3") + if not all(isinstance(v, int) for v in value): + raise ValueError("All elements of the volume shape should be integers") + self._volume_shape = value + + def __len__(self): + return self.name.size + + def __eq__(self, other): + if (self.__class__ != other.__class__ or len(self) != len(other) or + not np.array_equal(self.name, other.name) or self.nvertices != other.nvertices or + any(not np.array_equal(vox1, vox2) + for vox1, vox2 in zip(self.voxels, other.voxels))): + return False + if self.affine is not None: + if ( + other.affine is None or + not np.allclose(self.affine, other.affine) or + self.volume_shape != other.volume_shape + ): + return False + elif other.affine is not None: + return False + for vert1, vert2 in zip(self.vertices, other.vertices): + if len(vert1) != len(vert2): + return False + for name in vert1.keys(): + if name not in vert2 or not np.array_equal(vert1[name], vert2[name]): + return False + return True + + def __add__(self, other): + """ + Concatenates two Parcels + + Parameters + ---------- + other : ParcelsAxis + parcel to be appended to the current one + + Returns + ------- + Parcel + """ + if not isinstance(other, ParcelsAxis): + return NotImplemented + if self.affine is None: + affine, shape = other.affine, other.volume_shape + else: + affine, shape = self.affine, self.volume_shape + if other.affine is not None and (not np.allclose(other.affine, affine) or + other.volume_shape != shape): + raise ValueError("Trying to concatenate two ParcelsAxis defined " + "in a different brain volume") + nvertices = dict(self.nvertices) + for name, value in other.nvertices.items(): + if name in nvertices.keys() and nvertices[name] != value: + raise ValueError("Trying to concatenate two ParcelsAxis with inconsistent " + "number of vertices for %s" + % name) + nvertices[name] = value + return self.__class__( + np.append(self.name, other.name), + np.append(self.voxels, other.voxels), + np.append(self.vertices, other.vertices), + affine, shape, nvertices + ) + + def __getitem__(self, item): + """ + Extracts subset of the axes based on the type of ``item``: + + - `int`: 3-element tuple of (parcel name, parcel voxels, parcel vertices) + - `string`: 2-element tuple of (parcel voxels, parcel vertices + - other object that can index 1D arrays: new Parcel axis + """ + if isinstance(item, str): + idx = np.where(self.name == item)[0] + if len(idx) == 0: + raise IndexError("Parcel %s not found" % item) + if len(idx) > 1: + raise IndexError("Multiple parcels with name %s found" % item) + return self.voxels[idx[0]], self.vertices[idx[0]] + if isinstance(item, int): + return self.get_element(item) + return self.__class__(self.name[item], self.voxels[item], self.vertices[item], + self.affine, self.volume_shape, self.nvertices) + + def get_element(self, index): + """ + Describes a single element from the axis + + Parameters + ---------- + index : int + Indexes the row/column of interest + + Returns + ------- + tuple with 3 elements + - unicode name of the parcel + - (M, 3) int array with 
voxel indices + - dict from string to (K, ) int array with vertex indices + for a specific surface brain structure + """ + return self.name[index], self.voxels[index], self.vertices[index] + + +class ScalarAxis(Axis): + """ + Along this axis of the CIFTI-2 vector/matrix each row/column has been given + a unique name and optionally metadata + """ + + def __init__(self, name, meta=None): + """ + Parameters + ---------- + name : array_like + (N, ) string array with the parcel names + meta : array_like + (N, ) object array with a dictionary of metadata for each row/column. + Defaults to empty dictionary + """ + self.name = np.asanyarray(name, dtype='U') + if meta is None: + meta = [{} for _ in range(self.name.size)] + self.meta = np.asanyarray(meta, dtype='object') + + for check_name in ('name', 'meta'): + if getattr(self, check_name).shape != (self.size, ): + raise ValueError("Input {} has incorrect shape ({}) for ScalarAxis axis".format( + check_name, getattr(self, check_name).shape)) + + @classmethod + def from_index_mapping(cls, mim): + """ + Creates a new Scalar axis based on a CIFTI-2 dataset + + Parameters + ---------- + mim : :class:`.cifti2.Cifti2MatrixIndicesMap` + + Returns + ------- + ScalarAxis + """ + names = [nm.map_name for nm in mim.named_maps] + meta = [{} if nm.metadata is None else dict(nm.metadata) for nm in mim.named_maps] + return cls(names, meta) + + def to_mapping(self, dim): + """ + Converts the hcp_labels to a MatrixIndicesMap for storage in CIFTI-2 format + + Parameters + ---------- + dim : int + which dimension of the CIFTI-2 vector/matrix is described by this dataset (zero-based) + + Returns + ------- + :class:`.cifti2.Cifti2MatrixIndicesMap` + """ + mim = cifti2.Cifti2MatrixIndicesMap([dim], 'CIFTI_INDEX_TYPE_SCALARS') + for name, meta in zip(self.name, self.meta): + meta = None if len(meta) == 0 else meta + named_map = cifti2.Cifti2NamedMap(name, cifti2.Cifti2MetaData(meta)) + mim.append(named_map) + return mim + + def __len__(self): + return self.name.size + + def __eq__(self, other): + """ + Compares two Scalars + + Parameters + ---------- + other : ScalarAxis + scalar axis to be compared + + Returns + ------- + bool : False if type, length or content do not match + """ + if not isinstance(other, ScalarAxis) or self.size != other.size: + return False + return np.array_equal(self.name, other.name) and np.array_equal(self.meta, other.meta) + + def __add__(self, other): + """ + Concatenates two Scalars + + Parameters + ---------- + other : ScalarAxis + scalar axis to be appended to the current one + + Returns + ------- + ScalarAxis + """ + if not isinstance(other, ScalarAxis): + return NotImplemented + return ScalarAxis( + np.append(self.name, other.name), + np.append(self.meta, other.meta), + ) + + def __getitem__(self, item): + if isinstance(item, int): + return self.get_element(item) + return self.__class__(self.name[item], self.meta[item]) + + def get_element(self, index): + """ + Describes a single element from the axis + + Parameters + ---------- + index : int + Indexes the row/column of interest + + Returns + ------- + tuple with 2 elements + - unicode name of the row/column + - dictionary with the element metadata + """ + return self.name[index], self.meta[index] + + +class LabelAxis(Axis): + """ + Defines CIFTI-2 axis for label array. 
+ + Along this axis of the CIFTI-2 vector/matrix each row/column has been given a unique name, + label table, and optionally metadata + """ + + def __init__(self, name, label, meta=None): + """ + Parameters + ---------- + name : array_like + (N, ) string array with the parcel names + label : array_like + single dictionary or (N, ) object array with dictionaries mapping + from integers to (name, (R, G, B, A)), where name is a string and R, G, B, and A are + floats between 0 and 1 giving the colour and alpha (i.e., transparency) + meta : array_like, optional + (N, ) object array with a dictionary of metadata for each row/column + """ + self.name = np.asanyarray(name, dtype='U') + if isinstance(label, dict): + label = [label.copy() for _ in range(self.name.size)] + self.label = np.asanyarray(label, dtype='object') + if meta is None: + meta = [{} for _ in range(self.name.size)] + self.meta = np.asanyarray(meta, dtype='object') + + for check_name in ('name', 'meta', 'label'): + if getattr(self, check_name).shape != (self.size, ): + raise ValueError("Input {} has incorrect shape ({}) for LabelAxis axis".format( + check_name, getattr(self, check_name).shape)) + + @classmethod + def from_index_mapping(cls, mim): + """ + Creates a new Label axis based on a CIFTI-2 dataset + + Parameters + ---------- + mim : :class:`.cifti2.Cifti2MatrixIndicesMap` + + Returns + ------- + LabelAxis + """ + tables = [{key: (value.label, value.rgba) for key, value in nm.label_table.items()} + for nm in mim.named_maps] + rest = ScalarAxis.from_index_mapping(mim) + return LabelAxis(rest.name, tables, rest.meta) + + def to_mapping(self, dim): + """ + Converts the hcp_labels to a MatrixIndicesMap for storage in CIFTI-2 format + + Parameters + ---------- + dim : int + which dimension of the CIFTI-2 vector/matrix is described by this dataset (zero-based) + + Returns + ------- + :class:`.cifti2.Cifti2MatrixIndicesMap` + """ + mim = cifti2.Cifti2MatrixIndicesMap([dim], 'CIFTI_INDEX_TYPE_LABELS') + for name, label, meta in zip(self.name, self.label, self.meta): + label_table = cifti2.Cifti2LabelTable() + for key, value in label.items(): + label_table[key] = (value[0],) + tuple(value[1]) + if len(meta) == 0: + meta = None + named_map = cifti2.Cifti2NamedMap(name, cifti2.Cifti2MetaData(meta), + label_table) + mim.append(named_map) + return mim + + def __len__(self): + return self.name.size + + def __eq__(self, other): + """ + Compares two Labels + + Parameters + ---------- + other : LabelAxis + label axis to be compared + + Returns + ------- + bool : False if type, length or content do not match + """ + if not isinstance(other, LabelAxis) or self.size != other.size: + return False + return ( + np.array_equal(self.name, other.name) and + np.array_equal(self.meta, other.meta) and + np.array_equal(self.label, other.label) + ) + + def __add__(self, other): + """ + Concatenates two Labels + + Parameters + ---------- + other : LabelAxis + label axis to be appended to the current one + + Returns + ------- + LabelAxis + """ + if not isinstance(other, LabelAxis): + return NotImplemented + return LabelAxis( + np.append(self.name, other.name), + np.append(self.label, other.label), + np.append(self.meta, other.meta), + ) + + def __getitem__(self, item): + if isinstance(item, int): + return self.get_element(item) + return self.__class__(self.name[item], self.label[item], self.meta[item]) + + def get_element(self, index): + """ + Describes a single element from the axis + + Parameters + ---------- + index : int + Indexes the row/column of 
interest + + Returns + ------- + tuple with 2 elements + - unicode name of the row/column + - dictionary with the label table + - dictionary with the element metadata + """ + return self.name[index], self.label[index], self.meta[index] + + +class SeriesAxis(Axis): + """ + Along this axis of the CIFTI-2 vector/matrix the rows/columns increase monotonously in time + + This Axis describes the time point of each row/column. + """ + size = None + + def __init__(self, start, step, size, unit="SECOND"): + """ + Creates a new SeriesAxis axis + + Parameters + ---------- + start : float + starting time point + step : float + sampling time (TR) + size : int + number of time points + unit : str + Unit of the step size (one of 'second', 'hertz', 'meter', or 'radian') + """ + self.unit = unit + self.start = start + self.step = step + self.size = size + + @property + def time(self): + return np.arange(self.size) * self.step + self.start + + @classmethod + def from_index_mapping(cls, mim): + """ + Creates a new SeriesAxis axis based on a CIFTI-2 dataset + + Parameters + ---------- + mim : :class:`.cifti2.Cifti2MatrixIndicesMap` + + Returns + ------- + SeriesAxis + """ + start = mim.series_start * 10 ** mim.series_exponent + step = mim.series_step * 10 ** mim.series_exponent + return cls(start, step, mim.number_of_series_points, mim.series_unit) + + def to_mapping(self, dim): + """ + Converts the SeriesAxis to a MatrixIndicesMap for storage in CIFTI-2 format + + Parameters + ---------- + dim : int + which dimension of the CIFTI-2 vector/matrix is described by this dataset (zero-based) + + Returns + ------- + :class:`cifti2.Cifti2MatrixIndicesMap` + """ + mim = cifti2.Cifti2MatrixIndicesMap([dim], 'CIFTI_INDEX_TYPE_SERIES') + mim.series_exponent = 0 + mim.series_start = self.start + mim.series_step = self.step + mim.number_of_series_points = self.size + mim.series_unit = self.unit + return mim + + _unit = None + + @property + def unit(self): + return self._unit + + @unit.setter + def unit(self, value): + if value.upper() not in ("SECOND", "HERTZ", "METER", "RADIAN"): + raise ValueError("SeriesAxis unit should be one of " + + "('second', 'hertz', 'meter', or 'radian'") + self._unit = value.upper() + + def __len__(self): + return self.size + + def __eq__(self, other): + """ + True if start, step, size, and unit are the same. + """ + return ( + isinstance(other, SeriesAxis) and + self.start == other.start and + self.step == other.step and + self.size == other.size and + self.unit == other.unit + ) + + def __add__(self, other): + """ + Concatenates two SeriesAxis + + Parameters + ---------- + other : SeriesAxis + Time SeriesAxis to append at the end of the current time SeriesAxis. + Note that the starting time of the other time SeriesAxis is ignored. 
+ + Returns + ------- + SeriesAxis + New time SeriesAxis with the concatenation of the two + + Raises + ------ + ValueError + raised if the repetition time of the two time SeriesAxis is different + """ + if isinstance(other, SeriesAxis): + if other.step != self.step: + raise ValueError('Can only concatenate SeriesAxis with the same step size') + if other.unit != self.unit: + raise ValueError('Can only concatenate SeriesAxis with the same unit') + return SeriesAxis(self.start, self.step, self.size + other.size, self.unit) + return NotImplemented + + def __getitem__(self, item): + if isinstance(item, slice): + step = 1 if item.step is None else item.step + idx_start = ((self.size - 1 if step < 0 else 0) + if item.start is None else + (item.start if item.start >= 0 else self.size + item.start)) + idx_end = ((-1 if step < 0 else self.size) + if item.stop is None else + (item.stop if item.stop >= 0 else self.size + item.stop)) + if idx_start > self.size and step < 0: + idx_start = self.size - 1 + if idx_end > self.size: + idx_end = self.size + nelements = (idx_end - idx_start) // step + if nelements < 0: + nelements = 0 + return SeriesAxis(idx_start * self.step + self.start, self.step * step, + nelements, self.unit) + elif isinstance(item, int): + return self.get_element(item) + raise IndexError('SeriesAxis can only be indexed with integers or slices ' + 'without breaking the regular structure') + + def get_element(self, index): + """ + Gives the time point of a specific row/column + + Parameters + ---------- + index : int + Indexes the row/column of interest + + Returns + ------- + float + """ + original_index = index + if index < 0: + index = self.size + index + if index >= self.size or index < 0: + raise IndexError("index %i is out of range for SeriesAxis with size %i" % + (original_index, self.size)) + return self.start + self.step * index diff --git a/nibabel/cifti2/parse_cifti2.py b/nibabel/cifti2/parse_cifti2.py index f0df76ac7d..8c3d40cd56 100644 --- a/nibabel/cifti2/parse_cifti2.py +++ b/nibabel/cifti2/parse_cifti2.py @@ -6,7 +6,6 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -from __future__ import division, print_function, absolute_import from distutils.version import LooseVersion @@ -94,7 +93,7 @@ def may_contain_header(klass, binaryblock): @staticmethod def _chk_qfac(hdr, fix=False): - # Allow qfac of 0 without complaint for CIFTI2 + # Allow qfac of 0 without complaint for CIFTI-2 rep = Report(HeaderDataError) if hdr['pixdim'][0] in (-1, 0, 1): return hdr, rep @@ -127,7 +126,7 @@ class _Cifti2AsNiftiImage(Nifti2Image): class Cifti2Parser(xml.XmlParser): - '''Class to parse an XML string into a CIFTI2 header object''' + '''Class to parse an XML string into a CIFTI-2 header object''' def __init__(self, encoding=None, buffer_size=3500000, verbose=0): super(Cifti2Parser, self).__init__(encoding=encoding, buffer_size=buffer_size, @@ -164,7 +163,7 @@ def StartElementHandler(self, name, attrs): parent = self.struct_state[-1] if not isinstance(parent, Cifti2Header): raise Cifti2HeaderError( - 'Matrix element can only be a child of the CIFTI2 Header element' + 'Matrix element can only be a child of the CIFTI-2 Header element' ) parent.matrix = matrix self.struct_state.append(matrix) @@ -175,7 +174,8 @@ def StartElementHandler(self, name, attrs): parent = self.struct_state[-1] if not isinstance(parent, (Cifti2Matrix, Cifti2NamedMap)): raise Cifti2HeaderError( - 'MetaData element can only be a child of the CIFTI2 Matrix or NamedMap elements' + 'MetaData element can only be a child of the CIFTI-2 Matrix ' + 'or NamedMap elements' ) self.struct_state.append(meta) @@ -207,7 +207,7 @@ def StartElementHandler(self, name, attrs): matrix = self.struct_state[-1] if not isinstance(matrix, Cifti2Matrix): raise Cifti2HeaderError( - 'MatrixIndicesMap element can only be a child of the CIFTI2 Matrix element' + 'MatrixIndicesMap element can only be a child of the CIFTI-2 Matrix element' ) matrix.append(mim) self.struct_state.append(mim) @@ -218,7 +218,7 @@ def StartElementHandler(self, name, attrs): mim = self.struct_state[-1] if not isinstance(mim, Cifti2MatrixIndicesMap): raise Cifti2HeaderError( - 'NamedMap element can only be a child of the CIFTI2 MatrixIndicesMap element' + 'NamedMap element can only be a child of the CIFTI-2 MatrixIndicesMap element' ) self.struct_state.append(named_map) mim.append(named_map) @@ -234,7 +234,7 @@ def StartElementHandler(self, name, attrs): lata = Cifti2LabelTable() if not isinstance(named_map, Cifti2NamedMap): raise Cifti2HeaderError( - 'LabelTable element can only be a child of the CIFTI2 NamedMap element' + 'LabelTable element can only be a child of the CIFTI-2 NamedMap element' ) self.fsm_state.append('LabelTable') self.struct_state.append(lata) @@ -244,7 +244,7 @@ def StartElementHandler(self, name, attrs): lata = self.struct_state[-1] if not isinstance(lata, Cifti2LabelTable): raise Cifti2HeaderError( - 'Label element can only be a child of the CIFTI2 LabelTable element' + 'Label element can only be a child of the CIFTI-2 LabelTable element' ) label = Cifti2Label() label.key = int(attrs["Key"]) @@ -260,7 +260,7 @@ def StartElementHandler(self, name, attrs): named_map = self.struct_state[-1] if not isinstance(named_map, Cifti2NamedMap): raise Cifti2HeaderError( - 'MapName element can only be a child of the CIFTI2 NamedMap element' + 'MapName element can only be a child of the CIFTI-2 NamedMap element' ) self.fsm_state.append('MapName') @@ -271,7 +271,7 @@ def StartElementHandler(self, name, attrs): mim = self.struct_state[-1] if not isinstance(mim, 
Cifti2MatrixIndicesMap): raise Cifti2HeaderError( - 'Surface element can only be a child of the CIFTI2 MatrixIndicesMap element' + 'Surface element can only be a child of the CIFTI-2 MatrixIndicesMap element' ) if mim.indices_map_to_data_type != "CIFTI_INDEX_TYPE_PARCELS": raise Cifti2HeaderError( @@ -287,7 +287,7 @@ def StartElementHandler(self, name, attrs): mim = self.struct_state[-1] if not isinstance(mim, Cifti2MatrixIndicesMap): raise Cifti2HeaderError( - 'Parcel element can only be a child of the CIFTI2 MatrixIndicesMap element' + 'Parcel element can only be a child of the CIFTI-2 MatrixIndicesMap element' ) parcel.name = attrs["Name"] mim.append(parcel) @@ -299,7 +299,7 @@ def StartElementHandler(self, name, attrs): parcel = self.struct_state[-1] if not isinstance(parcel, Cifti2Parcel): raise Cifti2HeaderError( - 'Vertices element can only be a child of the CIFTI2 Parcel element' + 'Vertices element can only be a child of the CIFTI-2 Parcel element' ) vertices.brain_structure = attrs["BrainStructure"] if vertices.brain_structure not in CIFTI_BRAIN_STRUCTURES: @@ -315,7 +315,7 @@ def StartElementHandler(self, name, attrs): parent = self.struct_state[-1] if not isinstance(parent, (Cifti2Parcel, Cifti2BrainModel)): raise Cifti2HeaderError( - 'VoxelIndicesIJK element can only be a child of the CIFTI2 ' + 'VoxelIndicesIJK element can only be a child of the CIFTI-2 ' 'Parcel or BrainModel elements' ) parent.voxel_indices_ijk = Cifti2VoxelIndicesIJK() @@ -325,7 +325,7 @@ def StartElementHandler(self, name, attrs): mim = self.struct_state[-1] if not isinstance(mim, Cifti2MatrixIndicesMap): raise Cifti2HeaderError( - 'Volume element can only be a child of the CIFTI2 MatrixIndicesMap element' + 'Volume element can only be a child of the CIFTI-2 MatrixIndicesMap element' ) dimensions = tuple([int(val) for val in attrs["VolumeDimensions"].split(',')]) @@ -339,7 +339,7 @@ def StartElementHandler(self, name, attrs): if not isinstance(volume, Cifti2Volume): raise Cifti2HeaderError( 'TransformationMatrixVoxelIndicesIJKtoXYZ element can only be a child ' - 'of the CIFTI2 Volume element' + 'of the CIFTI-2 Volume element' ) transform = Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ() transform.meter_exponent = int(attrs["MeterExponent"]) @@ -354,7 +354,7 @@ def StartElementHandler(self, name, attrs): if not isinstance(mim, Cifti2MatrixIndicesMap): raise Cifti2HeaderError( 'BrainModel element can only be a child ' - 'of the CIFTI2 MatrixIndicesMap element' + 'of the CIFTI-2 MatrixIndicesMap element' ) if mim.indices_map_to_data_type != "CIFTI_INDEX_TYPE_BRAIN_MODELS": raise Cifti2HeaderError( @@ -386,7 +386,7 @@ def StartElementHandler(self, name, attrs): if not isinstance(model, Cifti2BrainModel): raise Cifti2HeaderError( 'VertexIndices element can only be a child ' - 'of the CIFTI2 BrainModel element' + 'of the CIFTI-2 BrainModel element' ) self.fsm_state.append('VertexIndices') model.vertex_indices = index diff --git a/nibabel/cifti2/tests/test_axes.py b/nibabel/cifti2/tests/test_axes.py new file mode 100644 index 0000000000..3f6cb3a1a4 --- /dev/null +++ b/nibabel/cifti2/tests/test_axes.py @@ -0,0 +1,650 @@ +import numpy as np +import pytest +from .test_cifti2io_axes import check_rewrite +import nibabel.cifti2.cifti2_axes as axes +from copy import deepcopy + + +rand_affine = np.random.randn(4, 4) +vol_shape = (5, 10, 3) +use_label = {0: ('something', (0.2, 0.4, 0.1, 0.5)), 1: ('even better', (0.3, 0.8, 0.43, 0.9))} + + +def get_brain_models(): + """ + Generates a set of practice BrainModelAxis axes 
+ + Yields + ------ + BrainModelAxis axis + """ + mask = np.zeros(vol_shape) + mask[0, 1, 2] = 1 + mask[0, 4, 2] = True + mask[0, 4, 0] = True + yield axes.BrainModelAxis.from_mask(mask, 'ThalamusRight', rand_affine) + mask[0, 0, 0] = True + yield axes.BrainModelAxis.from_mask(mask, affine=rand_affine) + + yield axes.BrainModelAxis.from_surface([0, 5, 10], 15, 'CortexLeft') + yield axes.BrainModelAxis.from_surface([0, 5, 10, 13], 15) + + surface_mask = np.zeros(15, dtype='bool') + surface_mask[[2, 9, 14]] = True + yield axes.BrainModelAxis.from_mask(surface_mask, name='CortexRight') + + +def get_parcels(): + """ + Generates a practice Parcel axis out of all practice brain models + + Returns + ------- + Parcel axis + """ + bml = list(get_brain_models()) + return axes.ParcelsAxis.from_brain_models([('mixed', bml[0] + bml[2]), ('volume', bml[1]), ('surface', bml[3])]) + + +def get_scalar(): + """ + Generates a practice ScalarAxis axis with names ('one', 'two', 'three') + + Returns + ------- + ScalarAxis axis + """ + return axes.ScalarAxis(['one', 'two', 'three']) + + +def get_label(): + """ + Generates a practice LabelAxis axis with names ('one', 'two', 'three') and two labels + + Returns + ------- + LabelAxis axis + """ + return axes.LabelAxis(['one', 'two', 'three'], use_label) + + +def get_series(): + """ + Generates a set of 4 practice SeriesAxis axes with different starting times/lengths/time steps and units + + Yields + ------ + SeriesAxis axis + """ + yield axes.SeriesAxis(3, 10, 4) + yield axes.SeriesAxis(8, 10, 3) + yield axes.SeriesAxis(3, 2, 4) + yield axes.SeriesAxis(5, 10, 5, "HERTZ") + + +def get_axes(): + """ + Iterates through all of the practice axes defined in the functions above + + Yields + ------ + Cifti2 axis + """ + yield get_parcels() + yield get_scalar() + yield get_label() + for elem in get_brain_models(): + yield elem + for elem in get_series(): + yield elem + + +def test_brain_models(): + """ + Tests the introspection and creation of CIFTI-2 BrainModelAxis axes + """ + bml = list(get_brain_models()) + assert len(bml[0]) == 3 + assert (bml[0].vertex == -1).all() + assert (bml[0].voxel == [[0, 1, 2], [0, 4, 0], [0, 4, 2]]).all() + assert bml[0][1][0] == 'CIFTI_MODEL_TYPE_VOXELS' + assert (bml[0][1][1] == [0, 4, 0]).all() + assert bml[0][1][2] == axes.BrainModelAxis.to_cifti_brain_structure_name('thalamus_right') + assert len(bml[1]) == 4 + assert (bml[1].vertex == -1).all() + assert (bml[1].voxel == [[0, 0, 0], [0, 1, 2], [0, 4, 0], [0, 4, 2]]).all() + assert len(bml[2]) == 3 + assert (bml[2].voxel == -1).all() + assert (bml[2].vertex == [0, 5, 10]).all() + assert bml[2][1] == ('CIFTI_MODEL_TYPE_SURFACE', 5, 'CIFTI_STRUCTURE_CORTEX_LEFT') + assert len(bml[3]) == 4 + assert (bml[3].voxel == -1).all() + assert (bml[3].vertex == [0, 5, 10, 13]).all() + assert bml[4][1] == ('CIFTI_MODEL_TYPE_SURFACE', 9, 'CIFTI_STRUCTURE_CORTEX_RIGHT') + assert len(bml[4]) == 3 + assert (bml[4].voxel == -1).all() + assert (bml[4].vertex == [2, 9, 14]).all() + + for bm, label, is_surface in zip(bml, ['ThalamusRight', 'Other', 'cortex_left', 'Other'], + (False, False, True, True)): + assert np.all(bm.surface_mask == ~bm.volume_mask) + structures = list(bm.iter_structures()) + assert len(structures) == 1 + name = structures[0][0] + assert name == axes.BrainModelAxis.to_cifti_brain_structure_name(label) + if is_surface: + assert bm.nvertices[name] == 15 + else: + assert name not in bm.nvertices + assert (bm.affine == rand_affine).all() + assert bm.volume_shape == vol_shape + + bmt = bml[0] + 
bml[1] + bml[2] + assert len(bmt) == 10 + structures = list(bmt.iter_structures()) + assert len(structures) == 3 + for bm, (name, _, bm_split) in zip(bml[:3], structures): + assert bm == bm_split + assert (bm_split.name == name).all() + assert bm == bmt[bmt.name == bm.name[0]] + assert bm == bmt[np.where(bmt.name == bm.name[0])] + + bmt = bmt + bml[2] + assert len(bmt) == 13 + structures = list(bmt.iter_structures()) + assert len(structures) == 3 + assert len(structures[-1][2]) == 6 + + # break brain model + bmt.affine = np.eye(4) + with pytest.raises(ValueError): + bmt.affine = np.eye(3) + with pytest.raises(ValueError): + bmt.affine = np.eye(4).flatten() + + bmt.volume_shape = (5, 3, 1) + with pytest.raises(ValueError): + bmt.volume_shape = (5., 3, 1) + with pytest.raises(ValueError): + bmt.volume_shape = (5, 3, 1, 4) + + with pytest.raises(IndexError): + bmt['thalamus_left'] + + # Test the constructor + bm_vox = axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int), affine=np.eye(4), volume_shape=(2, 3, 4)) + assert np.all(bm_vox.name == ['CIFTI_STRUCTURE_THALAMUS_LEFT'] * 5) + assert np.array_equal(bm_vox.vertex, np.full(5, -1)) + assert np.array_equal(bm_vox.voxel, np.full((5, 3), 1)) + with pytest.raises(ValueError): + # no volume shape + axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int), affine=np.eye(4)) + with pytest.raises(ValueError): + # no affine + axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int), volume_shape=(2, 3, 4)) + with pytest.raises(ValueError): + # incorrect name + axes.BrainModelAxis('random_name', voxel=np.ones((5, 3), dtype=int), affine=np.eye(4), volume_shape=(2, 3, 4)) + with pytest.raises(ValueError): + # negative voxel indices + axes.BrainModelAxis('thalamus_left', voxel=-np.ones((5, 3), dtype=int), affine=np.eye(4), volume_shape=(2, 3, 4)) + with pytest.raises(ValueError): + # no voxels or vertices + axes.BrainModelAxis('thalamus_left', affine=np.eye(4), volume_shape=(2, 3, 4)) + with pytest.raises(ValueError): + # incorrect voxel shape + axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 2), dtype=int), affine=np.eye(4), volume_shape=(2, 3, 4)) + + bm_vertex = axes.BrainModelAxis('cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_left': 20}) + assert np.array_equal(bm_vertex.name, ['CIFTI_STRUCTURE_CORTEX_LEFT'] * 5) + assert np.array_equal(bm_vertex.vertex, np.full(5, 1)) + assert np.array_equal(bm_vertex.voxel, np.full((5, 3), -1)) + with pytest.raises(ValueError): + axes.BrainModelAxis('cortex_left', vertex=np.ones(5, dtype=int)) + with pytest.raises(ValueError): + axes.BrainModelAxis('cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_right': 20}) + with pytest.raises(ValueError): + axes.BrainModelAxis('cortex_left', vertex=-np.ones(5, dtype=int), nvertices={'cortex_left': 20}) + + # test from_mask errors + with pytest.raises(ValueError): + # affine should be 4x4 matrix + axes.BrainModelAxis.from_mask(np.arange(5) > 2, affine=np.ones(5)) + with pytest.raises(ValueError): + # only 1D or 3D masks accepted + axes.BrainModelAxis.from_mask(np.ones((5, 3))) + + # tests error in adding together or combining as ParcelsAxis + bm_vox = axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int), + affine=np.eye(4), volume_shape=(2, 3, 4)) + bm_vox + bm_vox + assert (bm_vertex + bm_vox)[:bm_vertex.size] == bm_vertex + assert (bm_vox + bm_vertex)[:bm_vox.size] == bm_vox + for bm_added in (bm_vox + bm_vertex, bm_vertex + bm_vox): + assert bm_added.nvertices == 
bm_vertex.nvertices + assert np.all(bm_added.affine == bm_vox.affine) + assert bm_added.volume_shape == bm_vox.volume_shape + + axes.ParcelsAxis.from_brain_models([('a', bm_vox), ('b', bm_vox)]) + with pytest.raises(Exception): + bm_vox + get_label() + + bm_other_shape = axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int), + affine=np.eye(4), volume_shape=(4, 3, 4)) + with pytest.raises(ValueError): + bm_vox + bm_other_shape + with pytest.raises(ValueError): + axes.ParcelsAxis.from_brain_models([('a', bm_vox), ('b', bm_other_shape)]) + bm_other_affine = axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int), + affine=np.eye(4) * 2, volume_shape=(2, 3, 4)) + with pytest.raises(ValueError): + bm_vox + bm_other_affine + with pytest.raises(ValueError): + axes.ParcelsAxis.from_brain_models([('a', bm_vox), ('b', bm_other_affine)]) + + bm_vertex = axes.BrainModelAxis('cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_left': 20}) + bm_other_number = axes.BrainModelAxis('cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_left': 30}) + with pytest.raises(ValueError): + bm_vertex + bm_other_number + with pytest.raises(ValueError): + axes.ParcelsAxis.from_brain_models([('a', bm_vertex), ('b', bm_other_number)]) + + # test equalities + bm_vox = axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int), + affine=np.eye(4), volume_shape=(2, 3, 4)) + bm_other = deepcopy(bm_vox) + assert bm_vox == bm_other + bm_other.voxel[1, 0] = 0 + assert bm_vox != bm_other + + bm_other = deepcopy(bm_vox) + bm_other.vertex[1] = 10 + assert bm_vox == bm_other, 'vertices are ignored in volumetric BrainModelAxis' + + bm_other = deepcopy(bm_vox) + bm_other.name[1] = 'BRAIN_STRUCTURE_OTHER' + assert bm_vox != bm_other + + bm_other = deepcopy(bm_vox) + bm_other.affine[0, 0] = 10 + assert bm_vox != bm_other + + bm_other = deepcopy(bm_vox) + bm_other.affine = None + assert bm_vox != bm_other + assert bm_other != bm_vox + + bm_other = deepcopy(bm_vox) + bm_other.volume_shape = (10, 3, 4) + assert bm_vox != bm_other + + bm_vertex = axes.BrainModelAxis('cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_left': 20}) + bm_other = deepcopy(bm_vertex) + assert bm_vertex == bm_other + bm_other.voxel[1, 0] = 0 + assert bm_vertex == bm_other, 'voxels are ignored in surface BrainModelAxis' + + bm_other = deepcopy(bm_vertex) + bm_other.vertex[1] = 10 + assert bm_vertex != bm_other + + bm_other = deepcopy(bm_vertex) + bm_other.name[1] = 'BRAIN_STRUCTURE_CORTEX_RIGHT' + assert bm_vertex != bm_other + + bm_other = deepcopy(bm_vertex) + bm_other.nvertices['BRAIN_STRUCTURE_CORTEX_LEFT'] = 50 + assert bm_vertex != bm_other + + bm_other = deepcopy(bm_vertex) + bm_other.nvertices['BRAIN_STRUCTURE_CORTEX_RIGHT'] = 20 + assert bm_vertex != bm_other + + assert bm_vox != get_parcels() + assert bm_vertex != get_parcels() + + +def test_parcels(): + """ + Test the introspection and creation of CIFTI-2 Parcel axes + """ + prc = get_parcels() + assert isinstance(prc, axes.ParcelsAxis) + assert prc[0] == ('mixed', ) + prc['mixed'] + assert prc['mixed'][0].shape == (3, 3) + assert len(prc['mixed'][1]) == 1 + assert prc['mixed'][1]['CIFTI_STRUCTURE_CORTEX_LEFT'].shape == (3, ) + + assert prc[1] == ('volume', ) + prc['volume'] + assert prc['volume'][0].shape == (4, 3) + assert len(prc['volume'][1]) == 0 + + assert prc[2] == ('surface', ) + prc['surface'] + assert prc['surface'][0].shape == (0, 3) + assert len(prc['surface'][1]) == 1 + assert 
prc['surface'][1]['CIFTI_STRUCTURE_OTHER'].shape == (4, ) + + prc2 = prc + prc + assert len(prc2) == 6 + assert (prc2.affine == prc.affine).all() + assert (prc2.nvertices == prc.nvertices) + assert (prc2.volume_shape == prc.volume_shape) + assert prc2[:3] == prc + assert prc2[3:] == prc + + assert prc2[3:]['mixed'][0].shape == (3, 3) + assert len(prc2[3:]['mixed'][1]) == 1 + assert prc2[3:]['mixed'][1]['CIFTI_STRUCTURE_CORTEX_LEFT'].shape == (3, ) + + with pytest.raises(IndexError): + prc['non_existent'] + + prc['surface'] + with pytest.raises(IndexError): + # parcel exists twice + prc2['surface'] + + # break parcels + prc.affine = np.eye(4) + with pytest.raises(ValueError): + prc.affine = np.eye(3) + with pytest.raises(ValueError): + prc.affine = np.eye(4).flatten() + + prc.volume_shape = (5, 3, 1) + with pytest.raises(ValueError): + prc.volume_shape = (5., 3, 1) + with pytest.raises(ValueError): + prc.volume_shape = (5, 3, 1, 4) + + # break adding of parcels + with pytest.raises(Exception): + prc + get_label() + + prc = get_parcels() + other_prc = get_parcels() + prc + other_prc + + other_prc = get_parcels() + other_prc.affine = np.eye(4) * 2 + with pytest.raises(ValueError): + prc + other_prc + + other_prc = get_parcels() + other_prc.volume_shape = (20, 3, 4) + with pytest.raises(ValueError): + prc + other_prc + + # test parcel equalities + prc = get_parcels() + assert prc != get_scalar() + + prc_other = deepcopy(prc) + assert prc == prc_other + assert prc != prc_other[:2] + assert prc == prc_other[:] + prc_other.affine[0, 0] = 10 + assert prc != prc_other + + prc_other = deepcopy(prc) + prc_other.affine = None + assert prc != prc_other + assert prc_other != prc + assert (prc + prc_other).affine is not None + assert (prc_other + prc).affine is not None + + prc_other = deepcopy(prc) + prc_other.volume_shape = (10, 3, 4) + assert prc != prc_other + with pytest.raises(ValueError): + prc + prc_other + + prc_other = deepcopy(prc) + prc_other.nvertices['CIFTI_STRUCTURE_CORTEX_LEFT'] = 80 + assert prc != prc_other + with pytest.raises(ValueError): + prc + prc_other + + prc_other = deepcopy(prc) + prc_other.voxels[0] = np.ones((2, 3), dtype='i4') + assert prc != prc_other + + prc_other = deepcopy(prc) + prc_other.voxels[0] = prc_other.voxels * 2 + assert prc != prc_other + + prc_other = deepcopy(prc) + prc_other.vertices[0]['CIFTI_STRUCTURE_CORTEX_LEFT'] = np.ones((8, ), dtype='i4') + assert prc != prc_other + + prc_other = deepcopy(prc) + prc_other.vertices[0]['CIFTI_STRUCTURE_CORTEX_LEFT'] *= 2 + assert prc != prc_other + + prc_other = deepcopy(prc) + prc_other.name[0] = 'new_name' + assert prc != prc_other + + # test direct initialisation + axes.ParcelsAxis( + voxels=[np.ones((3, 2), dtype=int)], + vertices=[{}], + name=['single_voxel'], + affine=np.eye(4), + volume_shape=(2, 3, 4), + ) + + with pytest.raises(ValueError): + axes.ParcelsAxis( + voxels=[np.ones((3, 2), dtype=int)], + vertices=[{}], + name=[['single_voxel']], # wrong shape name array + affine=np.eye(4), + volume_shape=(2, 3, 4), + ) + + +def test_scalar(): + """ + Test the introspection and creation of CIFTI-2 ScalarAxis axes + """ + sc = get_scalar() + assert len(sc) == 3 + assert isinstance(sc, axes.ScalarAxis) + assert (sc.name == ['one', 'two', 'three']).all() + assert (sc.meta == [{}] * 3).all() + assert sc[1] == ('two', {}) + sc2 = sc + sc + assert len(sc2) == 6 + assert (sc2.name == ['one', 'two', 'three', 'one', 'two', 'three']).all() + assert (sc2.meta == [{}] * 6).all() + assert sc2[:3] == sc + assert sc2[3:] == sc + 
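# --- A short sketch of the round trip these tests exercise (not part of the patch). ---
# Pair a ScalarAxis (rows) with a BrainModelAxis (columns), wrap the data in a
# Cifti2Image, write it out and read the axes back. The file name is a placeholder;
# the calls mirror check_rewrite() in test_cifti2io_axes.py.
import numpy as np
import nibabel as nib
from nibabel.cifti2 import cifti2, cifti2_axes

mask = np.zeros((5, 10, 3), dtype=bool)
mask[0, 1, 2] = mask[0, 4, 2] = True
bm = cifti2_axes.BrainModelAxis.from_mask(mask, 'ThalamusRight', np.eye(4))
sc = cifti2_axes.ScalarAxis(['one', 'two', 'three'])

data = np.random.randn(len(sc), len(bm))  # shape (3, 2)
cifti2.Cifti2Image(data, header=(sc, bm)).to_filename('example.dscalar.nii')

img = nib.load('example.dscalar.nii')
assert img.header.get_axis(0) == sc
assert img.header.get_axis(1) == bm
assert np.allclose(img.get_fdata(), data)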
+ sc.meta[1]['a'] = 3 + assert 'a' not in sc.meta + + # test equalities + assert sc != get_label() + with pytest.raises(Exception): + sc + get_label() + + sc_other = deepcopy(sc) + assert sc == sc_other + assert sc != sc_other[:2] + assert sc == sc_other[:] + sc_other.name[0] = 'new_name' + assert sc != sc_other + + sc_other = deepcopy(sc) + sc_other.meta[0]['new_key'] = 'new_entry' + assert sc != sc_other + sc.meta[0]['new_key'] = 'new_entry' + assert sc == sc_other + + # test constructor + assert axes.ScalarAxis(['scalar_name'], [{}]) == axes.ScalarAxis(['scalar_name']) + + with pytest.raises(ValueError): + axes.ScalarAxis([['scalar_name']]) # wrong shape + + with pytest.raises(ValueError): + axes.ScalarAxis(['scalar_name'], [{}, {}]) # wrong size + + +def test_label(): + """ + Test the introspection and creation of CIFTI-2 ScalarAxis axes + """ + lab = get_label() + assert len(lab) == 3 + assert isinstance(lab, axes.LabelAxis) + assert (lab.name == ['one', 'two', 'three']).all() + assert (lab.meta == [{}] * 3).all() + assert (lab.label == [use_label] * 3).all() + assert lab[1] == ('two', use_label, {}) + lab2 = lab + lab + assert len(lab2) == 6 + assert (lab2.name == ['one', 'two', 'three', 'one', 'two', 'three']).all() + assert (lab2.meta == [{}] * 6).all() + assert (lab2.label == [use_label] * 6).all() + assert lab2[:3] == lab + assert lab2[3:] == lab + + # test equalities + lab = get_label() + assert lab != get_scalar() + with pytest.raises(Exception): + lab + get_scalar() + + other_lab = deepcopy(lab) + assert lab != other_lab[:2] + assert lab == other_lab[:] + other_lab.name[0] = 'new_name' + assert lab != other_lab + + other_lab = deepcopy(lab) + other_lab.meta[0]['new_key'] = 'new_item' + assert 'new_key' not in other_lab.meta[1] + assert lab != other_lab + lab.meta[0]['new_key'] = 'new_item' + assert lab == other_lab + + other_lab = deepcopy(lab) + other_lab.label[0][20] = ('new_label', (0, 0, 0, 1)) + assert lab != other_lab + assert 20 not in other_lab.label[1] + lab.label[0][20] = ('new_label', (0, 0, 0, 1)) + assert lab == other_lab + + # test constructor + assert axes.LabelAxis(['scalar_name'], [{}], [{}]) == axes.LabelAxis(['scalar_name'], [{}]) + + with pytest.raises(ValueError): + axes.LabelAxis([['scalar_name']], [{}]) # wrong shape + + with pytest.raises(ValueError): + axes.LabelAxis(['scalar_name'], [{}, {}]) # wrong size + + +def test_series(): + """ + Test the introspection and creation of CIFTI-2 SeriesAxis axes + """ + sr = list(get_series()) + assert sr[0].unit == 'SECOND' + assert sr[1].unit == 'SECOND' + assert sr[2].unit == 'SECOND' + assert sr[3].unit == 'HERTZ' + sr[0].unit = 'hertz' + assert sr[0].unit == 'HERTZ' + with pytest.raises(ValueError): + sr[0].unit = 'non_existent' + + sr = list(get_series()) + assert (sr[0].time == np.arange(4) * 10 + 3).all() + assert (sr[1].time == np.arange(3) * 10 + 8).all() + assert (sr[2].time == np.arange(4) * 2 + 3).all() + assert ((sr[0] + sr[1]).time == np.arange(7) * 10 + 3).all() + assert ((sr[1] + sr[0]).time == np.arange(7) * 10 + 8).all() + assert ((sr[1] + sr[0] + sr[0]).time == np.arange(11) * 10 + 8).all() + assert sr[1][2] == 28 + assert sr[1][-2] == sr[1].time[-2] + + with pytest.raises(ValueError): + sr[0] + sr[2] + with pytest.raises(ValueError): + sr[2] + sr[1] + with pytest.raises(ValueError): + sr[0] + sr[3] + with pytest.raises(ValueError): + sr[3] + sr[1] + with pytest.raises(ValueError): + sr[3] + sr[2] + + # test slicing + assert (sr[0][1:3].time == sr[0].time[1:3]).all() + assert (sr[0][1:].time == 
sr[0].time[1:]).all() + assert (sr[0][:-2].time == sr[0].time[:-2]).all() + assert (sr[0][1:-1].time == sr[0].time[1:-1]).all() + assert (sr[0][1:-1:2].time == sr[0].time[1:-1:2]).all() + assert (sr[0][::2].time == sr[0].time[::2]).all() + assert (sr[0][:10:2].time == sr[0].time[::2]).all() + assert (sr[0][10:].time == sr[0].time[10:]).all() + assert (sr[0][10:12].time == sr[0].time[10:12]).all() + assert (sr[0][10::-1].time == sr[0].time[10::-1]).all() + assert (sr[0][3:1:-1].time == sr[0].time[3:1:-1]).all() + assert (sr[0][1:3:-1].time == sr[0].time[1:3:-1]).all() + + with pytest.raises(IndexError): + assert sr[0][[0, 1]] + with pytest.raises(IndexError): + assert sr[0][20] + with pytest.raises(IndexError): + assert sr[0][-20] + + # test_equalities + sr = next(get_series()) + with pytest.raises(Exception): + sr + get_scalar() + assert sr != sr[:2] + assert sr == sr[:] + + for key, value in ( + ('start', 20), + ('step', 7), + ('size', 14), + ('unit', 'HERTZ'), + ): + sr_other = deepcopy(sr) + assert sr == sr_other + setattr(sr_other, key, value) + assert sr != sr_other + + +def test_writing(): + """ + Tests the writing and reading back in of custom created CIFTI-2 axes + """ + for ax1 in get_axes(): + for ax2 in get_axes(): + arr = np.random.randn(len(ax1), len(ax2)) + check_rewrite(arr, (ax1, ax2)) + + +def test_common_interface(): + """ + Tests the common interface for all custom created CIFTI-2 axes + """ + for axis1, axis2 in zip(get_axes(), get_axes()): + assert axis1 == axis2 + concatenated = axis1 + axis2 + assert axis1 != concatenated + assert axis1 == concatenated[:axis1.size] + if isinstance(axis1, axes.SeriesAxis): + assert axis2 != concatenated[axis1.size:] + else: + assert axis2 == concatenated[axis1.size:] + + assert len(axis1) == axis1.size + diff --git a/nibabel/cifti2/tests/test_cifti2.py b/nibabel/cifti2/tests/test_cifti2.py index ce71b92bcc..0d3d550a66 100644 --- a/nibabel/cifti2/tests/test_cifti2.py +++ b/nibabel/cifti2/tests/test_cifti2.py @@ -1,4 +1,4 @@ -""" Testing CIFTI2 objects +""" Testing CIFTI-2 objects """ import collections from xml.etree import ElementTree @@ -9,7 +9,7 @@ from nibabel.nifti2 import Nifti2Header from nibabel.cifti2.cifti2 import _float_01, _value_if_klass, Cifti2HeaderError -from nose.tools import assert_true, assert_equal, assert_raises, assert_is_none +import pytest from nibabel.tests.test_dataobj_images import TestDataobjAPI as _TDA @@ -27,244 +27,287 @@ def compare_xml_leaf(str1, str2): def test_value_if_klass(): - assert_equal(_value_if_klass(None, list), None) - assert_equal(_value_if_klass([1], list), [1]) - assert_raises(ValueError, _value_if_klass, 1, list) + assert _value_if_klass(None, list) is None + assert _value_if_klass([1], list) == [1] + with pytest.raises(ValueError): + _value_if_klass(1, list) def test_cifti2_metadata(): md = ci.Cifti2MetaData(metadata={'a': 'aval'}) - assert_equal(len(md), 1) - assert_equal(list(iter(md)), ['a']) - assert_equal(md['a'], 'aval') - assert_equal(md.data, dict([('a', 'aval')])) + assert len(md) == 1 + assert list(iter(md)) == ['a'] + assert md['a'] == 'aval' + assert md.data == dict([('a', 'aval')]) md = ci.Cifti2MetaData() - assert_equal(len(md), 0) - assert_equal(list(iter(md)), []) - assert_equal(md.data, {}) - assert_raises(ValueError, md.difference_update, None) + assert len(md) == 0 + assert list(iter(md)) == [] + assert md.data == {} + with pytest.raises(ValueError): + md.difference_update(None) md['a'] = 'aval' - assert_equal(md['a'], 'aval') - assert_equal(len(md), 1) - 
assert_equal(md.data, dict([('a', 'aval')])) + assert md['a'] == 'aval' + assert len(md) == 1 + assert md.data == dict([('a', 'aval')]) del md['a'] - assert_equal(len(md), 0) + assert len(md) == 0 metadata_test = [('a', 'aval'), ('b', 'bval')] md.update(metadata_test) - assert_equal(md.data, dict(metadata_test)) + assert md.data == dict(metadata_test) - assert_equal(list(iter(md)), list(iter(collections.OrderedDict(metadata_test)))) + assert list(iter(md)) == list(iter(collections.OrderedDict(metadata_test))) md.update({'a': 'aval', 'b': 'bval'}) - assert_equal(md.data, dict(metadata_test)) + assert md.data == dict(metadata_test) md.update({'a': 'aval', 'd': 'dval'}) - assert_equal(md.data, dict(metadata_test + [('d', 'dval')])) + assert md.data == dict(metadata_test + [('d', 'dval')]) md.difference_update({'a': 'aval', 'd': 'dval'}) - assert_equal(md.data, dict(metadata_test[1:])) + assert md.data == dict(metadata_test[1:]) - assert_raises(KeyError, md.difference_update, {'a': 'aval', 'd': 'dval'}) - assert_equal(md.to_xml().decode('utf-8'), - 'bbval') + with pytest.raises(KeyError): + md.difference_update({'a': 'aval', 'd': 'dval'}) + assert md.to_xml().decode('utf-8') == 'bbval' def test__float_01(): - assert_equal(_float_01(0), 0) - assert_equal(_float_01(1), 1) - assert_equal(_float_01('0'), 0) - assert_equal(_float_01('0.2'), 0.2) - assert_raises(ValueError, _float_01, 1.1) - assert_raises(ValueError, _float_01, -0.1) - assert_raises(ValueError, _float_01, 2) - assert_raises(ValueError, _float_01, -1) - assert_raises(ValueError, _float_01, 'foo') + assert _float_01(0) == 0 + assert _float_01(1) == 1 + assert _float_01('0') == 0 + assert _float_01('0.2') == 0.2 + with pytest.raises(ValueError): + _float_01(1.1) + with pytest.raises(ValueError): + _float_01(-0.1) + with pytest.raises(ValueError): + _float_01(2) + with pytest.raises(ValueError): + _float_01(-1) + with pytest.raises(ValueError): + _float_01('foo') def test_cifti2_labeltable(): lt = ci.Cifti2LabelTable() - assert_equal(len(lt), 0) - assert_raises(ci.Cifti2HeaderError, lt.to_xml) - assert_raises(ci.Cifti2HeaderError, lt._to_xml_element) + assert len(lt) == 0 + with pytest.raises(ci.Cifti2HeaderError): + lt.to_xml() + with pytest.raises(ci.Cifti2HeaderError): + lt._to_xml_element() + label = ci.Cifti2Label(label='Test', key=0) lt[0] = label - assert_equal(len(lt), 1) - assert_equal(dict(lt), {label.key: label}) + assert len(lt) == 1 + assert dict(lt) == {label.key: label} lt.clear() lt.append(label) - assert_equal(len(lt), 1) - assert_equal(dict(lt), {label.key: label}) + assert len(lt) == 1 + assert dict(lt) == {label.key: label} lt.clear() test_tuple = (label.label, label.red, label.green, label.blue, label.alpha) lt[label.key] = test_tuple - assert_equal(len(lt), 1) + assert len(lt) == 1 v = lt[label.key] - assert_equal( - (v.label, v.red, v.green, v.blue, v.alpha), - test_tuple - ) + assert (v.label, v.red, v.green, v.blue, v.alpha) == test_tuple + + with pytest.raises(ValueError): + lt[1] = label + + with pytest.raises(ValueError): + lt[0] = test_tuple[:-1] + + with pytest.raises(ValueError): + lt[0] = ('foo', 1.1, 0, 0, 1) + + with pytest.raises(ValueError): + lt[0] = ('foo', 1.0, -1, 0, 1) + + with pytest.raises(ValueError): + lt[0] = ('foo', 1.0, 0, -0.1, 1) - assert_raises(ValueError, lt.__setitem__, 1, label) - assert_raises(ValueError, lt.__setitem__, 0, test_tuple[:-1]) - assert_raises(ValueError, lt.__setitem__, 0, ('foo', 1.1, 0, 0, 1)) - assert_raises(ValueError, lt.__setitem__, 0, ('foo', 1.0, -1, 0, 1)) - 
assert_raises(ValueError, lt.__setitem__, 0, ('foo', 1.0, 0, -0.1, 1)) def test_cifti2_label(): lb = ci.Cifti2Label() lb.label = 'Test' lb.key = 0 - assert_equal(lb.rgba, (0, 0, 0, 0)) - assert_true(compare_xml_leaf( - lb.to_xml().decode('utf-8'), - "" - )) + assert lb.rgba == (0, 0, 0, 0) + assert compare_xml_leaf(lb.to_xml().decode('utf-8'), + "") lb.red = 0 lb.green = 0.1 lb.blue = 0.2 lb.alpha = 0.3 - assert_equal(lb.rgba, (0, 0.1, 0.2, 0.3)) + assert lb.rgba == (0, 0.1, 0.2, 0.3) - assert_true(compare_xml_leaf( - lb.to_xml().decode('utf-8'), - "" - )) + assert compare_xml_leaf(lb.to_xml().decode('utf-8'), + "") lb.red = 10 - assert_raises(ci.Cifti2HeaderError, lb.to_xml) + with pytest.raises(ci.Cifti2HeaderError): + lb.to_xml() lb.red = 0 lb.key = 'a' - assert_raises(ci.Cifti2HeaderError, lb.to_xml) + with pytest.raises(ci.Cifti2HeaderError): + lb.to_xml() lb.key = 0 def test_cifti2_parcel(): pl = ci.Cifti2Parcel() - assert_raises(ci.Cifti2HeaderError, pl.to_xml) - assert_raises(TypeError, pl.append_cifti_vertices, None) + with pytest.raises(ci.Cifti2HeaderError): + pl.to_xml() + + with pytest.raises(TypeError): + pl.append_cifti_vertices(None) + + with pytest.raises(ValueError): + ci.Cifti2Parcel(vertices=[1, 2, 3]) - assert_raises(ValueError, ci.Cifti2Parcel, **{'vertices': [1, 2, 3]}) pl = ci.Cifti2Parcel(name='region', voxel_indices_ijk=ci.Cifti2VoxelIndicesIJK([[1, 2, 3]]), vertices=[ci.Cifti2Vertices([0, 1, 2])]) pl.pop_cifti2_vertices(0) - assert_equal(len(pl.vertices), 0) - assert_equal( - pl.to_xml().decode('utf-8'), - '1 2 3' - ) + + assert len(pl.vertices) == 0 + assert pl.to_xml().decode('utf-8') == '1 2 3' def test_cifti2_vertices(): vs = ci.Cifti2Vertices() - assert_raises(ci.Cifti2HeaderError, vs.to_xml) + with pytest.raises(ci.Cifti2HeaderError): + vs.to_xml() + vs.brain_structure = 'CIFTI_STRUCTURE_OTHER' - assert_equal( - vs.to_xml().decode('utf-8'), - '' - ) - assert_equal(len(vs), 0) + + assert vs.to_xml().decode('utf-8') == '' + + assert len(vs) == 0 vs.extend(np.array([0, 1, 2])) - assert_equal(len(vs), 3) - assert_raises(ValueError, vs.__setitem__, 1, 'a') - assert_raises(ValueError, vs.insert, 1, 'a') - assert_equal( - vs.to_xml().decode('utf-8'), - '0 1 2' - ) + assert len(vs) == 3 + with pytest.raises(ValueError): + vs[1] = 'a' + with pytest.raises(ValueError): + vs.insert(1, 'a') + + assert vs.to_xml().decode('utf-8') == '0 1 2' vs[0] = 10 - assert_equal(vs[0], 10) - assert_equal(len(vs), 3) + assert vs[0] == 10 + assert len(vs) == 3 vs = ci.Cifti2Vertices(vertices=[0, 1, 2]) - assert_equal(len(vs), 3) + assert len(vs) == 3 def test_cifti2_transformationmatrixvoxelindicesijktoxyz(): tr = ci.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ() - assert_raises(ci.Cifti2HeaderError, tr.to_xml) + with pytest.raises(ci.Cifti2HeaderError): + tr.to_xml() def test_cifti2_surface(): s = ci.Cifti2Surface() - assert_raises(ci.Cifti2HeaderError, s.to_xml) + with pytest.raises(ci.Cifti2HeaderError): + s.to_xml() def test_cifti2_volume(): vo = ci.Cifti2Volume() - assert_raises(ci.Cifti2HeaderError, vo.to_xml) + with pytest.raises(ci.Cifti2HeaderError): + vo.to_xml() def test_cifti2_vertexindices(): vi = ci.Cifti2VertexIndices() - assert_equal(len(vi), 0) - assert_raises(ci.Cifti2HeaderError, vi.to_xml) + assert len(vi) == 0 + with pytest.raises(ci.Cifti2HeaderError): + vi.to_xml() vi.extend(np.array([0, 1, 2])) - assert_equal(len(vi), 3) - assert_equal( - vi.to_xml().decode('utf-8'), - '0 1 2' - ) - assert_raises(ValueError, vi.__setitem__, 0, 'a') + assert len(vi) == 3 + 
assert vi.to_xml().decode('utf-8') == '0 1 2' + + with pytest.raises(ValueError): + vi[0] = 'a' + vi[0] = 10 - assert_equal(vi[0], 10) - assert_equal(len(vi), 3) + assert vi[0] == 10 + assert len(vi) == 3 def test_cifti2_voxelindicesijk(): vi = ci.Cifti2VoxelIndicesIJK() - assert_raises(ci.Cifti2HeaderError, vi.to_xml) + with pytest.raises(ci.Cifti2HeaderError): + vi.to_xml() vi = ci.Cifti2VoxelIndicesIJK() - assert_equal(len(vi), 0) - assert_raises(ci.Cifti2HeaderError, vi.to_xml) + assert len(vi) == 0 + + with pytest.raises(ci.Cifti2HeaderError): + vi.to_xml() vi.extend(np.array([[0, 1, 2]])) - assert_equal(len(vi), 1) - assert_equal(vi[0], [0, 1, 2]) + + assert len(vi) == 1 + assert vi[0] == [0, 1, 2] vi.append([3, 4, 5]) - assert_equal(len(vi), 2) + assert len(vi) == 2 vi.append([6, 7, 8]) - assert_equal(len(vi), 3) + assert len(vi) == 3 del vi[-1] - assert_equal(len(vi), 2) + assert len(vi) == 2 - assert_equal(vi[1], [3, 4, 5]) + assert vi[1] == [3, 4, 5] vi[1] = [3, 4, 6] - assert_equal(vi[1], [3, 4, 6]) - assert_raises(ValueError, vi.__setitem__, 'a', [1, 2, 3]) - assert_raises(TypeError, vi.__setitem__, [1, 2], [1, 2, 3]) - assert_raises(ValueError, vi.__setitem__, 1, [2, 3]) - assert_equal(vi[1, 1], 4) - assert_raises(ValueError, vi.__setitem__, [1, 1], 'a') - assert_equal(vi[0, 1:], [1, 2]) + assert vi[1] == [3, 4, 6] + with pytest.raises(ValueError): + vi['a'] = [1, 2, 3] + + with pytest.raises(TypeError): + vi[[1, 2]] = [1, 2, 3] + + with pytest.raises(ValueError): + vi[1] = [2, 3] + + assert vi[1, 1] == 4 + + with pytest.raises(ValueError): + vi[[1, 1]] = 'a' + + assert vi[0, 1:] == [1, 2] vi[0, 1] = 10 - assert_equal(vi[0, 1], 10) + assert vi[0, 1] == 10 vi[0, 1] = 1 #test for vi[:, 0] and other slices - assert_raises(NotImplementedError, vi.__getitem__, (slice(None), 0)) - assert_raises(NotImplementedError, vi.__setitem__, (slice(None), 0), 0) - assert_raises(NotImplementedError, vi.__delitem__, (slice(None), 0)) - assert_raises(ValueError, vi.__getitem__, (0, 0, 0)) - assert_raises(ValueError, vi.__setitem__, (0, 0, 0), 0) - - assert_equal( - vi.to_xml().decode('utf-8'), - '0 1 2\n3 4 6' - ) - assert_raises(TypeError, ci.Cifti2VoxelIndicesIJK, [0, 1]) + with pytest.raises(NotImplementedError): + vi[:, 0] + with pytest.raises(NotImplementedError): + vi[:, 0] = 0 + with pytest.raises(NotImplementedError): + # Don't know how to use remove with slice + del vi[:, 0] + with pytest.raises(ValueError): + vi[0, 0, 0] + + with pytest.raises(ValueError): + vi[0, 0, 0] = 0 + + assert vi.to_xml().decode('utf-8') == '0 1 2\n3 4 6' + + with pytest.raises(TypeError): + ci.Cifti2VoxelIndicesIJK([0, 1]) + vi = ci.Cifti2VoxelIndicesIJK([[1, 2, 3]]) - assert_equal(len(vi), 1) + assert len(vi) == 1 def test_matrixindicesmap(): @@ -273,59 +316,77 @@ def test_matrixindicesmap(): volume2 = ci.Cifti2Volume() parcel = ci.Cifti2Parcel() - assert_is_none(mim.volume) + assert mim.volume is None mim.append(volume) mim.append(parcel) - assert_equal(mim.volume, volume) - assert_raises(ci.Cifti2HeaderError, mim.insert, 0, volume) - assert_raises(ci.Cifti2HeaderError, mim.__setitem__, 1, volume) + assert mim.volume == volume + with pytest.raises(ci.Cifti2HeaderError): + mim.insert(0, volume) + + with pytest.raises(ci.Cifti2HeaderError): + mim[1] = volume mim[0] = volume2 - assert_equal(mim.volume, volume2) + assert mim.volume == volume2 del mim.volume - assert_is_none(mim.volume) - assert_raises(ValueError, delattr, mim, 'volume') + assert mim.volume is None + with pytest.raises(ValueError): + del 
mim.volume mim.volume = volume - assert_equal(mim.volume, volume) + assert mim.volume == volume mim.volume = volume2 - assert_equal(mim.volume, volume2) + assert mim.volume == volume2 - assert_raises(ValueError, setattr, mim, 'volume', parcel) + with pytest.raises(ValueError): + mim.volume = parcel def test_matrix(): m = ci.Cifti2Matrix() - assert_raises(TypeError, m, setattr, 'metadata', ci.Cifti2Parcel()) - assert_raises(TypeError, m.__setitem__, 0, ci.Cifti2Parcel()) - assert_raises(TypeError, m.insert, 0, ci.Cifti2Parcel()) + + with pytest.raises(ValueError): + m.metadata = ci.Cifti2Parcel() + + with pytest.raises(TypeError): + m[0] = ci.Cifti2Parcel() + + with pytest.raises(TypeError): + m.insert(0, ci.Cifti2Parcel()) mim_none = ci.Cifti2MatrixIndicesMap(None, 'CIFTI_INDEX_TYPE_LABELS') mim_0 = ci.Cifti2MatrixIndicesMap(0, 'CIFTI_INDEX_TYPE_LABELS') mim_1 = ci.Cifti2MatrixIndicesMap(1, 'CIFTI_INDEX_TYPE_LABELS') mim_01 = ci.Cifti2MatrixIndicesMap([0, 1], 'CIFTI_INDEX_TYPE_LABELS') - assert_raises(ci.Cifti2HeaderError, m.insert, 0, mim_none) - assert_equal(m.mapped_indices, []) + with pytest.raises(ci.Cifti2HeaderError): + m.insert(0, mim_none) + + assert m.mapped_indices == [] h = ci.Cifti2Header(matrix=m) - assert_equal(m.mapped_indices, []) + assert m.mapped_indices == [] m.insert(0, mim_0) - assert_equal(h.mapped_indices, [0]) - assert_equal(h.number_of_mapped_indices, 1) - assert_raises(ci.Cifti2HeaderError, m.insert, 0, mim_0) - assert_raises(ci.Cifti2HeaderError, m.insert, 0, mim_01) + assert h.mapped_indices == [0] + assert h.number_of_mapped_indices == 1 + with pytest.raises(ci.Cifti2HeaderError): + m.insert(0, mim_0) + + with pytest.raises(ci.Cifti2HeaderError): + m.insert(0, mim_01) + m[0] = mim_1 - assert_equal(list(m.mapped_indices), [1]) + assert list(m.mapped_indices) == [1] m.insert(0, mim_0) - assert_equal(list(sorted(m.mapped_indices)), [0, 1]) - assert_equal(h.number_of_mapped_indices, 2) - assert_equal(h.get_index_map(0), mim_0) - assert_equal(h.get_index_map(1), mim_1) - assert_raises(ci.Cifti2HeaderError, h.get_index_map, 2) + assert list(sorted(m.mapped_indices)) == [0, 1] + assert h.number_of_mapped_indices == 2 + assert h.get_index_map(0) == mim_0 + assert h.get_index_map(1) == mim_1 + with pytest.raises(ci.Cifti2HeaderError): + h.get_index_map(2) def test_underscoring(): @@ -342,7 +403,7 @@ def test_underscoring(): ) for camel, underscored in pairs: - assert_equal(ci.cifti2._underscore(camel), underscored) + assert ci.cifti2._underscore(camel) == underscored class TestCifti2ImageAPI(_TDA): @@ -358,4 +419,10 @@ class TestCifti2ImageAPI(_TDA): standard_extension = '.nii' def make_imaker(self, arr, header=None, ni_header=None): + for idx, sz in enumerate(arr.shape): + maps = [ci.Cifti2NamedMap(str(value)) for value in range(sz)] + mim = ci.Cifti2MatrixIndicesMap( + (idx, ), 'CIFTI_INDEX_TYPE_SCALARS', maps=maps + ) + header.matrix.append(mim) return lambda: self.image_maker(arr.copy(), header, ni_header) diff --git a/nibabel/cifti2/tests/test_cifti2io_axes.py b/nibabel/cifti2/tests/test_cifti2io_axes.py new file mode 100644 index 0000000000..c237e3c61a --- /dev/null +++ b/nibabel/cifti2/tests/test_cifti2io_axes.py @@ -0,0 +1,180 @@ +from nibabel.cifti2 import cifti2_axes, cifti2 +from nibabel.tests.nibabel_data import get_nibabel_data, needs_nibabel_data +import nibabel as nib +import os +import numpy as np +import tempfile + +test_directory = os.path.join(get_nibabel_data(), 'nitest-cifti2') + +hcp_labels = ['CortexLeft', 'CortexRight', 'AccumbensLeft', 
'AccumbensRight', 'AmygdalaLeft', 'AmygdalaRight', + 'brain_stem', 'CaudateLeft', 'CaudateRight', 'CerebellumLeft', 'CerebellumRight', + 'Diencephalon_ventral_left', 'Diencephalon_ventral_right', 'HippocampusLeft', 'HippocampusRight', + 'PallidumLeft', 'PallidumRight', 'PutamenLeft', 'PutamenRight', 'ThalamusLeft', 'ThalamusRight'] + +hcp_n_elements = [29696, 29716, 135, 140, 315, 332, 3472, 728, 755, 8709, 9144, 706, + 712, 764, 795, 297, 260, 1060, 1010, 1288, 1248] + +hcp_affine = np.array([[ -2., 0., 0., 90.], + [ 0., 2., 0., -126.], + [ 0., 0., 2., -72.], + [ 0., 0., 0., 1.]]) + + +def check_hcp_grayordinates(brain_model): + """Checks that a BrainModelAxis matches the expected 32k HCP grayordinates + """ + assert isinstance(brain_model, cifti2_axes.BrainModelAxis) + structures = list(brain_model.iter_structures()) + assert len(structures) == len(hcp_labels) + idx_start = 0 + for idx, (name, _, bm), label, nel in zip(range(len(structures)), structures, hcp_labels, hcp_n_elements): + if idx < 2: + assert name in bm.nvertices.keys() + assert (bm.voxel == -1).all() + assert (bm.vertex != -1).any() + assert bm.nvertices[name] == 32492 + else: + assert name not in bm.nvertices.keys() + assert (bm.voxel != -1).any() + assert (bm.vertex == -1).all() + assert (bm.affine == hcp_affine).all() + assert bm.volume_shape == (91, 109, 91) + assert name == cifti2_axes.BrainModelAxis.to_cifti_brain_structure_name(label) + assert len(bm) == nel + assert (bm.name == brain_model.name[idx_start:idx_start + nel]).all() + assert (bm.voxel == brain_model.voxel[idx_start:idx_start + nel]).all() + assert (bm.vertex == brain_model.vertex[idx_start:idx_start + nel]).all() + idx_start += nel + assert idx_start == len(brain_model) + + assert (brain_model.vertex[:5] == np.arange(5)).all() + assert structures[0][2].vertex[-1] == 32491 + assert structures[1][2].vertex[0] == 0 + assert structures[1][2].vertex[-1] == 32491 + assert structures[-1][2].name[-1] == brain_model.name[-1] + assert (structures[-1][2].voxel[-1] == brain_model.voxel[-1]).all() + assert structures[-1][2].vertex[-1] == brain_model.vertex[-1] + assert (brain_model.voxel[-1] == [38, 55, 46]).all() + assert (brain_model.voxel[70000] == [56, 22, 19]).all() + + +def check_Conte69(brain_model): + """Checks that the BrainModelAxis matches the expected Conte69 surface coordinates + """ + assert isinstance(brain_model, cifti2_axes.BrainModelAxis) + structures = list(brain_model.iter_structures()) + assert len(structures) == 2 + assert structures[0][0] == 'CIFTI_STRUCTURE_CORTEX_LEFT' + assert structures[0][2].surface_mask.all() + assert structures[1][0] == 'CIFTI_STRUCTURE_CORTEX_RIGHT' + assert structures[1][2].surface_mask.all() + assert (brain_model.voxel == -1).all() + + assert (brain_model.vertex[:5] == np.arange(5)).all() + assert structures[0][2].vertex[-1] == 32491 + assert structures[1][2].vertex[0] == 0 + assert structures[1][2].vertex[-1] == 32491 + + +def check_rewrite(arr, axes, extension='.nii'): + """ + Checks wheter writing the Cifti2 array to disc and reading it back in gives the same object + + Parameters + ---------- + arr : array + N-dimensional array of data + axes : Sequence[cifti2_axes.Axis] + sequence of length N with the meaning of the rows/columns along each dimension + extension : str + custom extension to use + """ + (fd, name) = tempfile.mkstemp(extension) + cifti2.Cifti2Image(arr, header=axes).to_filename(name) + img = nib.load(name) + arr2 = img.get_fdata() + assert np.allclose(arr, arr2) + for idx in range(len(img.shape)): + 
assert (axes[idx] == img.header.get_axis(idx)) + return img + + +@needs_nibabel_data('nitest-cifti2') +def test_read_ones(): + img = nib.load(os.path.join(test_directory, 'ones.dscalar.nii')) + arr = img.get_fdata() + axes = [img.header.get_axis(dim) for dim in range(2)] + assert (arr == 1).all() + assert isinstance(axes[0], cifti2_axes.ScalarAxis) + assert len(axes[0]) == 1 + assert axes[0].name[0] == 'ones' + assert axes[0].meta[0] == {} + check_hcp_grayordinates(axes[1]) + img = check_rewrite(arr, axes) + check_hcp_grayordinates(img.header.get_axis(1)) + + +@needs_nibabel_data('nitest-cifti2') +def test_read_conte69_dscalar(): + img = nib.load(os.path.join(test_directory, 'Conte69.MyelinAndCorrThickness.32k_fs_LR.dscalar.nii')) + arr = img.get_fdata() + axes = [img.header.get_axis(dim) for dim in range(2)] + assert isinstance(axes[0], cifti2_axes.ScalarAxis) + assert len(axes[0]) == 2 + assert axes[0].name[0] == 'MyelinMap_BC_decurv' + assert axes[0].name[1] == 'corrThickness' + assert axes[0].meta[0] == {'PaletteColorMapping': '\n MODE_AUTO_SCALE_PERCENTAGE\n 98.000000 2.000000 2.000000 98.000000\n -100.000000 0.000000 0.000000 100.000000\n ROY-BIG-BL\n true\n true\n false\n true\n THRESHOLD_TEST_SHOW_OUTSIDE\n THRESHOLD_TYPE_OFF\n false\n -1.000000 1.000000\n -1.000000 1.000000\n -1.000000 1.000000\n \n PALETTE_THRESHOLD_RANGE_MODE_MAP\n'} + check_Conte69(axes[1]) + check_rewrite(arr, axes) + + +@needs_nibabel_data('nitest-cifti2') +def test_read_conte69_dtseries(): + img = nib.load(os.path.join(test_directory, 'Conte69.MyelinAndCorrThickness.32k_fs_LR.dtseries.nii')) + arr = img.get_fdata() + axes = [img.header.get_axis(dim) for dim in range(2)] + assert isinstance(axes[0], cifti2_axes.SeriesAxis) + assert len(axes[0]) == 2 + assert axes[0].start == 0 + assert axes[0].step == 1 + assert axes[0].size == arr.shape[0] + assert (axes[0].time == [0, 1]).all() + check_Conte69(axes[1]) + check_rewrite(arr, axes) + + +@needs_nibabel_data('nitest-cifti2') +def test_read_conte69_dlabel(): + img = nib.load(os.path.join(test_directory, 'Conte69.parcellations_VGD11b.32k_fs_LR.dlabel.nii')) + arr = img.get_fdata() + axes = [img.header.get_axis(dim) for dim in range(2)] + assert isinstance(axes[0], cifti2_axes.LabelAxis) + assert len(axes[0]) == 3 + assert (axes[0].name == ['Composite Parcellation-lh (FRB08_OFP03_retinotopic)', + 'Brodmann lh (from colin.R via pals_R-to-fs_LR)', 'MEDIAL WALL lh (fs_LR)']).all() + assert axes[0].label[1][70] == ('19_B05', (1.0, 0.867, 0.467, 1.0)) + assert (axes[0].meta == [{}] * 3).all() + check_Conte69(axes[1]) + check_rewrite(arr, axes) + + +@needs_nibabel_data('nitest-cifti2') +def test_read_conte69_ptseries(): + img = nib.load(os.path.join(test_directory, 'Conte69.MyelinAndCorrThickness.32k_fs_LR.ptseries.nii')) + arr = img.get_fdata() + axes = [img.header.get_axis(dim) for dim in range(2)] + assert isinstance(axes[0], cifti2_axes.SeriesAxis) + assert len(axes[0]) == 2 + assert axes[0].start == 0 + assert axes[0].step == 1 + assert axes[0].size == arr.shape[0] + assert (axes[0].time == [0, 1]).all() + + assert len(axes[1]) == 54 + voxels, vertices = axes[1]['ER_FRB08'] + assert voxels.shape == (0, 3) + assert len(vertices) == 2 + assert vertices['CIFTI_STRUCTURE_CORTEX_LEFT'].shape == (206 // 2, ) + assert vertices['CIFTI_STRUCTURE_CORTEX_RIGHT'].shape == (206 // 2, ) + check_rewrite(arr, axes) diff --git a/nibabel/cifti2/tests/test_cifti2io.py b/nibabel/cifti2/tests/test_cifti2io_header.py similarity index 74% rename from nibabel/cifti2/tests/test_cifti2io.py 
rename to nibabel/cifti2/tests/test_cifti2io_header.py index 521e112847..0fef5ccd78 100644 --- a/nibabel/cifti2/tests/test_cifti2io.py +++ b/nibabel/cifti2/tests/test_cifti2io_header.py @@ -6,7 +6,6 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -from __future__ import division, print_function, absolute_import from os.path import join as pjoin, dirname import io @@ -22,7 +21,7 @@ from nibabel.tests.test_nifti2 import TestNifti2SingleHeader from numpy.testing import assert_array_almost_equal -from nose.tools import (assert_true, assert_equal, assert_raises) +import pytest NIBABEL_TEST_DATA = pjoin(dirname(nib.__file__), 'tests', 'data') NIFTI2_DATA = pjoin(NIBABEL_TEST_DATA, 'example_nifti2.nii.gz') @@ -43,39 +42,40 @@ def test_read_nifti2(): - # Error trying to read a CIFTI2 image from a NIfTI2-only image. + # Error trying to read a CIFTI-2 image from a NIfTI2-only image. filemap = ci.Cifti2Image.make_file_map() for k in filemap: filemap[k].fileobj = io.open(NIFTI2_DATA) - assert_raises(ValueError, ci.Cifti2Image.from_file_map, filemap) + with pytest.raises(ValueError): + ci.Cifti2Image.from_file_map(filemap) @needs_nibabel_data('nitest-cifti2') def test_read_internal(): img2 = ci.load(DATA_FILE6) - assert_true(isinstance(img2.header, ci.Cifti2Header)) - assert_equal(img2.shape, (1, 91282)) + assert isinstance(img2.header, ci.Cifti2Header) + assert img2.shape == (1, 91282) @needs_nibabel_data('nitest-cifti2') def test_read_and_proxies(): img2 = nib.load(DATA_FILE6) - assert_true(isinstance(img2.header, ci.Cifti2Header)) - assert_equal(img2.shape, (1, 91282)) + assert isinstance(img2.header, ci.Cifti2Header) + assert img2.shape == (1, 91282) # While we cannot reshape arrayproxies, all images are in-memory - assert_true(not img2.in_memory) - data = img2.get_data() - assert_true(data is not img2.dataobj) + assert not img2.in_memory + data = img2.get_fdata() + assert data is not img2.dataobj # Uncaching has no effect, images are always array images img2.uncache() - assert_true(data is not img2.get_data()) + assert data is not img2.get_fdata() @needs_nibabel_data('nitest-cifti2') def test_version(): for i, dat in enumerate(datafiles): img = nib.load(dat) - assert_equal(LooseVersion(img.header.version), LooseVersion('2')) + assert LooseVersion(img.header.version) == LooseVersion('2') @needs_nibabel_data('nitest-cifti2') @@ -85,8 +85,7 @@ def test_readwritedata(): img = ci.load(name) ci.save(img, 'test.nii') img2 = ci.load('test.nii') - assert_equal(len(img.header.matrix), - len(img2.header.matrix)) + assert len(img.header.matrix) == len(img2.header.matrix) # Order should be preserved in load/save for mim1, mim2 in zip(img.header.matrix, img2.header.matrix): @@ -94,14 +93,14 @@ def test_readwritedata(): if isinstance(m_, ci.Cifti2NamedMap)] named_maps2 = [m_ for m_ in mim2 if isinstance(m_, ci.Cifti2NamedMap)] - assert_equal(len(named_maps1), len(named_maps2)) + assert len(named_maps1) == len(named_maps2) for map1, map2 in zip(named_maps1, named_maps2): - assert_equal(map1.map_name, map2.map_name) + assert map1.map_name == map2.map_name if map1.label_table is None: - assert_true(map2.label_table is None) + assert map2.label_table is None else: - assert_equal(len(map1.label_table), - len(map2.label_table)) + assert len(map1.label_table) == len(map2.label_table) + assert_array_almost_equal(img.dataobj, img2.dataobj) @@ -112,8 +111,7 @@ def test_nibabel_readwritedata(): img = nib.load(name) nib.save(img, 'test.nii') img2 = 
nib.load('test.nii') - assert_equal(len(img.header.matrix), - len(img2.header.matrix)) + assert len(img.header.matrix) == len(img2.header.matrix) # Order should be preserved in load/save for mim1, mim2 in zip(img.header.matrix, img2.header.matrix): @@ -121,14 +119,13 @@ def test_nibabel_readwritedata(): if isinstance(m_, ci.Cifti2NamedMap)] named_maps2 = [m_ for m_ in mim2 if isinstance(m_, ci.Cifti2NamedMap)] - assert_equal(len(named_maps1), len(named_maps2)) + assert len(named_maps1) == len(named_maps2) for map1, map2 in zip(named_maps1, named_maps2): - assert_equal(map1.map_name, map2.map_name) + assert map1.map_name == map2.map_name if map1.label_table is None: - assert_true(map2.label_table is None) + assert map2.label_table is None else: - assert_equal(len(map1.label_table), - len(map2.label_table)) + assert len(map1.label_table) == len(map2.label_table) assert_array_almost_equal(img.dataobj, img2.dataobj) @@ -153,10 +150,10 @@ def test_cifti2types(): for name in datafiles: hdr = ci.load(name).header # Matrix and MetaData aren't conditional, so don't bother counting - assert_true(isinstance(hdr.matrix, ci.Cifti2Matrix)) - assert_true(isinstance(hdr.matrix.metadata, ci.Cifti2MetaData)) + assert isinstance(hdr.matrix, ci.Cifti2Matrix) + assert isinstance(hdr.matrix.metadata, ci.Cifti2MetaData) for mim in hdr.matrix: - assert_true(isinstance(mim, ci.Cifti2MatrixIndicesMap)) + assert isinstance(mim, ci.Cifti2MatrixIndicesMap) counter[ci.Cifti2MatrixIndicesMap] += 1 for map_ in mim: print(map_) @@ -169,21 +166,20 @@ def test_cifti2types(): counter[ci.Cifti2VoxelIndicesIJK] += 1 elif isinstance(map_, ci.Cifti2NamedMap): counter[ci.Cifti2NamedMap] += 1 - assert_true(isinstance(map_.metadata, ci.Cifti2MetaData)) + assert isinstance(map_.metadata, ci.Cifti2MetaData) if isinstance(map_.label_table, ci.Cifti2LabelTable): counter[ci.Cifti2LabelTable] += 1 for label in map_.label_table: - assert_true(isinstance(map_.label_table[label], - ci.Cifti2Label)) + assert isinstance(map_.label_table[label], ci.Cifti2Label) counter[ci.Cifti2Label] += 1 elif isinstance(map_, ci.Cifti2Parcel): counter[ci.Cifti2Parcel] += 1 if isinstance(map_.voxel_indices_ijk, ci.Cifti2VoxelIndicesIJK): counter[ci.Cifti2VoxelIndicesIJK] += 1 - assert_true(isinstance(map_.vertices, list)) + assert isinstance(map_.vertices, list) for vtcs in map_.vertices: - assert_true(isinstance(vtcs, ci.Cifti2Vertices)) + assert isinstance(vtcs, ci.Cifti2Vertices) counter[ci.Cifti2Vertices] += 1 elif isinstance(map_, ci.Cifti2Surface): counter[ci.Cifti2Surface] += 1 @@ -193,19 +189,14 @@ def test_cifti2types(): ci.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ): counter[ci.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ] += 1 - assert_equal(list(mim.named_maps), - [m_ for m_ in mim if isinstance(m_, ci.Cifti2NamedMap)]) - assert_equal(list(mim.surfaces), - [m_ for m_ in mim if isinstance(m_, ci.Cifti2Surface)]) - assert_equal(list(mim.parcels), - [m_ for m_ in mim if isinstance(m_, ci.Cifti2Parcel)]) - assert_equal(list(mim.brain_models), - [m_ for m_ in mim if isinstance(m_, ci.Cifti2BrainModel)]) - assert_equal([mim.volume] if mim.volume else [], - [m_ for m_ in mim if isinstance(m_, ci.Cifti2Volume)]) + assert list(mim.named_maps) == [m_ for m_ in mim if isinstance(m_, ci.Cifti2NamedMap)] + assert list(mim.surfaces) == [m_ for m_ in mim if isinstance(m_, ci.Cifti2Surface)] + assert list(mim.parcels) == [m_ for m_ in mim if isinstance(m_, ci.Cifti2Parcel)] + assert list(mim.brain_models) == [m_ for m_ in mim if isinstance(m_, 
ci.Cifti2BrainModel)] + assert ([mim.volume] if mim.volume else []) == [m_ for m_ in mim if isinstance(m_, ci.Cifti2Volume)] for klass, count in counter.items(): - assert_true(count > 0, "No exercise of " + klass.__name__) + assert count > 0, "No exercise of " + klass.__name__ @needs_nibabel_data('nitest-cifti2') @@ -238,34 +229,34 @@ def test_read_geometry(): ('CIFTI_STRUCTURE_THALAMUS_RIGHT', 1248, [32, 47, 34], [38, 55, 46])] current_index = 0 for from_file, expected in zip(geometry_mapping.brain_models, expected_geometry): - assert_true(from_file.model_type in ("CIFTI_MODEL_TYPE_SURFACE", "CIFTI_MODEL_TYPE_VOXELS")) - assert_equal(from_file.brain_structure, expected[0]) - assert_equal(from_file.index_offset, current_index) - assert_equal(from_file.index_count, expected[1]) + assert from_file.model_type in ("CIFTI_MODEL_TYPE_SURFACE", "CIFTI_MODEL_TYPE_VOXELS") + assert from_file.brain_structure == expected[0] + assert from_file.index_offset == current_index + assert from_file.index_count == expected[1] current_index += from_file.index_count if from_file.model_type == 'CIFTI_MODEL_TYPE_SURFACE': - assert_equal(from_file.voxel_indices_ijk, None) - assert_equal(len(from_file.vertex_indices), expected[1]) - assert_equal(from_file.vertex_indices[0], expected[2]) - assert_equal(from_file.vertex_indices[-1], expected[3]) - assert_equal(from_file.surface_number_of_vertices, 32492) + assert from_file.voxel_indices_ijk is None + assert len(from_file.vertex_indices) == expected[1] + assert from_file.vertex_indices[0] == expected[2] + assert from_file.vertex_indices[-1] == expected[3] + assert from_file.surface_number_of_vertices == 32492 else: - assert_equal(from_file.vertex_indices, None) - assert_equal(from_file.surface_number_of_vertices, None) - assert_equal(len(from_file.voxel_indices_ijk), expected[1]) - assert_equal(from_file.voxel_indices_ijk[0], expected[2]) - assert_equal(from_file.voxel_indices_ijk[-1], expected[3]) - assert_equal(current_index, img.shape[1]) + assert from_file.vertex_indices is None + assert from_file.surface_number_of_vertices is None + assert len(from_file.voxel_indices_ijk) == expected[1] + assert from_file.voxel_indices_ijk[0] == expected[2] + assert from_file.voxel_indices_ijk[-1] == expected[3] + assert current_index == img.shape[1] expected_affine = [[-2, 0, 0, 90], [ 0, 2, 0, -126], [ 0, 0, 2, -72], [ 0, 0, 0, 1]] expected_dimensions = (91, 109, 91) - assert_true((geometry_mapping.volume.transformation_matrix_voxel_indices_ijk_to_xyz.matrix == - expected_affine).all()) - assert_equal(geometry_mapping.volume.volume_dimensions, expected_dimensions) + assert (geometry_mapping.volume.transformation_matrix_voxel_indices_ijk_to_xyz.matrix == + expected_affine).all() + assert geometry_mapping.volume.volume_dimensions == expected_dimensions @needs_nibabel_data('nitest-cifti2') @@ -328,18 +319,18 @@ def test_read_parcels(): ('ER_FRB08', ((103, 21514, 26470), (103, 21514, 26470))), ('13b_OFP03', ((60, 21042, 21194), (71, 21040, 21216)))] - assert_equal(img.shape[1], len(expected_parcels)) - assert_equal(len(list(parcel_mapping.parcels)), len(expected_parcels)) + assert img.shape[1] == len(expected_parcels) + assert len(list(parcel_mapping.parcels)) == len(expected_parcels) for (name, expected_surfaces), parcel in zip(expected_parcels, parcel_mapping.parcels): - assert_equal(parcel.name, name) - assert_equal(len(parcel.vertices), 2) + assert parcel.name == name + assert len(parcel.vertices) == 2 for vertices, orientation, (length, first_element, last_element) in 
zip(parcel.vertices, ('LEFT', 'RIGHT'), expected_surfaces): - assert_equal(len(vertices), length) - assert_equal(vertices[0], first_element) - assert_equal(vertices[-1], last_element) - assert_equal(vertices.brain_structure, 'CIFTI_STRUCTURE_CORTEX_%s' % orientation) + assert len(vertices) == length + assert vertices[0] == first_element + assert vertices[-1] == last_element + assert vertices.brain_structure == 'CIFTI_STRUCTURE_CORTEX_%s' % orientation @needs_nibabel_data('nitest-cifti2') @@ -348,31 +339,31 @@ def test_read_scalar(): scalar_mapping = img.header.matrix.get_index_map(0) expected_names = ('MyelinMap_BC_decurv', 'corrThickness') - assert_equal(img.shape[0], len(expected_names)) - assert_equal(len(list(scalar_mapping.named_maps)), len(expected_names)) + assert img.shape[0] == len(expected_names) + assert len(list(scalar_mapping.named_maps)) == len(expected_names) expected_meta = [('PaletteColorMapping', '\n = LooseVersion("1.14"): + legacy_printopt = np.get_printoptions().get("legacy") + np.set_printoptions(legacy="1.13") + yield + np.set_printoptions(legacy=legacy_printopt) + else: + yield diff --git a/nibabel/data.py b/nibabel/data.py index 2a53f15f64..6208ebe7d5 100644 --- a/nibabel/data.py +++ b/nibabel/data.py @@ -8,7 +8,7 @@ from os.path import join as pjoin import glob import sys -from six.moves import configparser +import configparser from distutils.version import LooseVersion from .environment import get_nipy_user_dir, get_nipy_system_dir diff --git a/nibabel/dataobj_images.py b/nibabel/dataobj_images.py index 86185a7aef..4d86810d5d 100644 --- a/nibabel/dataobj_images.py +++ b/nibabel/dataobj_images.py @@ -55,6 +55,11 @@ def dataobj(self): def _data(self): return self._dataobj + @deprecate_with_version('get_data() is deprecated in favor of get_fdata(),' + ' which has a more predictable return type. To ' + 'obtain get_data() behavior going forward, use ' + 'numpy.asanyarray(img.dataobj).', + '3.0', '5.0') def get_data(self, caching='fill'): """ Return image data from image with any necessary scaling applied @@ -62,8 +67,8 @@ def get_data(self, caching='fill'): We recommend you use the ``get_fdata`` method instead of the ``get_data`` method, because it is easier to predict the return - data type. We will deprecate the ``get_data`` method around April - 2018, and remove it around April 2020. + data type. ``get_data`` will be deprecated around November 2019 + and removed around November 2021. If you don't care about the predictability of the return data type, and you want the minimum possible data size in memory, you can @@ -344,7 +349,10 @@ def get_fdata(self, caching='fill', dtype=np.float64): if self._fdata_cache is not None: if self._fdata_cache.dtype.type == dtype.type: return self._fdata_cache - data = np.asanyarray(self._dataobj).astype(dtype, copy=False) + # Always return requested data type + # For array proxies, will attempt to confine data array to dtype + # during scaling + data = np.asanyarray(self._dataobj, dtype=dtype) if caching == 'fill': self._fdata_cache = data return data @@ -404,3 +412,80 @@ def get_shape(self): """ Return shape for image """ return self.shape + + @classmethod + def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): + ''' Class method to create image from mapping in ``file_map`` + + .. deprecated:: 2.4.1 + ``keep_file_open='auto'`` is redundant with `False` and has + been deprecated. It raises an error as of nibabel 3.0. 
+ + Parameters + ---------- + file_map : dict + Mapping with (kay, value) pairs of (``file_type``, FileHolder + instance giving file-likes for each file needed for this image + type. + mmap : {True, False, 'c', 'r'}, optional, keyword only + `mmap` controls the use of numpy memory mapping for reading image + array data. If False, do not try numpy ``memmap`` for data array. + If one of {'c', 'r'}, try numpy memmap with ``mode=mmap``. A + `mmap` value of True gives the same behavior as ``mmap='c'``. If + image data file cannot be memory-mapped, ignore `mmap` value and + read array from file. + keep_file_open : { None, True, False }, optional, keyword only + `keep_file_open` controls whether a new file handle is created + every time the image is accessed, or a single file handle is + created and used for the lifetime of this ``ArrayProxy``. If + ``True``, a single file handle is created and used. If ``False``, + a new file handle is created every time the image is accessed. + If ``file_map`` refers to an open file handle, this setting has no + effect. The default value (``None``) will result in the value of + ``nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT`` being used. + + Returns + ------- + img : DataobjImage instance + ''' + raise NotImplementedError + + @classmethod + def from_filename(klass, filename, *, mmap=True, keep_file_open=None): + '''Class method to create image from filename `filename` + + .. deprecated:: 2.4.1 + ``keep_file_open='auto'`` is redundant with `False` and has + been deprecated. It raises an error as of nibabel 3.0. + + Parameters + ---------- + filename : str + Filename of image to load + mmap : {True, False, 'c', 'r'}, optional, keyword only + `mmap` controls the use of numpy memory mapping for reading image + array data. If False, do not try numpy ``memmap`` for data array. + If one of {'c', 'r'}, try numpy memmap with ``mode=mmap``. A + `mmap` value of True gives the same behavior as ``mmap='c'``. If + image data file cannot be memory-mapped, ignore `mmap` value and + read array from file. + keep_file_open : { None, True, False }, optional, keyword only + `keep_file_open` controls whether a new file handle is created + every time the image is accessed, or a single file handle is + created and used for the lifetime of this ``ArrayProxy``. If + ``True``, a single file handle is created and used. If ``False``, + a new file handle is created every time the image is accessed. + The default value (``None``) will result in the value of + ``nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT`` being used. 
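A minimal loading sketch using these keyword-only arguments; the filename ``example.nii`` is hypothetical, and ``nibabel.load`` is assumed to forward keyword arguments through to ``from_filename``:

    import numpy as np
    import nibabel as nib

    # 'example.nii' is a made-up single-file image on disk
    img = nib.load('example.nii', mmap=False, keep_file_open=True)

    # get_fdata() always returns floating point data (float64 by default);
    # np.asanyarray(img.dataobj) reproduces the old get_data() behaviour
    data = img.get_fdata()
    raw = np.asanyarray(img.dataobj)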
+ + Returns + ------- + img : DataobjImage instance + ''' + if mmap not in (True, False, 'c', 'r'): + raise ValueError("mmap should be one of {True, False, 'c', 'r'}") + file_map = klass.filespec_to_file_map(filename) + return klass.from_file_map(file_map, mmap=mmap, + keep_file_open=keep_file_open) + + load = from_filename diff --git a/nibabel/deprecated.py b/nibabel/deprecated.py index c8abee91a0..1a0f85330d 100644 --- a/nibabel/deprecated.py +++ b/nibabel/deprecated.py @@ -4,7 +4,7 @@ import warnings from .deprecator import Deprecator -from .info import cmp_pkg_version +from .pkg_info import cmp_pkg_version class ModuleProxy(object): @@ -20,10 +20,10 @@ class ModuleProxy(object): :: arr = np.arange(24).reshape((2, 3, 4)) - minc = ModuleProxy('nibabel.minc') - minc_image = minc.Minc1Image(arr, np.eye(4)) + nifti1 = ModuleProxy('nibabel.nifti1') + nifti1_image = nifti1.Nifti1Image(arr, np.eye(4)) - So, the ``minc`` object is a proxy that will import the required module + So, the ``nifti1`` object is a proxy that will import the required module when you do attribute access and return the attributes of the imported module. """ diff --git a/nibabel/deprecator.py b/nibabel/deprecator.py index 32a7c6835c..a0b7b8535a 100644 --- a/nibabel/deprecator.py +++ b/nibabel/deprecator.py @@ -5,7 +5,7 @@ import warnings import re -_LEADING_WHITE = re.compile('^(\s*)') +_LEADING_WHITE = re.compile(r'^(\s*)') class ExpiredDeprecationError(RuntimeError): diff --git a/nibabel/dft.py b/nibabel/dft.py index 392856d4c1..7b39d35b81 100644 --- a/nibabel/dft.py +++ b/nibabel/dft.py @@ -10,7 +10,6 @@ """ DICOM filesystem tools """ -from __future__ import division, print_function, absolute_import import os from os.path import join as pjoin @@ -278,9 +277,7 @@ def __exit__(self, type, value, traceback): def _get_subdirs(base_dir, files_dict=None, followlinks=False): dirs = [] - # followlinks keyword not available for python 2.5. - kwargs = {} if not followlinks else {'followlinks': True} - for (dirpath, dirnames, filenames) in os.walk(base_dir, **kwargs): + for (dirpath, dirnames, filenames) in os.walk(base_dir, followlinks=followlinks): abs_dir = os.path.realpath(dirpath) if abs_dir in dirs: raise CachingError('link cycle detected under %s' % base_dir) diff --git a/nibabel/ecat.py b/nibabel/ecat.py index 8713fc4ea2..bf2c1ff03e 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -558,9 +558,9 @@ def _check_affines(self): affs = [self.get_frame_affine(i) for i in range(nframes)] if affs: i = iter(affs) - first = i.next() + first = next(i) for item in i: - if not np.all(first == item): + if not np.allclose(first, item): return False return True @@ -688,19 +688,32 @@ def ndim(self): def is_proxy(self): return True - def __array__(self): + def __array__(self, dtype=None): ''' Read of data from file This reads ALL FRAMES into one array, can be memory expensive. If you want to read only some slices, use the slicing syntax (``__getitem__``) below, or ``subheader.data_from_fileobj(frame)`` + + Parameters + ---------- + dtype : numpy dtype specifier, optional + A numpy dtype specifier specifying the type of the returned array. + + Returns + ------- + array + Scaled image data with type `dtype`. 
''' + # dtype=None is interpreted as float64 data = np.empty(self.shape) frame_mapping = get_frame_order(self._subheader._mlist) for i in sorted(frame_mapping): data[:, :, :, i] = self._subheader.data_from_fileobj( frame_mapping[i][0]) + if dtype is not None: + data = data.astype(dtype, copy=False) return data def __getitem__(self, sliceobj): @@ -759,7 +772,7 @@ def __init__(self, dataobj, affine, header, Parameters ---------- - dataabj : array-like + dataobj : array-like image data affine : None or (4,4) array-like homogeneous affine giving relationship between voxel coords and @@ -787,7 +800,7 @@ def __init__(self, dataobj, affine, header, >>> frame0 = img.get_frame(0) >>> frame0.shape == (10, 10, 3) True - >>> data4d = img.get_data() + >>> data4d = img.get_fdata() >>> data4d.shape == (10, 10, 3, 1) True """ @@ -810,6 +823,7 @@ def __init__(self, dataobj, affine, header, file_map = self.__class__.make_file_map() self.file_map = file_map self._data_cache = None + self._fdata_cache = None @property def affine(self): @@ -873,7 +887,7 @@ def _get_fileholders(file_map): return file_map['header'], file_map['image'] @classmethod - def from_file_map(klass, file_map): + def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): """class method to create image from mapping specified in file_map """ @@ -942,7 +956,7 @@ def to_file_map(self, file_map=None): # It appears to be necessary to load the data before saving even if the # data itself is not used. - self.get_data() + self.get_fdata() hdr = self.header mlist = self._mlist subheaders = self.get_subheaders() diff --git a/nibabel/eulerangles.py b/nibabel/eulerangles.py index eac1c046ed..0928cd39d3 100644 --- a/nibabel/eulerangles.py +++ b/nibabel/eulerangles.py @@ -85,7 +85,7 @@ import math -from six.moves import reduce +from functools import reduce import numpy as np diff --git a/nibabel/externals/__init__.py b/nibabel/externals/__init__.py index 4c31772bb5..0eefb918c9 100644 --- a/nibabel/externals/__init__.py +++ b/nibabel/externals/__init__.py @@ -1,5 +1,2 @@ # init for externals package from collections import OrderedDict - -from ..deprecated import ModuleProxy as _ModuleProxy -six = _ModuleProxy('nibabel.externals.six') diff --git a/nibabel/externals/netcdf.py b/nibabel/externals/netcdf.py index e485533cd7..7adaf32dc1 100644 --- a/nibabel/externals/netcdf.py +++ b/nibabel/externals/netcdf.py @@ -10,13 +10,16 @@ NetCDF files. The same API is also used in the PyNIO and pynetcdf modules, allowing these modules to be used interchangeably when working with NetCDF files. + +Only NetCDF3 is supported here; for NetCDF4 see +`netCDF4-python `__, +which has a similar API. + """ -from __future__ import division, print_function, absolute_import # TODO: # * properly implement ``_FillValue``. -# * implement Jeff Whitaker's patch for masked variables. # * fix character variables. # * implement PAGESIZE for Python 2.6? @@ -29,20 +32,24 @@ # otherwise the key would be inserted into userspace attributes. 
-__all__ = ['netcdf_file'] +__all__ = ['netcdf_file', 'netcdf_variable'] +import sys +import warnings +import weakref from operator import mul -from mmap import mmap, ACCESS_READ +from collections import OrderedDict -import numpy as np # noqa -from ..py3k import asbytes, asstr -from numpy import frombuffer, ndarray, dtype, empty, array, asarray +import mmap as mm + +import numpy as np +from numpy.compat import asbytes, asstr +from numpy import frombuffer, dtype, empty, array, asarray from numpy import little_endian as LITTLE_ENDIAN from functools import reduce -from six import integer_types - +IS_PYPY = ('__pypy__' in sys.modules) ABSENT = b'\x00\x00\x00\x00\x00\x00\x00\x00' ZERO = b'\x00\x00\x00\x00' @@ -55,27 +62,39 @@ NC_DIMENSION = b'\x00\x00\x00\n' NC_VARIABLE = b'\x00\x00\x00\x0b' NC_ATTRIBUTE = b'\x00\x00\x00\x0c' - +FILL_BYTE = b'\x81' +FILL_CHAR = b'\x00' +FILL_SHORT = b'\x80\x01' +FILL_INT = b'\x80\x00\x00\x01' +FILL_FLOAT = b'\x7C\xF0\x00\x00' +FILL_DOUBLE = b'\x47\x9E\x00\x00\x00\x00\x00\x00' TYPEMAP = {NC_BYTE: ('b', 1), - NC_CHAR: ('c', 1), - NC_SHORT: ('h', 2), - NC_INT: ('i', 4), - NC_FLOAT: ('f', 4), - NC_DOUBLE: ('d', 8)} + NC_CHAR: ('c', 1), + NC_SHORT: ('h', 2), + NC_INT: ('i', 4), + NC_FLOAT: ('f', 4), + NC_DOUBLE: ('d', 8)} + +FILLMAP = {NC_BYTE: FILL_BYTE, + NC_CHAR: FILL_CHAR, + NC_SHORT: FILL_SHORT, + NC_INT: FILL_INT, + NC_FLOAT: FILL_FLOAT, + NC_DOUBLE: FILL_DOUBLE} REVERSE = {('b', 1): NC_BYTE, - ('B', 1): NC_CHAR, - ('c', 1): NC_CHAR, - ('h', 2): NC_SHORT, - ('i', 4): NC_INT, - ('f', 4): NC_FLOAT, - ('d', 8): NC_DOUBLE, + ('B', 1): NC_CHAR, + ('c', 1): NC_CHAR, + ('h', 2): NC_SHORT, + ('i', 4): NC_INT, + ('f', 4): NC_FLOAT, + ('d', 8): NC_DOUBLE, - # these come from asarray(1).dtype.char and asarray('foo').dtype.char, - # used when getting the types from generic attributes. - ('l', 4): NC_INT, - ('S', 1): NC_CHAR} + # these come from asarray(1).dtype.char and asarray('foo').dtype.char, + # used when getting the types from generic attributes. + ('l', 4): NC_INT, + ('S', 1): NC_CHAR} class netcdf_file(object): @@ -96,17 +115,22 @@ class netcdf_file(object): ---------- filename : string or file-like string -> filename - mode : {'r', 'w'}, optional - read-write mode, default is 'r' + mode : {'r', 'w', 'a'}, optional + read-write-append mode, default is 'r' mmap : None or bool, optional Whether to mmap `filename` when reading. Default is True when `filename` is a file name, False when `filename` is a - file-like object + file-like object. Note that when mmap is in use, data arrays + returned refer directly to the mmapped data on disk, and the + file cannot be closed as long as references to it exist. version : {1, 2}, optional version of netcdf to read / write, where 1 means *Classic format* and 2 means *64-bit offset format*. Default is 1. See - `here `_ + `here `__ for more info. + maskandscale : bool, optional + Whether to automatically scale and/or mask data based on attributes. + Default is False. Notes ----- @@ -117,7 +141,7 @@ class netcdf_file(object): NetCDF files are a self-describing binary data format. The file contains metadata that describes the dimensions and variables in the file. More details about NetCDF files can be found `here - `_. There + `__. There are three main sections to a NetCDF data structure: 1. Dimensions @@ -145,6 +169,13 @@ class netcdf_file(object): unnecessary data into memory. It uses the ``mmap`` module to create Numpy arrays mapped to the data on disk, for the same purpose. 
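A minimal sketch of the non-mmap alternative mentioned above, assuming a hypothetical existing file ``example.nc``:

    from nibabel.externals.netcdf import netcdf_file

    # mmap=False copies variable data into ordinary in-memory arrays,
    # so the file can be closed cleanly at any point afterwards
    f = netcdf_file('example.nc', mmap=False)
    time = f.variables['time'][:]
    f.close()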
+ Note that when `netcdf_file` is used to open a file with mmap=True + (default for read-only), arrays returned by it refer to data + directly on the disk. The file should not be closed, and cannot be cleanly + closed when asked, if such arrays are alive. You may want to copy data arrays + obtained from mmapped Netcdf file if they are to be processed after the file + is closed, see the example below. + Examples -------- To create a NetCDF file: @@ -166,9 +197,9 @@ class netcdf_file(object): >>> time.units = 'days since 2008-01-01' >>> f.close() - Note the assignment of ``range(10)`` to ``time[:]``. Exposing the slice + Note the assignment of ``arange(10)`` to ``time[:]``. Exposing the slice of the time variable allows for the data to be set in the object, rather - than letting ``range(10)`` overwrite the ``time`` variable. + than letting ``arange(10)`` overwrite the ``time`` variable. To read the NetCDF file we just created: @@ -182,7 +213,22 @@ class netcdf_file(object): True >>> time[-1] 9 + + NetCDF files, when opened read-only, return arrays that refer + directly to memory-mapped data on disk: + + >>> data = time[:] + >>> data.base.base # doctest: +ELLIPSIS + + + If the data is to be processed after the file is closed, it needs + to be copied to main memory: + + >>> data = time[:].copy() + >>> del time # References to mmap'd objects can delay full closure >>> f.close() + >>> data.mean() + 4.5 A NetCDF file can also be used as context manager: @@ -192,12 +238,16 @@ class netcdf_file(object): Delete our temporary directory and file: - >>> del f, time # needed for windows unlink + >>> del f # needed for windows unlink >>> os.unlink(fname) >>> os.rmdir(tmp_pth) """ - def __init__(self, filename, mode='r', mmap=None, version=1): + def __init__(self, filename, mode='r', mmap=None, version=1, + maskandscale=False): """Initialize netcdf_file from fileobj (str or file-like).""" + if mode not in 'rwa': + raise ValueError("Mode must be either 'r', 'w' or 'a'.") + if hasattr(filename, 'seek'): # file-like self.fp = filename self.filename = 'None' @@ -207,34 +257,39 @@ def __init__(self, filename, mode='r', mmap=None, version=1): raise ValueError('Cannot use file object for mmap') else: # maybe it's a string self.filename = filename - self.fp = open(self.filename, '%sb' % mode) + omode = 'r+' if mode == 'a' else mode + self.fp = open(self.filename, '%sb' % omode) if mmap is None: - mmap = True - try: - self.fp.seek(0, 2) - except ValueError: - self.file_bytes = -1 # Unknown file length (gzip). - else: - self.file_bytes = self.fp.tell() - self.fp.seek(0) + # Mmapped files on PyPy cannot be usually closed + # before the GC runs, so it's better to use mmap=False + # as the default. 
+ mmap = (not IS_PYPY) - self.use_mmap = mmap - self.version_byte = version + if mode != 'r': + # Cannot read write-only files + mmap = False - if not mode in 'rw': - raise ValueError("Mode must be either 'r' or 'w'.") + self.use_mmap = mmap self.mode = mode + self.version_byte = version + self.maskandscale = maskandscale - self.dimensions = {} - self.variables = {} + self.dimensions = OrderedDict() + self.variables = OrderedDict() self._dims = [] self._recs = 0 self._recsize = 0 - self._attributes = {} + self._mm = None + self._mm_buf = None + if self.use_mmap: + self._mm = mm.mmap(self.fp.fileno(), 0, access=mm.ACCESS_READ) + self._mm_buf = np.frombuffer(self._mm, dtype=np.int8) + + self._attributes = OrderedDict() - if mode == 'r': + if mode in 'ra': self._read() def __setattr__(self, attr, value): @@ -248,10 +303,28 @@ def __setattr__(self, attr, value): def close(self): """Closes the NetCDF file.""" - if not self.fp.closed: + if hasattr(self, 'fp') and not self.fp.closed: try: self.flush() finally: + self.variables = OrderedDict() + if self._mm_buf is not None: + ref = weakref.ref(self._mm_buf) + self._mm_buf = None + if ref() is None: + # self._mm_buf is gc'd, and we can close the mmap + self._mm.close() + else: + # we cannot close self._mm, since self._mm_buf is + # alive and there may still be arrays referring to it + warnings.warn(( + "Cannot close a netcdf_file opened with mmap=True, when " + "netcdf_variables or arrays referring to its data still exist. " + "All data arrays obtained from such files refer directly to " + "data on disk, and must be copied before the file can be cleanly " + "closed. (See netcdf_file docstring for more information on mmap.)" + ), category=RuntimeWarning) + self._mm = None self.fp.close() __del__ = close @@ -281,6 +354,9 @@ def createDimension(self, name, length): createVariable """ + if length is None and self._dims: + raise ValueError("Only first dimension may be unlimited!") + self.dimensions[name] = length self._dims.append(name) @@ -324,7 +400,9 @@ def createVariable(self, name, type, dimensions): raise ValueError("NetCDF 3 does not support type %s" % type) data = empty(shape_, dtype=type.newbyteorder("B")) # convert to big endian always for NetCDF 3 - self.variables[name] = netcdf_variable(data, typecode, size, shape, dimensions) + self.variables[name] = netcdf_variable( + data, typecode, size, shape, dimensions, + maskandscale=self.maskandscale) return self.variables[name] def flush(self): @@ -336,7 +414,7 @@ def flush(self): sync : Identical function """ - if hasattr(self, 'mode') and self.mode is 'w': + if hasattr(self, 'mode') and self.mode in 'wa': self._write() sync = flush @@ -378,7 +456,7 @@ def _write_att_array(self, attributes): self._pack_int(len(attributes)) for name, values in attributes.items(): self._pack_string(name) - self._write_values(values) + self._write_att_values(values) else: self.fp.write(ABSENT) @@ -387,11 +465,13 @@ def _write_var_array(self): self.fp.write(NC_VARIABLE) self._pack_int(len(self.variables)) - # Sort variables non-recs first, then recs. We use a DSU - # since some people use pupynere with Python 2.3.x. - deco = [(v._shape and not v.isrec, k) for (k, v) in self.variables.items()] - deco.sort() - variables = [k for (unused, k) in deco][::-1] + # Sort variable names non-recs first, then recs. + def sortkey(n): + v = self.variables[n] + if v.isrec: + return (-1,) + return v._shape + variables = sorted(self.variables, key=sortkey, reverse=True) # Set the metadata for all variables. 
for name in variables: @@ -429,8 +509,8 @@ def _write_var_metadata(self, name): vsize = var.data[0].size * var.data.itemsize except IndexError: vsize = 0 - rec_vars = len([var for var in self.variables.values() - if var.isrec]) + rec_vars = len([v for v in self.variables.values() + if v.isrec]) if rec_vars > 1: vsize += -vsize % 4 self.variables[name].__dict__['_vsize'] = vsize @@ -453,12 +533,17 @@ def _write_var_data(self, name): if not var.isrec: self.fp.write(var.data.tostring()) count = var.data.size * var.data.itemsize - self.fp.write(b'0' * (var._vsize - count)) + self._write_var_padding(var, var._vsize - count) else: # record variable # Handle rec vars with shape[0] < nrecs. if self._recs > len(var.data): shape = (self._recs,) + var.data.shape[1:] - var.data.resize(shape) + # Resize in-place does not always work since + # the array might not be single-segment + try: + var.data.resize(shape) + except ValueError: + var.__dict__['data'] = np.resize(var.data, shape).astype(var.data.dtype) pos0 = pos = self.fp.tell() for rec in var.data: @@ -471,30 +556,42 @@ def _write_var_data(self, name): self.fp.write(rec.tostring()) # Padding count = rec.size * rec.itemsize - self.fp.write(b'0' * (var._vsize - count)) + self._write_var_padding(var, var._vsize - count) pos += self._recsize self.fp.seek(pos) self.fp.seek(pos0 + var._vsize) - def _write_values(self, values): + def _write_var_padding(self, var, size): + encoded_fill_value = var._get_encoded_fill_value() + num_fills = size // len(encoded_fill_value) + self.fp.write(encoded_fill_value * num_fills) + + def _write_att_values(self, values): if hasattr(values, 'dtype'): nc_type = REVERSE[values.dtype.char, values.dtype.itemsize] else: - types = [(t, NC_INT) for t in integer_types] - types += [ + types = [ + (int, NC_INT), (float, NC_FLOAT), - (str, NC_CHAR), + (str, NC_CHAR) ] - try: - sample = values[0] - except TypeError: + # bytes index into scalars in py3k. Check for "string" types + if isinstance(values, (str, bytes)): sample = values + else: + try: + sample = values[0] # subscriptable? + except TypeError: + sample = values # scalar + for class_, nc_type in types: if isinstance(sample, class_): break typecode, size = TYPEMAP[nc_type] dtype_ = '>%s' % typecode + # asarray() dies with bytes and '>c' in py3k. 
Change to 'S' + dtype_ = 'S' if dtype_ == '>c' else dtype_ values = asarray(values, dtype=dtype_) @@ -511,7 +608,7 @@ def _write_values(self, values): values = values.byteswap() self.fp.write(values.tostring()) count = values.size * values.itemsize - self.fp.write(b'0' * (-count % 4)) # pad + self.fp.write(b'\x00' * (-count % 4)) # pad def _read(self): # Check magic bytes and version @@ -532,7 +629,7 @@ def _read_numrecs(self): def _read_dim_array(self): header = self.fp.read(4) - if not header in [ZERO, NC_DIMENSION]: + if header not in [ZERO, NC_DIMENSION]: raise ValueError("Unexpected header.") count = self._unpack_int() @@ -548,19 +645,19 @@ def _read_gatt_array(self): def _read_att_array(self): header = self.fp.read(4) - if not header in [ZERO, NC_ATTRIBUTE]: + if header not in [ZERO, NC_ATTRIBUTE]: raise ValueError("Unexpected header.") count = self._unpack_int() - attributes = {} + attributes = OrderedDict() for attr in range(count): name = asstr(self._unpack_string()) - attributes[name] = self._read_values() + attributes[name] = self._read_att_values() return attributes def _read_var_array(self): header = self.fp.read(4) - if not header in [ZERO, NC_VARIABLE]: + if header not in [ZERO, NC_VARIABLE]: raise ValueError("Unexpected header.") begin = 0 @@ -570,7 +667,7 @@ def _read_var_array(self): for var in range(count): (name, dimensions, shape, attributes, typecode, size, dtype_, begin_, vsize) = self._read_var() - # https://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html + # https://www.unidata.ucar.edu/software/netcdf/docs/user_guide.html # Note that vsize is the product of the dimension lengths # (omitting the record dimension) and the number of bytes # per value (determined from the type), increased to the @@ -607,28 +704,21 @@ def _read_var_array(self): else: # not a record variable # Calculate size to avoid problems with vsize (above) a_size = reduce(mul, shape, 1) * size - if self.file_bytes >= 0 and begin_ + a_size > self.file_bytes: - data = frombuffer(b'\x00'*a_size, dtype=dtype_) - elif self.use_mmap: - mm = mmap(self.fp.fileno(), begin_+a_size, access=ACCESS_READ) - data = ndarray.__new__(ndarray, shape, dtype=dtype_, - buffer=mm, offset=begin_, order=0) + if self.use_mmap: + data = self._mm_buf[begin_:begin_+a_size].view(dtype=dtype_) + data.shape = shape else: pos = self.fp.tell() self.fp.seek(begin_) - # Try to read file, which may fail because the data is - # at or past the end of file. In that case, we treat - # this data as zeros. - buf = self.fp.read(a_size) - if len(buf) < a_size: - buf = b'\x00'*a_size - data = frombuffer(buf, dtype=dtype_) + data = frombuffer(self.fp.read(a_size), dtype=dtype_ + ).copy() data.shape = shape self.fp.seek(pos) # Add variable. self.variables[name] = netcdf_variable( - data, typecode, size, shape, dimensions, attributes) + data, typecode, size, shape, dimensions, attributes, + maskandscale=self.maskandscale) if rec_vars: # Remove padding when only one record variable. @@ -638,13 +728,13 @@ def _read_var_array(self): # Build rec array. 
if self.use_mmap: - mm = mmap(self.fp.fileno(), begin+self._recs*self._recsize, access=ACCESS_READ) - rec_array = ndarray.__new__(ndarray, (self._recs,), dtype=dtypes, - buffer=mm, offset=begin, order=0) + rec_array = self._mm_buf[begin:begin+self._recs*self._recsize].view(dtype=dtypes) + rec_array.shape = (self._recs,) else: pos = self.fp.tell() self.fp.seek(begin) - rec_array = frombuffer(self.fp.read(self._recs*self._recsize), dtype=dtypes) + rec_array = frombuffer(self.fp.read(self._recs*self._recsize), + dtype=dtypes).copy() rec_array.shape = (self._recs,) self.fp.seek(pos) @@ -676,7 +766,7 @@ def _read_var(self): return name, dimensions, shape, attributes, typecode, size, dtype_, begin, vsize - def _read_values(self): + def _read_att_values(self): nc_type = self.fp.read(4) n = self._unpack_int() @@ -686,8 +776,8 @@ def _read_values(self): values = self.fp.read(int(count)) self.fp.read(-count % 4) # read padding - if typecode is not 'c': - values = frombuffer(values, dtype='>%s' % typecode) + if typecode != 'c': + values = frombuffer(values, dtype='>%s' % typecode).copy() if values.shape == (1,): values = values[0] else: @@ -718,7 +808,7 @@ def _pack_string(self, s): count = len(s) self._pack_int(count) self.fp.write(asbytes(s)) - self.fp.write(b'0' * (-count % 4)) # pad + self.fp.write(b'\x00' * (-count % 4)) # pad def _unpack_string(self): count = self._unpack_int() @@ -729,7 +819,7 @@ def _unpack_string(self): class netcdf_variable(object): """ - A data object for the `netcdf` module. + A data object for netcdf files. `netcdf_variable` objects are constructed by calling the method `netcdf_file.createVariable` on the `netcdf_file` object. `netcdf_variable` @@ -763,6 +853,9 @@ class netcdf_variable(object): attributes : dict, optional Attribute values (any type) keyed by string names. These attributes become attributes for the netcdf_variable object. + maskandscale : bool, optional + Whether to automatically scale and/or mask data based on attributes. + Default is False. Attributes @@ -777,14 +870,17 @@ class netcdf_variable(object): isrec, shape """ - def __init__(self, data, typecode, size, shape, dimensions, attributes=None): + def __init__(self, data, typecode, size, shape, dimensions, + attributes=None, + maskandscale=False): self.data = data self._typecode = typecode self._size = size self._shape = shape self.dimensions = dimensions + self.maskandscale = maskandscale - self._attributes = attributes or {} + self._attributes = attributes or OrderedDict() for k, v in self._attributes.items(): self.__dict__[k] = v @@ -806,7 +902,7 @@ def isrec(self): `netcdf_variable`. 
""" - return self.data.shape and not self._shape[0] + return bool(self.data.shape) and not self._shape[0] isrec = property(isrec) def shape(self): @@ -883,9 +979,36 @@ def itemsize(self): return self._size def __getitem__(self, index): - return self.data[index] + if not self.maskandscale: + return self.data[index] + + data = self.data[index].copy() + missing_value = self._get_missing_value() + data = self._apply_missing_value(data, missing_value) + scale_factor = self._attributes.get('scale_factor') + add_offset = self._attributes.get('add_offset') + if add_offset is not None or scale_factor is not None: + data = data.astype(np.float64) + if scale_factor is not None: + data = data * scale_factor + if add_offset is not None: + data += add_offset + + return data def __setitem__(self, index, data): + if self.maskandscale: + missing_value = ( + self._get_missing_value() or + getattr(data, 'fill_value', 999999)) + self._attributes.setdefault('missing_value', missing_value) + self._attributes.setdefault('_FillValue', missing_value) + data = ((data - self._attributes.get('add_offset', 0.0)) / + self._attributes.get('scale_factor', 1.0)) + data = np.ma.asarray(data).filled(missing_value) + if self._typecode not in 'fd' and data.dtype.kind == 'f': + data = np.round(data) + # Expand data for record vars? if self.isrec: if isinstance(index, tuple): @@ -898,9 +1021,86 @@ def __setitem__(self, index, data): recs = rec_index + 1 if recs > len(self.data): shape = (recs,) + self._shape[1:] - self.data.resize(shape) + # Resize in-place does not always work since + # the array might not be single-segment + try: + self.data.resize(shape) + except ValueError: + self.__dict__['data'] = np.resize(self.data, shape).astype(self.data.dtype) self.data[index] = data + def _default_encoded_fill_value(self): + """ + The default encoded fill-value for this Variable's data type. + """ + nc_type = REVERSE[self.typecode(), self.itemsize()] + return FILLMAP[nc_type] + + def _get_encoded_fill_value(self): + """ + Returns the encoded fill value for this variable as bytes. + + This is taken from either the _FillValue attribute, or the default fill + value for this variable's data type. + """ + if '_FillValue' in self._attributes: + fill_value = np.array(self._attributes['_FillValue'], + dtype=self.data.dtype).tostring() + if len(fill_value) == self.itemsize(): + return fill_value + else: + return self._default_encoded_fill_value() + else: + return self._default_encoded_fill_value() + + def _get_missing_value(self): + """ + Returns the value denoting "no data" for this variable. + + If this variable does not have a missing/fill value, returns None. + + If both _FillValue and missing_value are given, give precedence to + _FillValue. The netCDF standard gives special meaning to _FillValue; + missing_value is just used for compatibility with old datasets. + """ + + if '_FillValue' in self._attributes: + missing_value = self._attributes['_FillValue'] + elif 'missing_value' in self._attributes: + missing_value = self._attributes['missing_value'] + else: + missing_value = None + + return missing_value + + @staticmethod + def _apply_missing_value(data, missing_value): + """ + Applies the given missing value to the data array. + + Returns a numpy.ma array, with any value equal to missing_value masked + out (unless missing_value is None, in which case the original array is + returned). 
+ """ + + if missing_value is None: + newdata = data + else: + try: + missing_value_isnan = np.isnan(missing_value) + except (TypeError, NotImplementedError): + # some data types (e.g., characters) cannot be tested for NaN + missing_value_isnan = False + + if missing_value_isnan: + mymask = np.isnan(data) + else: + mymask = (data == missing_value) + + newdata = np.ma.masked_where(mymask, data) + + return newdata + NetCDFFile = netcdf_file NetCDFVariable = netcdf_variable diff --git a/nibabel/externals/oset.py b/nibabel/externals/oset.py index 83b1e3e24d..0a29c661c5 100644 --- a/nibabel/externals/oset.py +++ b/nibabel/externals/oset.py @@ -13,13 +13,8 @@ License: BSD-3 """ -from __future__ import absolute_import -try: - from collections.abc import MutableSet -except ImportError: - # PY2 compatibility - from collections import MutableSet +from collections.abc import MutableSet KEY, PREV, NEXT = range(3) diff --git a/nibabel/externals/six.py b/nibabel/externals/six.py deleted file mode 100644 index 77e656cd67..0000000000 --- a/nibabel/externals/six.py +++ /dev/null @@ -1,12 +0,0 @@ -""" Shim allowing some grace time for removal of six.py copy """ -# Remove around version 4.0 -from __future__ import absolute_import - -import warnings - -warnings.warn("We no longer carry a copy of the 'six' package in nibabel; " - "Please import the 'six' package directly", - FutureWarning, - stacklevel=2) - -from six import * # noqa diff --git a/nibabel/externals/tests/test_netcdf.py b/nibabel/externals/tests/test_netcdf.py index 679d9d5ff4..f85393be4e 100644 --- a/nibabel/externals/tests/test_netcdf.py +++ b/nibabel/externals/tests/test_netcdf.py @@ -1,22 +1,16 @@ ''' Tests for netcdf ''' -from __future__ import division, print_function, absolute_import import os from os.path import join as pjoin, dirname -import shutil -import tempfile -import time -import sys from io import BytesIO from glob import glob from contextlib import contextmanager import numpy as np -from numpy.testing import dec, assert_ -from ..netcdf import netcdf_file +import pytest -from nose.tools import assert_true, assert_false, assert_equal, assert_raises +from ..netcdf import netcdf_file TEST_DATA_PATH = pjoin(dirname(__file__), 'data') @@ -37,54 +31,41 @@ def make_simple(*args, **kwargs): f.close() -def gen_for_simple(ncfileobj): - ''' Generator for example fileobj tests ''' - yield assert_equal, ncfileobj.history, b'Created for a test' +def assert_simple_truths(ncfileobj): + assert ncfileobj.history == b'Created for a test' time = ncfileobj.variables['time'] - yield assert_equal, time.units, b'days since 2008-01-01' - yield assert_equal, time.shape, (N_EG_ELS,) - yield assert_equal, time[-1], N_EG_ELS-1 - - -def test_read_write_files(): - # test round trip for example file - cwd = os.getcwd() - try: - tmpdir = tempfile.mkdtemp() - os.chdir(tmpdir) - with make_simple('simple.nc', 'w') as f: - pass - # To read the NetCDF file we just created:: - with netcdf_file('simple.nc') as f: - # Using mmap is the default - yield assert_true, f.use_mmap - for testargs in gen_for_simple(f): - yield testargs - - # Now without mmap - with netcdf_file('simple.nc', mmap=False) as f: - # Using mmap is the default - yield assert_false, f.use_mmap - for testargs in gen_for_simple(f): - yield testargs - - # To read the NetCDF file we just created, as file object, no - # mmap. 
When n * n_bytes(var_type) is not divisible by 4, this - # raised an error in pupynere 1.0.12 and scipy rev 5893, because - # calculated vsize was rounding up in units of 4 - see - # https://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html - fobj = open('simple.nc', 'rb') - with netcdf_file(fobj) as f: - # by default, don't use mmap for file-like - yield assert_false, f.use_mmap - for testargs in gen_for_simple(f): - yield testargs - except: - os.chdir(cwd) - shutil.rmtree(tmpdir) - raise - os.chdir(cwd) - shutil.rmtree(tmpdir) + assert time.units == b'days since 2008-01-01' + assert time.shape == (N_EG_ELS,) + assert time[-1] == N_EG_ELS - 1 + + +def test_read_write_files(tmp_path): + fname = str(tmp_path / 'simple.nc') + + with make_simple(fname, 'w') as f: + pass + # To read the NetCDF file we just created:: + with netcdf_file(fname) as f: + # Using mmap is the default + assert f.use_mmap + assert_simple_truths(f) + + # Now without mmap + with netcdf_file(fname, mmap=False) as f: + # Using mmap is the default + assert not f.use_mmap + assert_simple_truths(f) + + # To read the NetCDF file we just created, as file object, no + # mmap. When n * n_bytes(var_type) is not divisible by 4, this + # raised an error in pupynere 1.0.12 and scipy rev 5893, because + # calculated vsize was rounding up in units of 4 - see + # https://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html + fobj = open(fname, 'rb') + with netcdf_file(fobj) as f: + # by default, don't use mmap for file-like + assert not f.use_mmap + assert_simple_truths(f) def test_read_write_sio(): @@ -94,12 +75,12 @@ def test_read_write_sio(): eg_sio2 = BytesIO(str_val) with netcdf_file(eg_sio2) as f2: - for testargs in gen_for_simple(f2): - yield testargs + assert_simple_truths(f2) # Test that error is raised if attempting mmap for sio eg_sio3 = BytesIO(str_val) - yield assert_raises, ValueError, netcdf_file, eg_sio3, 'r', True + with pytest.raises(ValueError): + netcdf_file(eg_sio3, 'r', True) # Test 64-bit offset write / read eg_sio_64 = BytesIO() with make_simple(eg_sio_64, 'w', version=2) as f_64: @@ -107,15 +88,13 @@ def test_read_write_sio(): eg_sio_64 = BytesIO(str_val) with netcdf_file(eg_sio_64) as f_64: - for testargs in gen_for_simple(f_64): - yield testargs - yield assert_equal, f_64.version_byte, 2 + assert_simple_truths(f_64) + assert f_64.version_byte == 2 # also when version 2 explicitly specified eg_sio_64 = BytesIO(str_val) with netcdf_file(eg_sio_64, version=2) as f_64: - for testargs in gen_for_simple(f_64): - yield testargs - yield assert_equal, f_64.version_byte, 2 + assert_simple_truths(f_64) + assert f_64.version_byte == 2 def test_read_example_data(): @@ -135,7 +114,8 @@ def test_itemset_no_segfault_on_readonly(): time_var = f.variables['time'] # time_var.assignValue(42) should raise a RuntimeError--not seg. fault! 
- assert_raises(RuntimeError, time_var.assignValue, 42) + with pytest.raises(RuntimeError): + time_var.assignValue(42) def test_write_invalid_dtype(): @@ -148,14 +128,14 @@ def test_write_invalid_dtype(): with netcdf_file(BytesIO(), 'w') as f: f.createDimension('time', N_EG_ELS) for dt in dtypes: - yield assert_raises, ValueError, \ - f.createVariable, 'time', dt, ('time',) + with pytest.raises(ValueError): + f.createVariable('time', dt, ('time',)) def test_flush_rewind(): stream = BytesIO() with make_simple(stream, mode='w') as f: - x = f.createDimension('x',4) + x = f.createDimension('x', 4) v = f.createVariable('v', 'i2', ['x']) v[:] = 1 f.flush() @@ -163,7 +143,7 @@ def test_flush_rewind(): f.flush() len_double = len(stream.getvalue()) - assert_(len_single == len_double) + assert len_single == len_double def test_dtype_specifiers(): @@ -193,8 +173,8 @@ def test_ticket_1720(): io = BytesIO(contents) with netcdf_file(io, 'r') as f: - assert_equal(f.history, b'Created for a test') + assert f.history == b'Created for a test' float_var = f.variables['float_var'] - assert_equal(float_var.units, b'metres') - assert_equal(float_var.shape, (10,)) - assert_(np.allclose(float_var[:], items)) + assert float_var.units == b'metres' + assert float_var.shape == (10,) + assert np.allclose(float_var[:], items) diff --git a/nibabel/externals/tests/test_six.py b/nibabel/externals/tests/test_six.py deleted file mode 100644 index 35db2ca851..0000000000 --- a/nibabel/externals/tests/test_six.py +++ /dev/null @@ -1,33 +0,0 @@ -""" Test we are deprecating externals.six import -""" - -import warnings -import types - -from nose.tools import assert_true, assert_equal - -from nibabel.deprecated import ModuleProxy - - -def test_old_namespace(): - with warnings.catch_warnings(record=True) as warns: - # Top level import. - # This import does not trigger an import of the six.py module, because - # it's the proxy object. - from nibabel.externals import six - assert_equal(warns, []) - # If there was a previous import it will be module, otherwise it will be - # a proxy. - previous_import = isinstance(six, types.ModuleType) - if not previous_import: - assert_true(isinstance(six, ModuleProxy)) - shim_BytesIO = six.BytesIO # just to check it works - # There may or may not be a warning raised on accessing the proxy, - # depending on whether the externals.six.py module is already imported - # in this test run. - if not previous_import: - assert_equal(warns.pop(0).category, FutureWarning) - from six import BytesIO - assert_equal(warns, []) - # The import from old module is the same as that from new - assert_true(shim_BytesIO is BytesIO) diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index 7cc5b10648..90bbd8e652 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -8,8 +8,8 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## ''' Common interface for any image format--volume or surface, binary or xml.''' +import io from copy import deepcopy -from six import string_types from .fileholders import FileHolder from .filename_parser import (types_filenames, TypesFilenamesError, splitext_addext) @@ -78,8 +78,8 @@ class FileBasedImage(object): methods: - * .get_header() (deprecated, use header property instead) - * .to_filename(fname) - writes data to filename(s) derived from + * get_header() (deprecated, use header property instead) + * to_filename(fname) - writes data to filename(s) derived from ``fname``, where the derivation may differ between formats. 
* to_file_map() - save image to files with which the image is already associated. @@ -120,7 +120,7 @@ class FileBasedImage(object): You can get the data out again with:: - img.get_data() + img.get_fdata() Less commonly, for some image types that support it, you might want to fetch out the unscaled array via the object containing the data:: @@ -246,7 +246,7 @@ def set_filename(self, filename): Parameters ---------- - filename : str + filename : str or os.PathLike If the image format only has one file associated with it, this will be the only filename set into the image ``.file_map`` attribute. Otherwise, the image instance will @@ -279,7 +279,7 @@ def filespec_to_file_map(klass, filespec): Parameters ---------- - filespec : str + filespec : str or os.PathLike Filename that might be for this image file type. Returns @@ -321,7 +321,7 @@ def to_filename(self, filename): Parameters ---------- - filename : str + filename : str or os.PathLike filename to which to save image. We will parse `filename` with ``filespec_to_file_map`` to work out names for image, header etc. @@ -373,7 +373,7 @@ def make_file_map(klass, mapping=None): for key, ext in klass.files_types: file_map[key] = FileHolder() mapval = mapping.get(key, None) - if isinstance(mapval, string_types): + if isinstance(mapval, str): file_map[key].filename = mapval elif hasattr(mapval, 'tell'): file_map[key].fileobj = mapval @@ -419,7 +419,7 @@ def _sniff_meta_for(klass, filename, sniff_nbytes, sniff=None): Parameters ---------- - filename : str + filename : str or os.PathLike Filename for an image, or an image header (metadata) file. If `filename` points to an image data file, and the image type has a separate "header" file, we work out the name of the header file, @@ -466,7 +466,7 @@ def path_maybe_image(klass, filename, sniff=None, sniff_max=1024): Parameters ---------- - filename : str + filename : str or os.PathLike Filename for an image, or an image header (metadata) file. If `filename` points to an image data file, and the image type has a separate "header" file, we work out the name of the header file, @@ -511,3 +511,92 @@ def path_maybe_image(klass, filename, sniff=None, sniff_max=1024): if sniff is None or len(sniff[0]) < klass._meta_sniff_len: return False, sniff return klass.header_class.may_contain_header(sniff[0]), sniff + + +class SerializableImage(FileBasedImage): + ''' + Abstract image class for (de)serializing images to/from byte strings. + + The class doesn't define any image properties. + + It has: + + methods: + + * to_bytes() - serialize image to byte string + + classmethods: + + * from_bytes(bytestring) - make instance by deserializing a byte string + + Loading from byte strings should provide round-trip equivalence: + + .. code:: python + + img_a = klass.from_bytes(bstr) + img_b = klass.from_bytes(img_a.to_bytes()) + + np.allclose(img_a.get_fdata(), img_b.get_fdata()) + np.allclose(img_a.affine, img_b.affine) + + Further, for images that are single files on disk, the following methods of loading + the image must be equivalent: + + .. code:: python + + img = klass.from_filename(fname) + + with open(fname, 'rb') as fobj: + img = klass.from_bytes(fobj.read()) + + And the following methods of saving a file must be equivalent: + + .. code:: python + + img.to_filename(fname) + + with open(fname, 'wb') as fobj: + fobj.write(img.to_bytes()) + + Images that consist of separate header and data files (e.g., Analyze + images) currently do not support this interface. 
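As a concrete single-file sketch of this round trip, using the MGH format that this patch makes serializable (the array contents here are arbitrary):

    import numpy as np
    from nibabel.freesurfer.mghformat import MGHImage

    img = MGHImage(np.zeros((4, 4, 4), dtype=np.float32), np.eye(4))
    blob = img.to_bytes()                    # bytes of the single .mgh file
    img2 = MGHImage.from_bytes(blob)         # reconstruct from the byte string
    assert np.allclose(img.get_fdata(), img2.get_fdata())
    assert np.allclose(img.affine, img2.affine)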
+ For multi-file images, ``to_bytes()`` and ``from_bytes()`` must be + overridden, and any encoding details should be documented. + ''' + + @classmethod + def from_bytes(klass, bytestring): + """ Construct image from a byte string + + Class method + + Parameters + ---------- + bstring : bytes + Byte string containing the on-disk representation of an image + """ + if len(klass.files_types) > 1: + raise NotImplementedError("from_bytes is undefined for multi-file images") + bio = io.BytesIO(bytestring) + file_map = klass.make_file_map({'image': bio, 'header': bio}) + return klass.from_file_map(file_map) + + def to_bytes(self): + """ Return a ``bytes`` object with the contents of the file that would + be written if the image were saved. + + Parameters + ---------- + None + + Returns + ------- + bytes + Serialized image + """ + if len(self.__class__.files_types) > 1: + raise NotImplementedError("to_bytes() is undefined for multi-file images") + bio = io.BytesIO() + file_map = self.make_file_map({'image': bio, 'header': bio}) + self.to_file_map(file_map) + return bio.getvalue() diff --git a/nibabel/fileholders.py b/nibabel/fileholders.py index 5a858f1dbf..35cfd3c348 100644 --- a/nibabel/fileholders.py +++ b/nibabel/fileholders.py @@ -99,7 +99,7 @@ def file_like(self): def copy_file_map(file_map): - ''' Copy mapping of fileholders given by `file_map` + r''' Copy mapping of fileholders given by `file_map` Parameters ---------- diff --git a/nibabel/filename_parser.py b/nibabel/filename_parser.py index db6e073018..ed04610fdd 100644 --- a/nibabel/filename_parser.py +++ b/nibabel/filename_parser.py @@ -9,16 +9,43 @@ ''' Create filename pairs, triplets etc, with expected extensions ''' import os -try: - basestring -except NameError: - basestring = str +import pathlib class TypesFilenamesError(Exception): pass +def _stringify_path(filepath_or_buffer): + """Attempt to convert a path-like object to a string. + + Parameters + ---------- + filepath_or_buffer : str or os.PathLike + + Returns + ------- + str_filepath_or_buffer : str + + Notes + ----- + Objects supporting the fspath protocol (python 3.6+) are coerced + according to its __fspath__ method. + For backwards compatibility with older pythons, pathlib.Path objects + are specially coerced. + Any other object is passed through unchanged, which includes bytes, + strings, buffers, or anything else that's not even path-like. + + Copied from: + https://github.com/pandas-dev/pandas/blob/325dd686de1589c17731cf93b649ed5ccb5a99b4/pandas/io/common.py#L131-L160 + """ + if hasattr(filepath_or_buffer, "__fspath__"): + return filepath_or_buffer.__fspath__() + elif isinstance(filepath_or_buffer, pathlib.Path): + return str(filepath_or_buffer) + return filepath_or_buffer + + def types_filenames(template_fname, types_exts, trailing_suffixes=('.gz', '.bz2'), enforce_extensions=True, @@ -31,7 +58,7 @@ def types_filenames(template_fname, types_exts, Parameters ---------- - template_fname : str + template_fname : str or os.PathLike template filename from which to construct output dict of filenames, with given `types_exts` type to extension mapping. 
If ``self.enforce_extensions`` is True, then filename must have one @@ -82,7 +109,8 @@ def types_filenames(template_fname, types_exts, >>> tfns == {'t1': '/path/test.funny', 't2': '/path/test.ext2'} True ''' - if not isinstance(template_fname, basestring): + template_fname = _stringify_path(template_fname) + if not isinstance(template_fname, str): raise TypesFilenamesError('Need file name as input ' 'to set_filenames') if template_fname.endswith('.'): @@ -151,7 +179,7 @@ def parse_filename(filename, Parameters ---------- - filename : str + filename : str or os.PathLike filename in which to search for type extensions types_exts : sequence of sequences sequence of (name, extension) str sequences defining type to @@ -190,6 +218,8 @@ def parse_filename(filename, >>> parse_filename('/path/fnameext2.gz', types_exts, ('.gz',)) ('/path/fname', 'ext2', '.gz', 't2') ''' + filename = _stringify_path(filename) + ignored = None if match_case: endswith = _endswith @@ -232,7 +262,7 @@ def splitext_addext(filename, Parameters ---------- - filename : str + filename : str or os.PathLike filename that may end in any or none of `addexts` match_case : bool, optional If True, match case of `addexts` and `filename`, otherwise do @@ -257,6 +287,8 @@ def splitext_addext(filename, >>> splitext_addext('fname.ext.foo', ('.foo', '.bar')) ('fname', '.ext', '.foo') ''' + filename = _stringify_path(filename) + if match_case: endswith = _endswith else: diff --git a/nibabel/fileslice.py b/nibabel/fileslice.py index e55f48c127..af410a7e22 100644 --- a/nibabel/fileslice.py +++ b/nibabel/fileslice.py @@ -1,12 +1,11 @@ """ Utilities for getting array slices out of file-like objects """ -from __future__ import division import operator from numbers import Integral from mmap import mmap -from six.moves import reduce +from functools import reduce import numpy as np diff --git a/nibabel/fileutils.py b/nibabel/fileutils.py index be9c214616..b88e2f7128 100644 --- a/nibabel/fileutils.py +++ b/nibabel/fileutils.py @@ -23,9 +23,8 @@ def read_zt_byte_strings(fobj, n_strings=1, bufsize=1024): Parameters ---------- f : fileobj - File object to use. Should implement ``read``, returning byte objects - (str in Python 2), and ``seek(n, 1)`` to seek from current file - position. + File object to use. Should implement ``read``, returning byte objects, + and ``seek(n, 1)`` to seek from current file position. n_strings : int, optional Number of byte strings to return bufsize: int, optional diff --git a/nibabel/freesurfer/io.py b/nibabel/freesurfer/io.py index edce19c6cd..f8d2442662 100644 --- a/nibabel/freesurfer/io.py +++ b/nibabel/freesurfer/io.py @@ -1,6 +1,5 @@ """ Read / write FreeSurfer geometry, morphometry, label, annotation formats """ -from __future__ import division, print_function, absolute_import import warnings import numpy as np @@ -8,7 +7,6 @@ import time from collections import OrderedDict -from six.moves import xrange from ..openers import Opener @@ -357,7 +355,7 @@ def read_annot(filepath, orig_ids=False): to any label and orig_ids=False, its id will be set to -1. ctab : ndarray, shape (n_labels, 5) RGBT + label id colortable array. - names : list of str (python 2), list of bytes (python 3) + names : list of bytes The names of the labels. The length of the list is n_labels. 
""" with open(filepath, "rb") as fobj: @@ -432,7 +430,7 @@ def _read_annot_ctab_old_format(fobj, n_entries): orig_tab = orig_tab[:-1] names = list() ctab = np.zeros((n_entries, 5), dt) - for i in xrange(n_entries): + for i in range(n_entries): # structure name length + string name_length = np.fromfile(fobj, dt, 1)[0] name = np.fromfile(fobj, "|S%d" % name_length, 1)[0] @@ -483,7 +481,7 @@ def _read_annot_ctab_new_format(fobj, ctab_version): # number of LUT entries present in the file entries_to_read = np.fromfile(fobj, dt, 1)[0] names = list() - for _ in xrange(entries_to_read): + for _ in range(entries_to_read): # index of this entry idx = np.fromfile(fobj, dt, 1)[0] # structure name length + string @@ -561,7 +559,7 @@ def write_string(s): write(-2) # maxstruc - write(np.max(labels) + 1) + write(max(np.max(labels) + 1, ctab.shape[0])) # File of LUT is unknown. write_string('NOFILE') diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index bf92bd962c..160cead87b 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -16,10 +16,11 @@ from ..affines import voxel_sizes, from_matvec from ..volumeutils import (array_to_file, array_from_file, endian_codes, Recoder) +from ..filebasedimages import SerializableImage +from ..filename_parser import _stringify_path from ..spatialimages import HeaderDataError, SpatialImage from ..fileholders import FileHolder from ..arrayproxy import ArrayProxy, reshape_dataobj -from ..keywordonly import kw_only_meth from ..openers import ImageOpener from ..batteryrunners import BatteryRunner, Report from ..wrapstruct import LabeledWrapStruct @@ -503,7 +504,7 @@ def __setitem__(self, item, value): super(MGHHeader, self).__setitem__(item, value) -class MGHImage(SpatialImage): +class MGHImage(SpatialImage, SerializableImage): """ Class for MGH format image """ header_class = MGHHeader @@ -528,21 +529,26 @@ def __init__(self, dataobj, affine, header=None, @classmethod def filespec_to_file_map(klass, filespec): + filespec = _stringify_path(filespec) """ Check for compressed .mgz format, then .mgh format """ if splitext(filespec)[1].lower() == '.mgz': return dict(image=FileHolder(filename=filespec)) return super(MGHImage, klass).filespec_to_file_map(filespec) @classmethod - @kw_only_meth(1) - def from_file_map(klass, file_map, mmap=True, keep_file_open=None): - '''Load image from `file_map` + def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): + ''' Class method to create image from mapping in ``file_map`` + + .. deprecated:: 2.4.1 + ``keep_file_open='auto'`` is redundant with `False` and has + been deprecated. It raises an error as of nibabel 3.0. Parameters ---------- - file_map : None or mapping, optional - files mapping. If None (default) use object's ``file_map`` - attribute instead + file_map : dict + Mapping with (kay, value) pairs of (``file_type``, FileHolder + instance giving file-likes for each file needed for this image + type. mmap : {True, False, 'c', 'r'}, optional, keyword only `mmap` controls the use of numpy memory mapping for reading image array data. If False, do not try numpy ``memmap`` for data array. @@ -550,19 +556,19 @@ def from_file_map(klass, file_map, mmap=True, keep_file_open=None): `mmap` value of True gives the same behavior as ``mmap='c'``. If image data file cannot be memory-mapped, ignore `mmap` value and read array from file. 
- keep_file_open : { None, 'auto', True, False }, optional, keyword only + keep_file_open : { None, True, False }, optional, keyword only `keep_file_open` controls whether a new file handle is created every time the image is accessed, or a single file handle is created and used for the lifetime of this ``ArrayProxy``. If ``True``, a single file handle is created and used. If ``False``, - a new file handle is created every time the image is accessed. If - ``'auto'``, and the optional ``indexed_gzip`` dependency is - present, a single file handle is created and persisted. If - ``indexed_gzip`` is not available, behaviour is the same as if - ``keep_file_open is False``. If ``file_map`` refers to an open - file handle, this setting has no effect. The default value - (``None``) will result in the value of + a new file handle is created every time the image is accessed. + If ``file_map`` refers to an open file handle, this setting has no + effect. The default value (``None``) will result in the value of ``nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT`` being used. + + Returns + ------- + img : MGHImage instance ''' if mmap not in (True, False, 'c', 'r'): raise ValueError("mmap should be one of {True, False, 'c', 'r'}") @@ -577,47 +583,6 @@ def from_file_map(klass, file_map, mmap=True, keep_file_open=None): img = klass(data, affine, header, file_map=file_map) return img - @classmethod - @kw_only_meth(1) - def from_filename(klass, filename, mmap=True, keep_file_open=None): - '''class method to create image from filename `filename` - - Parameters - ---------- - filename : str - Filename of image to load - mmap : {True, False, 'c', 'r'}, optional, keyword only - `mmap` controls the use of numpy memory mapping for reading image - array data. If False, do not try numpy ``memmap`` for data array. - If one of {'c', 'r'}, try numpy memmap with ``mode=mmap``. A - `mmap` value of True gives the same behavior as ``mmap='c'``. If - image data file cannot be memory-mapped, ignore `mmap` value and - read array from file. - keep_file_open : { None, 'auto', True, False }, optional, keyword only - `keep_file_open` controls whether a new file handle is created - every time the image is accessed, or a single file handle is - created and used for the lifetime of this ``ArrayProxy``. If - ``True``, a single file handle is created and used. If ``False``, - a new file handle is created every time the image is accessed. If - ``'auto'``, and the optional ``indexed_gzip`` dependency is - present, a single file handle is created and persisted. If - ``indexed_gzip`` is not available, behaviour is the same as if - ``keep_file_open is False``. The default value (``None``) will - result in the value of - ``nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT`` being used. 
- - Returns - ------- - img : MGHImage instance - ''' - if mmap not in (True, False, 'c', 'r'): - raise ValueError("mmap should be one of {True, False, 'c', 'r'}") - file_map = klass.filespec_to_file_map(filename) - return klass.from_file_map(file_map, mmap=mmap, - keep_file_open=keep_file_open) - - load = from_filename - def to_file_map(self, file_map=None): ''' Write image to `file_map` or contained ``self.file_map`` @@ -629,7 +594,7 @@ def to_file_map(self, file_map=None): ''' if file_map is None: file_map = self.file_map - data = self.get_data() + data = np.asanyarray(self.dataobj) self.update_header() hdr = self.header with file_map['image'].get_prepare_fileobj('wb') as mghf: diff --git a/nibabel/freesurfer/tests/test_io.py b/nibabel/freesurfer/tests/test_io.py index 1b6065f351..b2401a11ab 100644 --- a/nibabel/freesurfer/tests/test_io.py +++ b/nibabel/freesurfer/tests/test_io.py @@ -1,4 +1,3 @@ -from __future__ import division, print_function, absolute_import import os from os.path import join as pjoin, isdir import getpass @@ -7,12 +6,12 @@ import hashlib import warnings - from ...tmpdirs import InTemporaryDirectory -from nose.tools import assert_true +import unittest +import pytest import numpy as np -from numpy.testing import assert_equal, assert_raises, dec, assert_allclose +from numpy.testing import assert_allclose, assert_array_equal from .. import (read_geometry, read_morph_data, read_annot, read_label, write_geometry, write_morph_data, write_annot) @@ -36,10 +35,8 @@ data_path = pjoin(nib_data, 'nitest-freesurfer', DATA_SDIR) have_freesurfer = isdir(data_path) -freesurfer_test = dec.skipif( - not have_freesurfer, - 'cannot find freesurfer {0} directory'.format(DATA_SDIR)) - +freesurfer_test = unittest.skipUnless(have_freesurfer, + 'cannot find freesurfer {0} directory'.format(DATA_SDIR)) def _hash_file_content(fname): hasher = hashlib.md5() @@ -54,19 +51,18 @@ def test_geometry(): """Test IO of .surf""" surf_path = pjoin(data_path, "surf", "%s.%s" % ("lh", "inflated")) coords, faces = read_geometry(surf_path) - assert_equal(0, faces.min()) - assert_equal(coords.shape[0], faces.max() + 1) + assert 0 == faces.min() + assert coords.shape[0] == faces.max() + 1 surf_path = pjoin(data_path, "surf", "%s.%s" % ("lh", "sphere")) coords, faces, volume_info, create_stamp = read_geometry( surf_path, read_metadata=True, read_stamp=True) - assert_equal(0, faces.min()) - assert_equal(coords.shape[0], faces.max() + 1) - assert_equal(9, len(volume_info)) - assert_equal([2, 0, 20], volume_info['head']) - assert_equal(u'created by greve on Thu Jun 8 19:17:51 2006', - create_stamp) + assert 0 == faces.min() + assert coords.shape[0] == faces.max() + 1 + assert 9 == len(volume_info) + assert np.array_equal([2, 0, 20], volume_info['head']) + assert create_stamp == 'created by greve on Thu Jun 8 19:17:51 2006' # Test equivalence of freesurfer- and nibabel-generated triangular files # with respect to read_geometry() @@ -83,7 +79,8 @@ def test_geometry(): for key in ('xras', 'yras', 'zras', 'cras'): assert_allclose(volume_info2[key], volume_info[key], rtol=1e-7, atol=1e-30) - assert_equal(volume_info2['cras'], volume_info['cras']) + + assert np.array_equal(volume_info2['cras'], volume_info['cras']) with open(surf_path, 'rb') as fobj: np.fromfile(fobj, ">u1", 3) read_create_stamp = fobj.readline().decode().rstrip('\n') @@ -93,27 +90,27 @@ def test_geometry(): with clear_and_catch_warnings() as w: warnings.filterwarnings('always', category=DeprecationWarning) read_geometry(surf_path, read_metadata=True) 
- assert_true(any('volume information contained' in str(ww.message) - for ww in w)) - assert_true(any('extension code' in str(ww.message) for ww in w)) + + assert any('volume information contained' in str(ww.message) for ww in w) + assert any('extension code' in str(ww.message) for ww in w) volume_info['head'] = [1, 2] with clear_and_catch_warnings() as w: write_geometry(surf_path, coords, faces, create_stamp, volume_info) - assert_true(any('Unknown extension' in str(ww.message) for ww in w)) + assert any('Unknown extension' in str(ww.message) for ww in w) volume_info['a'] = 0 - assert_raises(ValueError, write_geometry, surf_path, coords, - faces, create_stamp, volume_info) + with pytest.raises(ValueError): + write_geometry(surf_path, coords, faces, create_stamp, volume_info) - assert_equal(create_stamp, read_create_stamp) + assert create_stamp == read_create_stamp - np.testing.assert_array_equal(coords, coords2) - np.testing.assert_array_equal(faces, faces2) + assert np.array_equal(coords, coords2) + assert np.array_equal(faces, faces2) # Validate byte ordering coords_swapped = coords.byteswap().newbyteorder() faces_swapped = faces.byteswap().newbyteorder() - np.testing.assert_array_equal(coords_swapped, coords) - np.testing.assert_array_equal(faces_swapped, faces) + assert np.array_equal(coords_swapped, coords) + assert np.array_equal(faces_swapped, faces) @freesurfer_test @@ -123,14 +120,14 @@ def test_quad_geometry(): new_quad = pjoin(get_nibabel_data(), 'nitest-freesurfer', 'subjects', 'bert', 'surf', 'lh.inflated.nofix') coords, faces = read_geometry(new_quad) - assert_equal(0, faces.min()) - assert_equal(coords.shape[0], faces.max() + 1) + assert 0 == faces.min() + assert coords.shape[0] == (faces.max() + 1) with InTemporaryDirectory(): new_path = 'test' write_geometry(new_path, coords, faces) coords2, faces2 = read_geometry(new_path) - assert_equal(coords, coords2) - assert_equal(faces, faces2) + assert np.array_equal(coords,coords2) + assert np.array_equal(faces, faces2) @freesurfer_test @@ -138,13 +135,13 @@ def test_morph_data(): """Test IO of morphometry data file (eg. 
curvature).""" curv_path = pjoin(data_path, "surf", "%s.%s" % ("lh", "curv")) curv = read_morph_data(curv_path) - assert_true(-1.0 < curv.min() < 0) - assert_true(0 < curv.max() < 1.0) + assert -1.0 < curv.min() < 0 + assert 0 < curv.max() < 1.0 with InTemporaryDirectory(): new_path = 'test' write_morph_data(new_path, curv) curv2 = read_morph_data(new_path) - assert_equal(curv2, curv) + assert np.array_equal(curv2, curv) def test_write_morph_data(): @@ -157,17 +154,17 @@ def test_write_morph_data(): for shape in okay_shapes: write_morph_data('test.curv', values.reshape(shape)) # Check ordering is preserved, regardless of shape - assert_equal(values, read_morph_data('test.curv')) - assert_raises(ValueError, write_morph_data, 'test.curv', - np.zeros(shape), big_num) + assert np.array_equal(read_morph_data('test.curv'), values) + + with pytest.raises(ValueError): + write_morph_data('test.curv', np.zeros(shape), big_num) # Windows 32-bit overflows Python int if np.dtype(np.int) != np.dtype(np.int32): - assert_raises(ValueError, write_morph_data, 'test.curv', - strided_scalar((big_num,))) + with pytest.raises(ValueError): + write_morph_data('test.curv', strided_scalar((big_num,))) for shape in bad_shapes: - assert_raises(ValueError, write_morph_data, 'test.curv', - values.reshape(shape)) - + with pytest.raises(ValueError): + write_morph_data('test.curv', values.reshape(shape)) @freesurfer_test def test_annot(): @@ -178,8 +175,8 @@ def test_annot(): hash_ = _hash_file_content(annot_path) labels, ctab, names = read_annot(annot_path) - assert_true(labels.shape == (163842, )) - assert_true(ctab.shape == (len(names), 5)) + assert labels.shape == (163842, ) + assert ctab.shape == (len(names), 5) labels_orig = None if a == 'aparc': @@ -187,9 +184,9 @@ def test_annot(): np.testing.assert_array_equal(labels == -1, labels_orig == 0) # Handle different version of fsaverage if hash_ == 'bf0b488994657435cdddac5f107d21e8': - assert_true(np.sum(labels_orig == 0) == 13887) + assert np.sum(labels_orig == 0) == 13887 elif hash_ == 'd4f5b7cbc2ed363ac6fcf89e19353504': - assert_true(np.sum(labels_orig == 1639705) == 13327) + assert np.sum(labels_orig == 1639705) == 13327 else: raise RuntimeError("Unknown freesurfer file. Please report " "the problem to the maintainer of nibabel.") @@ -204,11 +201,11 @@ def test_annot(): if labels_orig is not None: labels_orig_2, _, _ = read_annot(annot_path, orig_ids=True) - np.testing.assert_array_equal(labels, labels2) + assert np.array_equal(labels, labels2) if labels_orig is not None: - np.testing.assert_array_equal(labels_orig, labels_orig_2) - np.testing.assert_array_equal(ctab, ctab2) - assert_equal(names, names2) + assert np.array_equal(labels_orig, labels_orig_2) + assert np.array_equal(ctab, ctab2) + assert names == names2 def test_read_write_annot(): @@ -270,12 +267,10 @@ def test_write_annot_fill_ctab(): # values back. 
badannot = (10 * np.arange(nlabels, dtype=np.int32)).reshape(-1, 1) rgbal = np.hstack((rgba, badannot)) - print(labels) with clear_and_catch_warnings() as w: write_annot(annot_path, labels, rgbal, names, fill_ctab=False) - assert_true( - any('Annotation values in {} will be incorrect'.format( - annot_path) == str(ww.message) for ww in w)) + assert any('Annotation values in {} will be incorrect'.format(annot_path) == str(ww.message) + for ww in w) labels2, rgbal2, names2 = read_annot(annot_path, orig_ids=True) names2 = [n.decode('ascii') for n in names2] assert np.all(np.isclose(rgbal2[:, :4], rgba)) @@ -289,9 +284,8 @@ def test_write_annot_fill_ctab(): rgbal[:, 2] * (2 ** 16)) with clear_and_catch_warnings() as w: write_annot(annot_path, labels, rgbal, names, fill_ctab=False) - assert_true( - not any('Annotation values in {} will be incorrect'.format( - annot_path) == str(ww.message) for ww in w)) + assert all('Annotation values in {} will be incorrect'.format(annot_path) != str(ww.message) + for ww in w) labels2, rgbal2, names2 = read_annot(annot_path) names2 = [n.decode('ascii') for n in names2] assert np.all(np.isclose(rgbal2[:, :4], rgba)) @@ -349,10 +343,29 @@ def test_label(): label_path = pjoin(data_path, "label", "lh.cortex.label") label = read_label(label_path) # XXX : test more - assert_true(label.min() >= 0) - assert_true(label.max() <= 163841) - assert_true(label.shape[0] <= 163842) + assert label.min() >= 0 + assert label.max() <= 163841 + assert label.shape[0] <= 163842 labels, scalars = read_label(label_path, True) - assert_true(np.all(labels == label)) - assert_true(len(labels) == len(scalars)) + assert np.all(labels == label) + assert len(labels) == len(scalars) + + +def test_write_annot_maxstruct(): + """Test writing ANNOT files with repeated labels""" + with InTemporaryDirectory(): + nlabels = 3 + names = ['label {}'.format(l) for l in range(1, nlabels + 1)] + # max label < n_labels + labels = np.array([1, 1, 1], dtype=np.int32) + rgba = np.array(np.random.randint(0, 255, (nlabels, 4)), dtype=np.int32) + annot_path = 'c.annot' + + write_annot(annot_path, labels, rgba, names) + # Validate the file can be read + rt_labels, rt_ctab, rt_names = read_annot(annot_path) + # Check round-trip + assert np.array_equal(labels, rt_labels) + assert np.array_equal(rgba, rt_ctab[:, :4]) + assert names == [n.decode('ascii') for n in rt_names] diff --git a/nibabel/freesurfer/tests/test_mghformat.py b/nibabel/freesurfer/tests/test_mghformat.py index 47e54080c3..e1cfc56b18 100644 --- a/nibabel/freesurfer/tests/test_mghformat.py +++ b/nibabel/freesurfer/tests/test_mghformat.py @@ -23,12 +23,10 @@ from ...wrapstruct import WrapStructError from ... 
import imageglobals -from nose.tools import assert_true, assert_false -from numpy.testing import (assert_equal, assert_array_equal, - assert_array_almost_equal, assert_almost_equal, - assert_raises) -from ...testing import assert_not_equal +import pytest + +from numpy.testing import assert_array_equal, assert_array_almost_equal, assert_almost_equal from ...testing import data_path @@ -67,10 +65,10 @@ def test_read_mgh(): # header h = mgz.header - assert_equal(h['version'], 1) - assert_equal(h['type'], 3) - assert_equal(h['dof'], 0) - assert_equal(h['goodRASFlag'], 1) + assert h['version'] == 1 + assert h['type'] == 3 + assert h['dof'] == 0 + assert h['goodRASFlag'] == 1 assert_array_equal(h['dims'], [3, 4, 5, 2]) assert_almost_equal(h['tr'], 2.0) assert_almost_equal(h['flip_angle'], 0.0) @@ -81,7 +79,7 @@ def test_read_mgh(): assert_array_almost_equal(h.get_vox2ras_tkr(), v2rtkr) # data. will be different for your own mri_volsynth invocation - v = mgz.get_data() + v = mgz.get_fdata() assert_almost_equal(v[1, 2, 3, 0], -0.3047, 4) assert_almost_equal(v[1, 2, 3, 1], 0.0018, 4) @@ -97,15 +95,15 @@ def test_write_mgh(): # read from the tmp file and see if it checks out mgz = load('tmpsave.mgz') h = mgz.header - dat = mgz.get_data() + dat = mgz.get_fdata() # Delete loaded image to allow file deletion by windows del mgz # header - assert_equal(h['version'], 1) - assert_equal(h['type'], 3) - assert_equal(h['dof'], 0) - assert_equal(h['goodRASFlag'], 1) - assert_array_equal(h['dims'], [5, 4, 3, 2]) + assert h['version'] == 1 + assert h['type'] == 3 + assert h['dof'] == 0 + assert h['goodRASFlag'] == 1 + assert np.array_equal(h['dims'], [5, 4, 3, 2]) assert_almost_equal(h['tr'], 0.0) assert_almost_equal(h['flip_angle'], 0.0) assert_almost_equal(h['te'], 0.0) @@ -131,11 +129,11 @@ def test_write_noaffine_mgh(): # Delete loaded image to allow file deletion by windows del mgz # header - assert_equal(h['version'], 1) - assert_equal(h['type'], 0) # uint8 for mgh - assert_equal(h['dof'], 0) - assert_equal(h['goodRASFlag'], 1) - assert_array_equal(h['dims'], [7, 13, 3, 22]) + assert h['version'] == 1 + assert h['type'] == 0 # uint8 for mgh + assert h['dof'] == 0 + assert h['goodRASFlag'] == 1 + assert np.array_equal(h['dims'], [7, 13, 3, 22]) assert_almost_equal(h['tr'], 0.0) assert_almost_equal(h['flip_angle'], 0.0) assert_almost_equal(h['te'], 0.0) @@ -157,7 +155,7 @@ def test_set_zooms(): (1, 1, -1, 1), (1, 1, 1, -1), (1, 1, 1, 1, 5)): - with assert_raises(HeaderDataError): + with pytest.raises(HeaderDataError): h.set_zooms(zooms) # smoke test for tr=0 h.set_zooms((1, 1, 1, 0)) @@ -177,7 +175,8 @@ def bad_dtype_mgh(): def test_bad_dtype_mgh(): # Now test the above function - assert_raises(MGHError, bad_dtype_mgh) + with pytest.raises(MGHError): + bad_dtype_mgh() def test_filename_exts(): @@ -193,7 +192,7 @@ def test_filename_exts(): save(img, fname) # read from the tmp file and see if it checks out img_back = load(fname) - assert_array_equal(img_back.get_data(), v) + assert_array_equal(img_back.get_fdata(), v) del img_back @@ -218,14 +217,14 @@ def test_header_updating(): assert_almost_equal(mgz.affine, exp_aff, 6) assert_almost_equal(hdr.get_affine(), exp_aff, 6) # Test that initial wonky header elements have not changed - assert_equal(hdr['delta'], 1) + assert np.all(hdr['delta'] == 1) assert_almost_equal(hdr['Mdc'].T, exp_aff[:3, :3]) # Save, reload, same thing img_fobj = io.BytesIO() mgz2 = _mgh_rt(mgz, img_fobj) hdr2 = mgz2.header assert_almost_equal(hdr2.get_affine(), exp_aff, 6) - 
assert_equal(hdr2['delta'], 1) + assert_array_equal(hdr2['delta'],1) # Change affine, change underlying header info exp_aff_d = exp_aff.copy() exp_aff_d[0, -1] = -14 @@ -258,17 +257,17 @@ def test_eq(): # Test headers compare properly hdr = MGHHeader() hdr2 = MGHHeader() - assert_equal(hdr, hdr2) + assert hdr == hdr2 hdr.set_data_shape((2, 3, 4)) - assert_false(hdr == hdr2) + assert(hdr != hdr2) hdr2.set_data_shape((2, 3, 4)) - assert_equal(hdr, hdr2) + assert hdr == hdr2 def test_header_slope_inter(): # Test placeholder slope / inter method hdr = MGHHeader() - assert_equal(hdr.get_slope_inter(), (None, None)) + assert hdr.get_slope_inter() == (None, None) def test_mgh_load_fileobj(): @@ -280,22 +279,22 @@ def test_mgh_load_fileobj(): # pass the filename to the array proxy, please feel free to change this # test. img = MGHImage.load(MGZ_FNAME) - assert_equal(img.dataobj.file_like, MGZ_FNAME) + assert img.dataobj.file_like == MGZ_FNAME # Check fileobj also passed into dataobj with ImageOpener(MGZ_FNAME) as fobj: contents = fobj.read() bio = io.BytesIO(contents) fm = MGHImage.make_file_map(mapping=dict(image=bio)) img2 = MGHImage.from_file_map(fm) - assert_true(img2.dataobj.file_like is bio) - assert_array_equal(img.get_data(), img2.get_data()) + assert(img2.dataobj.file_like is bio) + assert_array_equal(img.get_fdata(), img2.get_fdata()) def test_mgh_affine_default(): hdr = MGHHeader() hdr['goodRASFlag'] = 0 hdr2 = MGHHeader(hdr.binaryblock) - assert_equal(hdr2['goodRASFlag'], 1) + assert hdr2['goodRASFlag'] == 1 assert_array_equal(hdr['Mdc'], hdr2['Mdc']) assert_array_equal(hdr['Pxyz_c'], hdr2['Pxyz_c']) @@ -310,33 +309,33 @@ def test_mgh_set_data_shape(): assert_array_equal(hdr.get_data_shape(), (5, 4, 3)) hdr.set_data_shape((5, 4, 3, 2)) assert_array_equal(hdr.get_data_shape(), (5, 4, 3, 2)) - with assert_raises(ValueError): + with pytest.raises(ValueError): hdr.set_data_shape((5, 4, 3, 2, 1)) def test_mghheader_default_structarr(): hdr = MGHHeader.default_structarr() - assert_equal(hdr['version'], 1) + assert hdr['version'] == 1 assert_array_equal(hdr['dims'], 1) - assert_equal(hdr['type'], 3) - assert_equal(hdr['dof'], 0) - assert_equal(hdr['goodRASFlag'], 1) + assert hdr['type'] == 3 + assert hdr['dof'] == 0 + assert hdr['goodRASFlag'] == 1 assert_array_equal(hdr['delta'], 1) assert_array_equal(hdr['Mdc'], [[-1, 0, 0], [0, 0, 1], [0, -1, 0]]) assert_array_equal(hdr['Pxyz_c'], 0) - assert_equal(hdr['tr'], 0) - assert_equal(hdr['flip_angle'], 0) - assert_equal(hdr['te'], 0) - assert_equal(hdr['ti'], 0) - assert_equal(hdr['fov'], 0) + assert hdr['tr'] == 0 + assert hdr['flip_angle'] == 0 + assert hdr['te'] == 0 + assert hdr['ti'] == 0 + assert hdr['fov'] == 0 for endianness in (None,) + BIG_CODES: hdr2 = MGHHeader.default_structarr(endianness=endianness) - assert_equal(hdr2, hdr) - assert_equal(hdr2.newbyteorder('>'), hdr) + assert hdr2 == hdr + assert hdr2.newbyteorder('>') == hdr for endianness in LITTLE_CODES: - with assert_raises(ValueError): + with pytest.raises(ValueError): MGHHeader.default_structarr(endianness=endianness) @@ -351,17 +350,17 @@ def test_deprecated_fields(): hdr['mrparams'] = [1, 2, 3, 4] assert_array_almost_equal(hdr['mrparams'], [1, 2, 3, 4]) - assert_equal(hdr['tr'], 1) - assert_equal(hdr['flip_angle'], 2) - assert_equal(hdr['te'], 3) - assert_equal(hdr['ti'], 4) - assert_equal(hdr['fov'], 0) + assert hdr['tr'] == 1 + assert hdr['flip_angle'] == 2 + assert hdr['te'] == 3 + assert hdr['ti'] == 4 + assert hdr['fov'] == 0 
assert_array_almost_equal(hdr_data['mrparams'], [1, 2, 3, 4]) - assert_equal(hdr_data['tr'], 1) - assert_equal(hdr_data['flip_angle'], 2) - assert_equal(hdr_data['te'], 3) - assert_equal(hdr_data['ti'], 4) - assert_equal(hdr_data['fov'], 0) + assert hdr_data['tr'] == 1 + assert hdr_data['flip_angle'] == 2 + assert hdr_data['te'] == 3 + assert hdr_data['ti'] == 4 + assert hdr_data['fov'] == 0 hdr['tr'] = 5 hdr['flip_angle'] = 6 @@ -388,7 +387,7 @@ def check_dtypes(self, expected, actual): # Some images will want dtypes to be equal including endianness, # others may only require the same type # MGH requires the actual to be a big endian version of expected - assert_equal(expected.newbyteorder('>'), actual) + assert expected.newbyteorder('>') == actual class TestMGHHeader(_TestLabeledWrapStruct): @@ -405,9 +404,9 @@ def test_general_init(self): hdr = self.header_class() # binaryblock has length given by header data dtype binblock = hdr.binaryblock - assert_equal(len(binblock), hdr.structarr.dtype.itemsize) + assert len(binblock) == hdr.structarr.dtype.itemsize # Endianness will always be big, and cannot be set - assert_equal(hdr.endianness, '>') + assert hdr.endianness == '>' # You can also pass in a check flag, without data this has no # effect hdr = self.header_class(check=False) @@ -416,15 +415,15 @@ def test__eq__(self): # Test equal and not equal hdr1 = self.header_class() hdr2 = self.header_class() - assert_equal(hdr1, hdr2) + assert hdr1 == hdr2 self._set_something_into_hdr(hdr1) - assert_not_equal(hdr1, hdr2) + assert hdr1 != hdr2 self._set_something_into_hdr(hdr2) - assert_equal(hdr1, hdr2) + assert hdr1 == hdr2 # REMOVED as_byteswapped() test # Check comparing to funny thing says no - assert_not_equal(hdr1, None) - assert_not_equal(hdr1, 1) + assert hdr1 != None + assert hdr1 != 1 def test_to_from_fileobj(self): # Successful write using write_to @@ -433,56 +432,58 @@ def test_to_from_fileobj(self): hdr.write_to(str_io) str_io.seek(0) hdr2 = self.header_class.from_fileobj(str_io) - assert_equal(hdr2.endianness, '>') - assert_equal(hdr2.binaryblock, hdr.binaryblock) + assert hdr2.endianness == '>' + assert hdr2.binaryblock == hdr.binaryblock def test_endian_guess(self): # Check guesses of endian eh = self.header_class() - assert_equal(eh.endianness, '>') - assert_equal(self.header_class.guessed_endian(eh), '>') + assert eh.endianness == '>' + assert self.header_class.guessed_endian(eh) == '>' def test_bytes(self): # Test get of bytes hdr1 = self.header_class() bb = hdr1.binaryblock hdr2 = self.header_class(hdr1.binaryblock) - assert_equal(hdr1, hdr2) - assert_equal(hdr1.binaryblock, hdr2.binaryblock) + assert hdr1 == hdr2 + assert hdr1.binaryblock == hdr2.binaryblock # Do a set into the header, and try again. The specifics of 'setting # something' will depend on the nature of the bytes object self._set_something_into_hdr(hdr1) hdr2 = self.header_class(hdr1.binaryblock) - assert_equal(hdr1, hdr2) - assert_equal(hdr1.binaryblock, hdr2.binaryblock) + assert hdr1 == hdr2 + assert hdr1.binaryblock == hdr2.binaryblock # Short binaryblocks give errors (here set through init) # Long binaryblocks are truncated - assert_raises(WrapStructError, - self.header_class, - bb[:self.header_class._hdrdtype.itemsize - 1]) + with pytest.raises(WrapStructError): + self.header_class(bb[:self.header_class._hdrdtype.itemsize - 1]) + # Checking set to true by default, and prevents nonsense being # set into the header. 
bb_bad = self.get_bad_bb() if bb_bad is None: return with imageglobals.LoggingOutputSuppressor(): - assert_raises(HeaderDataError, self.header_class, bb_bad) + with pytest.raises(HeaderDataError): + self.header_class(bb_bad) + # now slips past without check _ = self.header_class(bb_bad, check=False) def test_as_byteswapped(self): # Check byte swapping hdr = self.header_class() - assert_equal(hdr.endianness, '>') + assert hdr.endianness == '>' # same code just returns a copy for endianness in BIG_CODES: hdr2 = hdr.as_byteswapped(endianness) - assert_false(hdr2 is hdr) - assert_equal(hdr2, hdr) + assert(hdr2 is not hdr) + assert hdr2 == hdr # Different code raises error for endianness in (None,) + LITTLE_CODES: - with assert_raises(ValueError): + with pytest.raises(ValueError): hdr.as_byteswapped(endianness) # Note that contents is not rechecked on swap / copy class DC(self.header_class): @@ -490,7 +491,9 @@ def check_fix(self, *args, **kwargs): raise Exception # Assumes check=True default - assert_raises(Exception, DC, hdr.binaryblock) + with pytest.raises(Exception): + DC(hdr.binaryblock) + hdr = DC(hdr.binaryblock, check=False) hdr2 = hdr.as_byteswapped('>') @@ -499,8 +502,8 @@ def test_checks(self): hdr_t = self.header_class() # _dxer just returns the diagnostics as a string # Default hdr is OK - assert_equal(self._dxer(hdr_t), '') + assert self._dxer(hdr_t) == '' # Version should be 1 hdr = hdr_t.copy() hdr['version'] = 2 - assert_equal(self._dxer(hdr), 'Unknown MGH format version') + assert self._dxer(hdr) == 'Unknown MGH format version' diff --git a/nibabel/funcs.py b/nibabel/funcs.py index 240b20f802..b5fa5d0b4b 100644 --- a/nibabel/funcs.py +++ b/nibabel/funcs.py @@ -79,8 +79,7 @@ def squeeze_image(img): if slen == len(shape): return klass.from_image(img) shape = shape[:slen] - data = img.get_data() - data = data.reshape(shape) + data = np.asanyarray(img.dataobj).reshape(shape) return klass(data, img.affine, img.header, @@ -88,7 +87,7 @@ def squeeze_image(img): def concat_images(images, check_affines=True, axis=None): - ''' Concatenate images in list to single image, along specified dimension + r''' Concatenate images in list to single image, along specified dimension Parameters ---------- @@ -102,6 +101,7 @@ def concat_images(images, check_affines=True, axis=None): be the same shape. If not None, concatenates on the specified dimension. This requires all images to be the same shape, except on the specified dimension. + Returns ------- concat_img : ``SpatialImage`` @@ -144,7 +144,7 @@ def concat_images(images, check_affines=True, axis=None): raise ValueError('Affine for image {0} does not match affine ' 'for first image'.format(i)) # Do not fill cache in image if it is empty - out_data[i] = img.get_data(caching='unchanged') + out_data[i] = np.asanyarray(img.dataobj) if axis is None: out_data = np.rollaxis(out_data, 0, out_data.ndim) @@ -169,7 +169,7 @@ def four_to_three(img): imgs : list list of 3D images ''' - arr = img.get_data() + arr = np.asanyarray(img.dataobj) header = img.header affine = img.affine image_maker = img.__class__ diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index 997ba78523..0497556a2d 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -11,24 +11,18 @@ The Gifti specification was (at time of writing) available as a PDF download from http://www.nitrc.org/projects/gifti/ """ -from __future__ import division, print_function, absolute_import import sys - import numpy as np +import base64 from .. 
import xmlutils as xml -from ..filebasedimages import FileBasedImage +from ..filebasedimages import SerializableImage from ..nifti1 import data_type_codes, xform_codes, intent_codes from .util import (array_index_order_codes, gifti_encoding_codes, gifti_endian_codes, KIND2FMT) from ..deprecated import deprecate_with_version -# {en,de}codestring in deprecated in Python3, but -# {en,de}codebytes not available in Python2. -# Therefore set the proper functions depending on the Python version. -import base64 - class GiftiMetaData(xml.XmlSerializable): """ A sequence of GiftiNVPairs containing metadata for a gifti data array @@ -213,18 +207,21 @@ class GiftiCoordSystem(xml.XmlSerializable): Attributes ---------- dataspace : int - From the spec: "Contains the stereotaxic space of a DataArray's data + From the spec: Contains the stereotaxic space of a DataArray's data prior to application of the transformation matrix. The stereotaxic space should be one of: - NIFTI_XFORM_UNKNOWN - NIFTI_XFORM_SCANNER_ANAT - NIFTI_XFORM_ALIGNED_ANAT - NIFTI_XFORM_TALAIRACH - NIFTI_XFORM_MNI_152" + + - NIFTI_XFORM_UNKNOWN + - NIFTI_XFORM_SCANNER_ANAT + - NIFTI_XFORM_ALIGNED_ANAT + - NIFTI_XFORM_TALAIRACH + - NIFTI_XFORM_MNI_152 + xformspace : int Spec: "Contains the stereotaxic space of a DataArray's data after application of the transformation matrix. See the DataSpace element for a list of stereotaxic spaces." + xform : array-like shape (4, 4) Affine transformation matrix """ @@ -270,16 +267,21 @@ def _to_xml_element(self): return DataTag(dataarray, encoding, datatype, ordering).to_xml() -def _data_tag_element(dataarray, encoding, datatype, ordering): +def _data_tag_element(dataarray, encoding, dtype, ordering): """ Creates data tag with given `encoding`, returns as XML element """ import zlib - ord = array_index_order_codes.npcode[ordering] + order = array_index_order_codes.npcode[ordering] enclabel = gifti_encoding_codes.label[encoding] if enclabel == 'ASCII': - da = _arr2txt(dataarray, datatype) + # XXX Accommodating data_tag API + # On removal (nibabel 4.0) drop str case + da = _arr2txt(dataarray, dtype if isinstance(dtype, str) else KIND2FMT[dtype.kind]) elif enclabel in ('B64BIN', 'B64GZ'): - out = dataarray.tostring(ord) + # XXX Accommodating data_tag API - don't try to fix dtype + if isinstance(dtype, str): + dtype = dataarray.dtype + out = np.asanyarray(dataarray, dtype).tostring(order) if enclabel == 'B64GZ': out = zlib.compress(out) da = base64.b64encode(out).decode() @@ -462,11 +464,10 @@ def _to_xml_element(self): if self.coordsys is not None: data_array.append(self.coordsys._to_xml_element()) # write data array depending on the encoding - dt_kind = data_type_codes.dtype[self.datatype].kind data_array.append( _data_tag_element(self.data, gifti_encoding_codes.specs[self.encoding], - KIND2FMT[dt_kind], + data_type_codes.dtype[self.datatype], self.ind_ord)) return data_array @@ -534,7 +535,7 @@ def metadata(self): return self.meta.metadata -class GiftiImage(xml.XmlSerializable, FileBasedImage): +class GiftiImage(xml.XmlSerializable, SerializableImage): """ GIFTI image object The Gifti spec suggests using the following suffixes to your @@ -680,6 +681,141 @@ def get_arrays_from_intent(self, intent): it = intent_codes.code[intent] return [x for x in self.darrays if x.intent == it] + def agg_data(self, intent_code=None): + """ + Aggregate GIFTI data arrays into an ndarray or tuple of ndarray + + In the general case, the numpy data array is extracted from each ``GiftiDataArray`` + object and returned in a 
``tuple``, in the order they are found in the GIFTI image. + + If all ``GiftiDataArray`` s have ``intent`` of 2001 (``NIFTI_INTENT_TIME_SERIES``), + then the data arrays are concatenated as columns, producing a vertex-by-time array. + If an ``intent_code`` is passed, data arrays are filtered by the selected intents, + before being aggregated. + This may be useful for images containing several intents, or ensuring an expected + data type in an image of uncertain provenance. + If ``intent_code`` is a ``tuple``, then a ``tuple`` will be returned with the result of + ``agg_data`` for each element, in order. + This may be useful for ensuring that expected data arrives in a consistent order. + + Parameters + ---------- + intent_code : None, string, integer or tuple of strings or integers, optional + code(s) specifying nifti intent + + Returns + ------- + tuple of ndarrays or ndarray + If the input is a tuple, the returned tuple will match the order. + + Examples + -------- + + Consider a surface GIFTI file: + + >>> import nibabel as nib + >>> from nibabel.testing import test_data + >>> surf_img = nib.load(test_data('gifti', 'ascii.gii')) + + The coordinate data, which is indicated by the ``NIFTI_INTENT_POINTSET`` + intent code, may be retrieved using any of the following equivalent + calls: + + >>> coords = surf_img.agg_data('NIFTI_INTENT_POINTSET') + >>> coords_2 = surf_img.agg_data('pointset') + >>> coords_3 = surf_img.agg_data(1008) # Numeric code for pointset + >>> print(np.array2string(coords, precision=3)) + [[-16.072 -66.188 21.267] + [-16.706 -66.054 21.233] + [-17.614 -65.402 21.071]] + >>> np.array_equal(coords, coords_2) + True + >>> np.array_equal(coords, coords_3) + True + + Similarly, the triangle mesh can be retrieved using various intent + specifiers: + + >>> triangles = surf_img.agg_data('NIFTI_INTENT_TRIANGLE') + >>> triangles_2 = surf_img.agg_data('triangle') + >>> triangles_3 = surf_img.agg_data(1009) # Numeric code for pointset + >>> print(np.array2string(triangles)) + [0 1 2] + >>> np.array_equal(triangles, triangles_2) + True + >>> np.array_equal(triangles, triangles_3) + True + + All arrays can be retrieved as a ``tuple`` by omitting the intent + code: + + >>> coords_4, triangles_4 = surf_img.agg_data() + >>> np.array_equal(coords, coords_4) + True + >>> np.array_equal(triangles, triangles_4) + True + + Finally, a tuple of intent codes may be passed in order to select + the arrays in a specific order: + + >>> triangles_5, coords_5 = surf_img.agg_data(('triangle', 'pointset')) + >>> np.array_equal(triangles, triangles_5) + True + >>> np.array_equal(coords, coords_5) + True + + The following image is a GIFTI file with ten (10) data arrays of the same + size, and with intent code 2001 (``NIFTI_INTENT_TIME_SERIES``): + + >>> func_img = nib.load(test_data('gifti', 'task.func.gii')) + + When aggregating time series data, these arrays are concatenated into + a single, vertex-by-timestep array: + + >>> series = func_img.agg_data() + >>> series.shape + (642, 10) + + In the case of a GIFTI file with unknown data arrays, it may be preferable + to specify the intent code, so that a time series array is always returned: + + >>> series_2 = func_img.agg_data('NIFTI_INTENT_TIME_SERIES') + >>> series_3 = func_img.agg_data('time series') + >>> series_4 = func_img.agg_data(2001) + >>> np.array_equal(series, series_2) + True + >>> np.array_equal(series, series_3) + True + >>> np.array_equal(series, series_4) + True + + Requesting a data array from a GIFTI file with no matching intent codes + 
will result in an empty tuple: + + >>> surf_img.agg_data('time series') + () + >>> func_img.agg_data('triangle') + () + """ + + # Allow multiple intents to specify the order + # e.g., agg_data(('pointset', 'triangle')) ensures consistent order + + if isinstance(intent_code, tuple): + return tuple(self.agg_data(intent_code=code) for code in intent_code) + + darrays = self.darrays if intent_code is None else self.get_arrays_from_intent(intent_code) + all_data = tuple(da.data for da in darrays) + all_intent = {intent_codes.niistring[da.intent] for da in darrays} + + if all_intent == {'NIFTI_INTENT_TIME_SERIES'}: # stack when the gifti is a timeseries + return np.column_stack(all_data) + + if len(all_data) == 1: + all_data = all_data[0] + + return all_data + @deprecate_with_version( 'getArraysFromIntent method deprecated. ' "Use get_arrays_from_intent instead.", @@ -724,6 +860,9 @@ def to_xml(self, enc='utf-8'): """ + xml.XmlSerializable.to_xml(self, enc) + # Avoid the indirection of going through to_file_map + to_bytes = to_xml + def to_file_map(self, file_map=None): """ Save the current image to the specified file_map diff --git a/nibabel/gifti/parse_gifti_fast.py b/nibabel/gifti/parse_gifti_fast.py index de02f4c76b..044a70fede 100644 --- a/nibabel/gifti/parse_gifti_fast.py +++ b/nibabel/gifti/parse_gifti_fast.py @@ -6,7 +6,6 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -from __future__ import division, print_function, absolute_import import base64 import sys @@ -33,43 +32,41 @@ class GiftiParseError(ExpatError): def read_data_block(encoding, endian, ordering, datatype, shape, data): """ Tries to unzip, decode, parse the funny string data """ - ord = array_index_order_codes.npcode[ordering] enclabel = gifti_encoding_codes.label[encoding] + dtype = data_type_codes.type[datatype] if enclabel == 'ASCII': # GIFTI_ENCODING_ASCII c = StringIO(data) - da = np.loadtxt(c) - da = da.astype(data_type_codes.type[datatype]) + da = np.loadtxt(c, dtype=dtype) return da # independent of the endianness - elif enclabel == 'B64BIN': - # GIFTI_ENCODING_B64BIN - dec = base64.b64decode(data.encode('ascii')) - dt = data_type_codes.type[datatype] - sh = tuple(shape) - newarr = np.frombuffer(dec, dtype=dt) - if len(newarr.shape) != len(sh): - newarr = newarr.reshape(sh, order=ord) - - elif enclabel == 'B64GZ': - # GIFTI_ENCODING_B64GZ - # convert to bytes array for python 3.2 - # http://www.diveintopython3.net/strings.html#byte-arrays - dec = base64.b64decode(data.encode('ascii')) - zdec = zlib.decompress(dec) - dt = data_type_codes.type[datatype] - sh = tuple(shape) - newarr = np.frombuffer(zdec, dtype=dt) - if len(newarr.shape) != len(sh): - newarr = newarr.reshape(sh, order=ord) - elif enclabel == 'External': # GIFTI_ENCODING_EXTBIN raise NotImplementedError("In what format are the external files?") - else: + elif enclabel not in ('B64BIN', 'B64GZ'): return 0 + # Numpy arrays created from bytes objects are read-only. + # Neither b64decode nor decompress will return bytearrays, and there + # are not equivalents to fobj.readinto to allow us to pass them, so + # there is not a simple way to avoid making copies. + # If this becomes a problem, we should write a decoding interface with + # a tunable chunk size. 
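# A self-contained sketch of the base64 + zlib decode path described in the
# comment above, assuming a little-endian float32 payload; the function and
# variable names here are illustrative only and are not part of nibabel.
# Endian handling (performed later in read_data_block) is omitted.
import base64
import zlib

import numpy as np

def decode_b64gz(data, shape, dtype=np.float32, order='F'):
    # bytes -> bytearray gives numpy a writeable buffer to wrap.
    buff = bytearray(zlib.decompress(base64.b64decode(data.encode('ascii'))))
    arr = np.frombuffer(buff, dtype=dtype)
    return arr.reshape(shape, order=order) if arr.ndim != len(shape) else arr

# Round-trip check with a tiny array:
raw = np.arange(6, dtype=np.float32)
encoded = base64.b64encode(zlib.compress(raw.tobytes())).decode('ascii')
assert np.array_equal(decode_b64gz(encoded, (2, 3), order='C'), raw.reshape(2, 3))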
+ dec = base64.b64decode(data.encode('ascii')) + if enclabel == 'B64BIN': + # GIFTI_ENCODING_B64BIN + buff = bytearray(dec) + else: + # GIFTI_ENCODING_B64GZ + buff = bytearray(zlib.decompress(dec)) + del dec + + sh = tuple(shape) + newarr = np.frombuffer(buff, dtype=dtype) + if len(newarr.shape) != len(sh): + newarr = newarr.reshape(sh, order=array_index_order_codes.npcode[ordering]) + # check if we need to byteswap required_byteorder = gifti_endian_codes.byteorder[endian] if (required_byteorder in ('big', 'little') and diff --git a/nibabel/gifti/tests/data/task.func.gii b/nibabel/gifti/tests/data/task.func.gii new file mode 100644 index 0000000000..0814a6dd33 --- /dev/null +++ b/nibabel/gifti/tests/data/task.func.gii @@ -0,0 +1,33 @@ + + +UserNamemarkiewiczDateThu Sep 12 08:59:28 2019gifticlib-versiongifti library version 1.09, 28 June, 2010TimeStep0.000000NIFTI_XFORM_UNKNOWNNIFTI_XFORM_UNKNOWN 1.000000 0.000000 0.000000 0.000000 + 0.000000 1.000000 0.000000 0.000000 + 0.000000 0.000000 1.000000 0.000000 + 0.000000 0.000000 0.000000 1.000000eJx9lnlU1dUWx0EZLtwB7sC993dHLsgFCnxokJmFwPmQklQWiYpEvnhmapRTYkI8ns9ZK4ckRxwrhxWFE44kli8wNXAiyzEQQVSUUBBB3s2WrdUf7+1/ztl7n3X2WWd/v99zvh4ho8wtkDt9JZLz3fkxRcvh6062jDfj5rKN2x2k9+kSH3i3i/vJJ8WG77wo6G9kQ5OWr1Z68dNBG6XbDEy8auPxJ/SUPm2lxzVvKkptyHdbUAfbSIiSE3XbyLrzNr683Z2yMhN+OU5eSPHn6DSJnFCfh7VOfGNlwORZQrp0VJy/s0B4zv9NjFyrfJjLH+xHVIOOd4qCmJBvIjBVYke8HtV7Sirq/Mic3C4GLbBhlStZ7qlldoSJxFYtw7M1PDvfREChL9+ZNRzOMZJdG0hbgYKM5xpEpEc4v+WYGd5k4tI5Hd81+JOdamD5TgN7Z1qonGfkYJdEZZKe7vv8GZDtjbungZJUBf80mJgyxIdjP7szcYuKM9PVhISraf7Cyo4PPenp4cnQXDOJA0NIKnDwicqK7rCZpDYbQ1eZ2RXsxey3bRxYYeYuWuSuvXMXGtg/XscVlRLFDiepFhsxPUN4q6+F+qG+LPvewt2WQNZNsbFnrz+XhxtxLlKQ8uLXotBm41aRk1kekQSvfoy0QDv33wwk0s348A4fWeMFa8KYpuMie24Ib5zUEHs9gF49w/GcLNHhtV0UZ3SJHmU+FF++JYY7doiJmouidP4vorW8QyQVtIjc6ePE8I+/FQ9axwmto1g86H9VPJ7ow5qWH8W5I568mN8lVqo1RG9U0TPVg81OBS0jNopH9Wt7vC4O7Vzz0Lc2GRN+HyOaNPQp1JOnl9N87ZbQ+XWJZB8j+8IcxMRITBjtmq+z0jDISspYIy+/EkDscRUVz2p4qkZPlauv9QOsRA/WEanU0W+OivqbRrISA1hxTE3/RAWJ4wNoKPFk5Vpv4n7pEJPi2sTFGn/UWyVUFVaOLpZTuUrHxgYZ599z56zNyIhFMozVEl9ODqHngh6suOiHXu/DWYWd6sMa2p4zUqVz9STZwrlDrtoxZhqfMNC70kJtvYWhNSZil5hYOKhFZJfJ6C9peDJMycYUb9rn69gwTI9ip4bdZXr2TlQQn2Jmf1YQ6wtdmF1vJDpKwq9IyyufdGP8pjaxZ161mDpJRXmRCs1UGZu3O9kmGTnVHoh6diQDIxVU1xl4cpmNQn8TQd0cdPS28f49CUUvI6MCLGjqJDrfU+Obq2X5q3rEazp+OOTPiVoDuy9LvPZGMHGvWrhUYWaOi9va3ga+6Qwi4l8mRiyReOAVyLDFOuYv13H0qp7McisXoozc267k3EAt/4jQUZGl49IuA9M8NYze6ceUe3KCXHjo5uLXmGUGDGYd4bEmitO96V6soGC1isRGNbWxalLaAyh628LtdA0fz/NjyA96ZLs0jDkkMfKUN3n3FRi22ql8x4LXGQ1Sppz8hd0wXzMws0BDvb+CGW83iRUj1Yx37TW4S8HSUSqO91CQJvNn3iQjOXY/8tL1FAa7dK/JyqS+auKnS8waZWfcp67YCR13f7DQurBD5H3VKXx2yXgsTcXlXb4Y61087xXM4j42Gt+3s3CPg4yX7ISFh9KzRIeuTxDfLw8lON+J+zk9hi+MWJYa8MuQcOyX0By0477UyLGVJnrbJQ6fkXh3h6uHfXSczA8kqLedD1Y68F8ksS3TwPkeGsaF64mcrSGv1o1nUn3IG27G0mAjdbGdk99IrL1gZnG+hTsurtzGwr+vuc6xQMfCyd6Uv9whZAd9KXlTwel7MnJlKrYIG3NrJa7LTGRe1rFkrg/WfBVJz/iyNVRFeqiCNd39WDHaTjhW7C5tXpdhYfRpG1JHMI07Qoivl8jda6Rxi41+Lr1d0M31Lhw38WOImUETg+l82cHa0wYqB5l5YZmS0iwt4Xfl3C90wy3SwvO77PgVO5m5JxRlmROdZwjaGDufz3DgcPGxxmDnYKXE2Qot7S+ZGNUQwM0AJQNueuJj07LAV812pYP3XZg5p5HTuMSdMf28WG2vE+XVzcJ70xXRFWCn/gMn5accpHxoJV/j0o+TQZTWOWicG8qa5FCar4SxzRLCtKowHiTrmfKag9a2YMa5BZO528GxykCy9rn4s8rJ8xcdyMNsfG510G9sOLGax8k+3y6a6kI4v6YHs+Kkv2jt/7IbMwvEp049R9rUCY9iySVn/9TI2BQ7K5aYKbLY6ftMIDYvOyfuhRI230lco46fmvWsHxHA2LfU7M/pEA8eBHDlZzOzjjvwWx7M1b+biDtpZUZkKF+ol4qPCuwc2OYk73pRQsaGZcL9IzNvhG8SSRt9+b7Ki+L9D0RVlJb6CY3C1xBBuevtbLiu57D8iChaWyLS7taIQ5uMVPcPF84Di8RbA0+JvPP+SGc2ity0GnEwrU3M+XWdyMu082lmNIfc/Ggu9WewzJPLn80RITF3xKYId9a/60O6kJNg8kLb1
Sw6on8S5qizYk2LLOHbntqE0/saBGcWiU9unBbxvzriI0ZliSl/2yte/XZGgv3WMRHR0CVeV7qh8vClZnV3MqvdUR5oEdG2UyJF5UWOypPOd66LmfoO4X/DjRefqhN1Gd3ICagQvfQbxIScNlEVr2DkNTkxDaOFOa5TTHi6RZRUNYp7tR4MGVIlElbdFLfdr4qX6hpEU3ujWN9Lx7QLvlTF6mjx8aW22oTTqWZopy95W++KyRPviM82d4gLP3sxtp+Mdg9vWmuUGDbLmdym5X6DO+Nu/PGf2v30YfGbXsnY3sb/i5Nh3TPE9FbTX9aMn/iHPygrTiRu80DujBZTC5clzM5aLAzD6v7EUfrFMqEsSBP/WW3F+cpjCbv7F/+Z65t0If738Xk3IU4tOpIwvn6PmJoyS/wXIVx9fg==TimeStep0.000000NIFTI_XFORM_UNKNOWNNIFTI_XFORM_UNKNOWN 1.000000 0.000000 0.000000 0.000000 + 0.000000 1.000000 0.000000 0.000000 + 0.000000 0.000000 1.000000 0.000000 + 0.000000 0.000000 0.000000 1.000000eJx9lXlYFOQWxmFknRlmmH1jBpgBRM1dBKPE5vupYdy01JLETFNcyi1vF9NKUERLTFHkRkWmeFVIMTE0d8NwwS1xQc0ERcElIkFUZLtTPvU8/XHv+ec833e+5zvnOed93xP5wJPFBTbyzhnotbBN2Dqp+TUghDUjTbi57KVjVra90SaiOj8WecVnxLPjPdiboEed4s/8uRJefs+G5LqGghwLzlU63moyk/emDz2MVm6dNlPa30ImMnbl6kn70MbMaglF+UYe5jlIn6/EsN/ItTCfP3Klr7Gy9YVFom9UqfguYomQrb8nsibJ/4iN/FTO8gYV33e2of7OgMcMI6cDtbR+60tcsR9ftTSKkT4m4ifLKB2t5N00A9/fUFLuoYLuFpoCfDmXpSQ/XE9qoY3jfaTcH3dT9H4/jNQ8C61hJoaXabg+WEnHHXpq3fX0ibSyv1TPMV8TK1fryO6mYEO4J+uOaag/IuPHewY+6eMN59xoXO+Hqcyfo5cV/NbZwr4Id0Zs7MBKhYHgZAcVOYGkvWN2nU0MXBHAuQtGUtI8WDPWwpFTJrrVqvCb5MWdC3qKp2uYovZj88uhDHzLStyCEJ59YCHbV0r+BiPX37axe6CVi0vlNFTp+ChXzoxlW0VZs4XpDXYiU7tgfqUzZoOVmnMBbFut/6OHf9qh0WbnkgMnhcLHwdVqNbuTdVxVdmL21wbaD24Tg7Y2i8jXvFl2ok40rC4S8Z9cFmU1l8Wm55uFzKtBrBzWS0RPKxZ5R98QSxO/Efc61YgPpklp3HpK3H3Zg/71j0XuCQUh0/0YMbYD7y6V0lH5lfgzf33RGPHxlfVPzo1m5+/u0XJX7+q0JCz0Qd1aK6780iKGFusoCwugIlpDTZSWCUoLCw5bSDis58VyLV9sUlDQQ8P2bB0JVXqUhRbesfnz8wIls5QKtvUz4Nyj4UCEnNhoH4LuqrmUJeHLRx0Y6PNQhOY0imybkqGf6wmbaeDsEV8+WazifqgXsRdbhMcQHd1yPTkwUY/YGkzor3amPVTQr8yblCk2YqJcXLmk5+5gC74vmah/6M/5fWbGGnXUTjbTcXEAzfuMJL7qmu/pejH0OW9G1vqjVki58Lwne3JUGLpqMC7158J0HRMPS9mQbGLz/kDaupq4tVRP/R492dH+TOzpxk8/1Ym3blwQEbtk9HpajuaqJ2FJdkZ103FolRWltRN7XHM4NcvA6ZYAYjRm3EcFEumq5dtUPdcLDex2YbDbLDOrdf6Ms6vp+oOW7ls1SHMVbE/RE9Rfz9RzNvoVmgmMMbM8Q8fyW2p+9rCTWWRibbmB+2k2Lpk0HL6jIaW/jl2hVsbc1uEskPOFUHN/nZqKr9W0n9QRW6Zi5hUF64qkePf04+k8JWmJOura1CycasB/mBfvV8lYliKnfbM/v8gUXKrUsGyUibnZKvwDFcTN1vOjREuIxEzd2570HO/Sk7etLHxkZOIOJQULfOnRW8KZMzo+H6Fm1Wty4h7VivtH/Hk4VMfooXJ0cj8K90mZMtGlDV2MxJdIeTdZS98IHeMSzTTf8mNNVwPrb1hpWKFm+AwN7rfNfOx8JJJPPRbxNi9OZchQ5vjy/HsqVjcG8fQIKxOn2bjTPZg5FVa6O8L46J6OythgBo0NpaTRgY9cy5wgPSse6VjcbMLdhWn9U4EM2uvC1loTvb80Io0zsednF/6GqAg7bGXgRTNXVVYyfA3MnaPj5HgFdS6OJm1X8qanG8dTfWkY79KSfCsvHbSxS28iNslCXIaZZNfsJ9dYqLNrGZehYYHcm8z0FhFplLJkn4yYGd40fC7nhMxV+w0jXhIT8zpp+XC2FwN6+XH8jJQDX8kIz3L10IX1/AIrlbPNTPo4gO2JAbwTYiVgrYsLFSEEzTaS3mZg/xAby4aYSNhp5FODiaR2E5LddlbMDMYx0sCB/WYSVDLK3bV06iBneLobX48w80wfK9fcQiguCcVkDWXTd3bCC62k1QRizzQzLyMAww4jWQ9UnJhgZOY/NETO9cUj1oOSPiqSpEpeKbUSWaFky0Ep8yrbxYjdHiwprhKPH/8m4o9XCe/rLh6ctXM6OpApwRbuzQ1g0JFAUvrZcNQ6+LTJgbMllMjRDhRTwjnQWceZkkCOfWknbLCDNWeCaXEE4fOMkc3NoUjyApGdtbB5mI1iZxiPnwrn5rwm8Z9tDoY77MQvMv1Na/+XbUzOEvkTdDT09Hf+eTdm3IW/NPLFowFsaTViz3P1QB5ItStftV8YS3MdnFRqef19Hd9011GeqeZiTauInaxl9AQL/1oXTOBVO+vnm0gNszH/aCgtstWircSKtzmU0g8KnNot/xbZia59nbNBRHeRIs3w4HTfZrEnSU1G201hLe/M+TEaEiZoiaj8QfjZdohrn1WIio0GVkSECYk6XeSkl4m17kpazq0XzrZK8cXuJhE1aK249qqVw116UTlMySgvF9everBl7iJxqeMDMcHPjXuve2MMkjE625PbLzSI/nvPiz7h5aL9NXfn4PD453ZduSXK9qaK6l5nRcPs+wPyz78uSmJ2in6qfzpb60pF9u02Mah/q5jRz5cVxzvQUy3B1lYvVraeFpYBnmxf5dqpbbfFxkPNoravO7KqmyLnsYSzk46KVzNyRe3UR2JttYy94XLs0kQRoWwRsyQNYsCLd8XFMR4c2nJK1HS5I1qnVgnf3rfE0X01YuxlFZmf+VCcoiIlyZs5BQYiQlz75RUf7o6sFxfDGsX5TU3imxmeZDV5cyfEm9auflxRyzjk2hFR990orH6Ci8yoEuEZLaeoTvN/cdI+LEHE7Tb+7c1PB5/8sfhkjJik7cC1p3qIK4uynN3aVoolVTV/4Shn0QFxuH6UuDkngNzKjs6usdv/ilkKrz73u//+1xhRU1bqLFq4U+yULBT/BXpB
i4Y=TimeStep0.000000NIFTI_XFORM_UNKNOWNNIFTI_XFORM_UNKNOWN 1.000000 0.000000 0.000000 0.000000 + 0.000000 1.000000 0.000000 0.000000 + 0.000000 0.000000 1.000000 0.000000 + 0.000000 0.000000 0.000000 1.000000eJx9lXlU1XUaxlnkXuBeuJe77xcviwopopIJGMr3kzo2phgOZW5lueGWa2Ux5a5TbkUCepIQslwSBMncN3BUnDRFyC0PIGiCLJIKhMxPZ+qc/ph5/3nPdznnPe/zPs/zGifImL7Yzs6VRnInPRK7pmioPuOi8QMzbk/C1ZlnP+gQcVNaxAbvH8XEGV7sT9XTmqbm+x86RPY3Tj5fpmO6m413h+oRlRby93mTlGXjxZ0WDtaYiRvjQ9c4A9cf2jk514NpYRaqGoKY3EPF82Emlod6P611pcVG8JxlYsimsyK1bLlIqW8QiTmKp2+xUg6fpiHZ18Enz5hpCDXhM0bHrb5ybv+s5OrI+6KPxoTmIwV+y1TMKjaydrY/HQfVzDNYac2U41OloibeSEKSg/Djvhy11wivymAitlspftXCmFotO83+5JQYmL3XwIn+DqozjIRNNDP6gIFR15Q0RXTi0VQNh3cpOT7LyIAoGQZ/d7qrlLxYqCKyXknJm1ambnwo/Nd58m2Bgeh+LmonOoldZ+bcARNl6204V1gI2+rJtHNWemZb+Px4ANZAGavzDHyyXEtcmhLftBDaz9pJt4fylztW/K96c/QLE6N32wn5zI7veF+OddPzaa6Ciwd3iUHP2Zhz2kViRzeOnQ6jvMpO3Cwbb5w3/GeW/w1lmTV+9/wSsWRlENt/0LCiRos+pyujs81cu/2taF3eJjJOe/NO+D0xsHuBGP/VVeFfcEW4FG2ivKZJ1C7oLjKTjojESeNEcVauKHunSsxZ5UON/Qex5KwnxwseiQf1foxa7YdXuCftGb5EXt8sfq9feGe0GPT6F0/PKS+Y45/kbTY1Yef1HNJ68+bGOrGpe4fo3aZHXm3jyr80XLikRd3JzpHVNl6rN1J8UUf0OhUjhIbUMzp+2WNg/yUziTo1I1/wY/1NfxalGEnW68nfKSf+JRnNw3Q80+hG6wZ3RuyqF4rKRpGQ5o8lxsDdTXpC07x5nBXAqGkyNo5vF3ENOkpLPLFGmjjY14nsAxcJQ6SeRstJHOykX76GiPsGOp+2ktXHSsJ+DUuWWYlQ6lEk2KhV2zk318TS+Sa++LxRLI6Ws35vAJvl3kRf8qR0soaZgzWkazWMrdDz0xxf4n4zM+qYgy1VZsLvGWgaZ6A2MQD5mHax9EiteCbqomhRKEmd7Ufwr568dySQorV6xo21IwK78leTL9cHGHn7sZ3SqRZW5QWyPdnK0RQDu/KNxNy0ULHXwswrKmIPqDl8SUfQWg1/k+ZV1aLnmz4GJoY4mPialW6Slssk/JIOaBl208WEECuBs82sz3NwO02L+1EdKfl62rPtfJ2t57kAP/yPaYmVaVBptGy0GZjZU03Ze/5UpvjySJqL90Y1q5boyEnRMHy8kfJSGR+e9UFepCBluJpFHyqJKdLQ8L6J2MoAqo/6MVnC4NOJEk/zTVyc4E75fl8ejnHy1ssmwh4riVwiJ3KRG3n5BhZ8rObyJiXl6XWiLUvNjA4t3TcpKL6hYFuUL0PcVGz5SdJYTxn2Phq6FusZLLfx3Xk/tkw2IIuyUXJAx7BZKgavMfNwxgMRM6hN1G3yYnqED3/fISc33Y9qWSBrHtj4uNzOtqEOhl2wUbgvmLYmHWuuOrnwYjBH1rrIk/p87yfJF3ca+LDQRK9cPQtzneTe0LJ4npnAaSaWDTKRWGfA9r2aT5RObhy0ETDXxqllRjKLdPQ/r+DIYh07+qm4HPmbyF7h85TrBZIeQg86sHWysOW2hfoGC3cNRq5IXvGsm57YSVrSDsvIOdAmZg/yoV+lgo6tMlLOKBkjzWxHkgnrajP9Nkjakkv68PUj8bAvGRJWzjIl2joV57o52D/MRnuJxGWpx/Hv2/GIcnG3IRhnqYHXA0wcSnbw8UwLzT4SN0ZY2BZvoYcymLOFQWz5p5GICGkPyBXEdWg4JBR8NcaN+QtNNOXY+GVuMKv2hJATFkJcchDBrTZkd5yMHG9mc4qVilgjGatV9LlmIGiDmntD5QTIPei/XUXnEn+67LZiVKp4ab83vd7qEGMzPcloqRCZ2+pFwckK0XLeRum+zjSec/DKIiu3i20ICeOcVXaca4JYLdWtfyGEIVOCGHelC/Oj9eRlOumf4qJhdxAzpRn2XhHIiftGonuEMDjKiWurjRzJWycZQ+mxritNX7cKR1EwhyqDqFlr/JPX/q/wUaeK3iF6pq1Tx/9+d/jqpT88Mn24g97NJtyP23n/pJNBexwsvRWCzzypxkgNMWEGEgolDWZLejK1itQ8HTZJr48dnRmd7uLLFjM30+wMzgqBfqmiqIuDmOshDAvZHb/wrTQRu0PCzT9bpPb3oa+0097e3C5ed2j46EaNUHUO593hWirTJG0sLxanGgtEyms/i4QqA0muLiJj3ypxS/6jSKhXUXRnqzAnVYjC2BYxz+NL8U2djZizvZm5SE2n8SqO9fQia+tSMX/vA1Hv4U7jKRlzUxR0kXbd7rYm0f2XUtHlzTIxZGXLwPLzowZGb64WGzcvFcp/XBT20PoByeGvCmPMPtF8d1b8iBNnxGczOoRrVrsoneLDvQGd8O/qwcmoZvHG4gsiOcWLoqGdSPK/I2qlXXV5kBtaQ5W4tMOD6qmnhbkxS+w1t4jDPfzoG67EI3KSOPhyq4i42SBC19wWLVukf8HnxGxnlUi+dl00/VYtMnvcEl2VAfRaISfjsprgITIOzzQSkOnHK9/JGeuoF25bmkWvX5vF/QlezF/hTZy3HHeXH7u2K1B317JA687FHNNTXiyYekL41clJX6j9vzyJ/Hq0ML9j/tOf7ZLGn+Tnn+svKoI9Gbijp9ijTYs/lbFOnK6t+INHi5OOCr3uFWEJsJGe1i3+zsu5f7zFvH114JN8Jz1WbJ5eHL/w/l6RvvIj8W/5soIOTimeStep0.000000NIFTI_XFORM_UNKNOWNNIFTI_XFORM_UNKNOWN 1.000000 0.000000 0.000000 0.000000 + 0.000000 1.000000 0.000000 0.000000 + 0.000000 0.000000 1.000000 0.000000 + 0.000000 0.000000 0.000000 
1.000000eJx9lnlY1VUax9kv3IXL3X53+d2F5SIhrokgTQZyPpk6pam4pk6ZuxL5pES4jQs6WZaTgrmgZFnpoGSahiOYmDouKOrkbooibgnu5kVgftWTz/THzPvPec55n+e87znP9/s5Z8OMYPbJHl64amVNpU9cn2tkR3kMg/zs+Clx+UQ0DSnNIv6zBuE2HRHqLcG8+66F2UkGEi41ifNr3cyebmLcA5myLhYujHAwPCqM6g0uKoJkVM12pvQLxRBrZdsdD3PLAxh+3k7f+1EMejuc1Ek2EitCf621a7qLiUVzROLI/WJXTZ7ofOyWyHdrf82d+FhD6YcmZrzpQvNnOzkBNqrumxm9VcXljjquVN0VozNshDq0lOXrcU61ET1GT8QXBrS7HUzerKJuUQRziiUu3XDxcyc122NrRacfvJQedOCf4uC7ADNyoZ64col+jRYm+Ll5zmXD2cPBkAlW3JZwxhQGMWqJiVvHtWSm22mzLoSqHD88M7UcSIpg3m0d9xKcjO7kE4sTA+iZKfEPSzSWVm6cQxwkdrVTNdXFms42fBlBjEt0smmiTItZBrLSQ6hcL7H+RRNvm8IJK46Ftm5Etxb0q5HxdFbTYbnEwUIXB20ulsWrOeORuHdVwyHtejE8yUVihJdtc1vyUV08d2Y4OZPsJE65F7//igQhp5/MOyimtPXy3jkjfVuY2Vf9FMOn2cn7sURse7pBTNwVSviDOpFatFH8af0J0TrrtCjJ8ok9Pe+I0B6thWvAd6Jt2DBRXl4iUufUiOk3w3iYfFjULQvkh5E/C/duHSV7dfy8OYBZ/dQUfFwofq//QkZ/saH3yl/nPR/b0n8Zh2/Tc/EbM42vqpi8uk70m90kRK6Fg2VOvP3NFJ8yUbLISf0JJ7e0Nq7vMnPfq+fESSOyU2JoopWFm2TGFkSQtEBHZbdwBjdZ6Vpr4p3loVREqvhAY2Gsy5+dxwM41XBblDfdFe2vhrMsSWKAot2Kt8P4a5CBqA3BfJjaKD7tofQTF0jyGStLkj30SY5hem8d/dequNbbQ029kf61EoMbZTb3dyD1M7I9Qaa2s0TMRidNRU4ef26j1XIbwZduidNTVUQdMnD0LRV9Pwwi7YiRgpFGoocZiZwl8Xm2mo4umaD8SNoPsFPYKFGq6Dyrv4G0cY9FecoNUZRzVMzN0nAvR0vu5CCSekYSGWhhXbabBWlPkfKsmj6pVuK/cGGttWN7y8P7A5T+/Ky0uS3xUi+ZM1/aWfVQj/qnCPyOmbmeqfhXCudmGwnfXImmchdXrjlYXeHggwkWZpw18cyyaBL22RnVzU7tfTfnfSamjDBz1yvxcr2LLgMlpu/RcnKcmZNmE4NlM2klSh8LDEwYr6dpgIb5Nh3d2xnILbDQ9I6J5iuKN2JDWBinocmj5eaKCHYs1eG/2cyMPDvb40y4ksMZsl/CO9nEtVIHly4EskXx+IrFkagO2ChZEc6Xt0I59MCfl3Ot5A8z8M4BDf+eXS++nRdBUJmZzIUazp7TMECtoUOgnmcvWZEdKmbNNPKM4s8x82X6xoTzeQcrBx/JXFli5JHTwKSP7NzMuCeiCxtEx8+C2Ryj5q05Krr20nMuJ4qxtU6OrXAT0zZSYYULY24sOxNMTE6I5FxzLC+1jGVNlZFSlZX18620u2Cn7fsS3ZMiGbrIQtFzDuavs9HnoY1DE63kpRnZa3XT5qSDrOUuls61cbNUYmO2hjmzLEg1et7PfizkJDWDgh2szVA4sM/Dtet2OrZ2se2+jKXZxh6clBWbafejiezvVYRUNIrKqWEc266lukDx1yUd2UUubGo71WtsaFLMPCwNQV6m47XdGlqgJecrHc4MPb4yN+YbMtl3nJR2dbEOFztToqn5xItttYT80Erfd910UNhW8bydaX9z8Eq9wpG9Mfg/HYNtp0So4o1eCRrubjCycqmG+3f8OGq28fIOmW2V0QR+6+VLu5edt6MZUuXC296D5WsHt8/LrFY0kjVVz+kTEvHXI6jeHEJFlT8Te0RQ1hTOqR9lukfqufVpKCN2Notpw/x5duRFcXJsndh1uFo8rJDZuDCaCY9d5P/kYFKKjF4bScNSF90bvQxRGOjJjmVyXDS++Dgur7Pw9Cg3GdoYFppjKN4bxfdKP+MVzST7vFzQePAImXNb3DQ3tSDg5lOsnNkg2oXFYnXGMGP8H1n7v2JqXoEYXWRhdZ+I9N/X9qX+8ISRc+85SV1oI2GWi3Z9IjGddpPui6XlhRjKrpto/djCboOF4WUGJrfyidNBZrrXO6Amim61MQQvcnBloVs5ZywhrfLFi/4eOr/ZgqzDJenrtUuEd4rChNBPRXFtGG9kBzLwbINwZCre9L8qhha1pLzYSOUqC74Be8TrMVvE2i/Oi0cRyptVFCciD78n1MFHxLhJehrnfyY2VV8Qf8l7JPYsWSkeLHASEZLInHU6xmzWMakqiH82zhbezIfi1R1+DJ8Xwuv1agyfBNN64G2R/d5xMV99XBzV+rqs+jijy6qLtcJ8dbaYOvSI2JpzM23S4YFizqmtIjpkXPorb/xL9DM2C7/wx6JDXShBvYLZ+ZU/lbV3RPSJI6J+UzCFqcGMmXdNtB3jEyVKvUEXa8TzIwLpGbZXbK1eLSz7H4lRj7SktNZRvGeESB/iE/oR9WJkzlVRbAhk7YKDYtipapFnOSuycq+I3NgaIY00cOprFVJdhMLNYOJHSKTd1+K9q+JBpzrRfvw98Vquwon1wegqQxmaqWKxRkfUVg1vLjPy1RY/UtN++09l9v5etNkXyn7l3f1/+pi25RWRsv2PWkre9dse4TXPifixAaTmtxGvFSxJz6/6u+h2o/aJjp6ZWSGka4PF6EEy4RUJ6QcWlzzJXQ440+WXkfLOYtbovenbZn4jsl+aKf4Dc4iCdw==TimeStep0.000000NIFTI_XFORM_UNKNOWNNIFTI_XFORM_UNKNOWN 1.000000 0.000000 0.000000 0.000000 + 0.000000 1.000000 0.000000 0.000000 + 0.000000 0.000000 1.000000 0.000000 + 0.000000 0.000000 0.000000 
1.000000eJx9lnlY1VUexuHihQt34a6/372/u3CDCxIoipV7qPd8zFwKHXPJXEqxMFP0MRUVRcVlpDRt0qTcUscYVMQlx8xxS1NBUAlzyXRExUmJUKBUVOaO89Tz9MfM95/znOX5fs95z/u+50ydGUpoowfbO3Zub2wUDzPNZL7h48oFO0GBSPohmom3Hgu63xfDl5wWY/ap2VkqMberia2Jwbzyk4syrGQtd/F1oo399xVWL9QwcK0b+3tO7rRzsvpOBL5nZFb1iaI+NYTxqQoj5z/F30/p2dNW5uiCsCe15uo8fH53nqh8t1iM3DZfqKbUinPjdE/mciboWPq6hc8fuuk8wc71vTJXM63Edw3nSJqBfwTVi1EjHHx0TYe3ayQTy2X08yJxTjZxqJ+doV01nO9g5EimTKHbw4EdERxLviE65/k42EXBtN9BVnsLzVdFsr2NhGOgRL7kZlpgf8sXKtR2kEjeqMcwTk1xuJWDJTo6FjtoyAqjqnswqiIdj6YE8r9uIPiqk5e6PRLfFKmwtpfoNDSGohMeNqUr3HM7mIQLV4qDJYnNSK13kSM7aX/CROfuoUxaKbGh3oJ/j57MVrE0n+zh6rJYaoJdxBaGs+qelT6uKMJ7uijNjiCyyEbfDC1tSgvE4BQXyb1i2JGZgCwl8KDAzZcnXMw+Lz/B8LfImqr41/YuERkFMehLzDSbZ6XL9niGbwhwoEOhqCp4IHq5whm8tFactu0UiQPOi+Toi8If+0D0nlgnRu5PFG37HxRpqUPEhv7bRd7mG0J1QcMtd5mYdUfFwMR7wh9lYEkPPb5NIThrwtEOXyN+q3+p/RAxbP66J/3+3RT/f9o325tQldrwjNJQcuQnMb/tI9G9o4Qvx80RjY31dy3sCOA2THKxdrlM1zoLE22RjL1roovHxoedZaYNcrKl3EirCQYqxuqZUy0zUy9RMS2cWrWG67FWIruo+LBQRe7cOvFy6wYRO8JIReCO64MlntkdwalqE/Vlocw6+1hM9VqZbFWT2MlOr9FeZo2JIaFOjzRJQ9kODyU1Jra0sKN530mJw0VJvhl3b4WVeRYWhDj5+rFCpSSz/prM5kN1ImxnGCftZl7M0DAvU82lFRZU48ysuGdi2jqJgbcjyOmm8HxVFI39FfRXJKbvkxn7i4m+SY9FZdkt0WZshbhbokVaosf1XDNyjnlZprIxwechX3maYHsE3x2VeS3XRfefZOq+8VDxVye778i0iJOZmqNwMNhBrMVIwtsmLh+38HOwlVSrAX++jbvnJayLPLQKUpCvONg73saf51v5W59oLuQpeGMcFBPFL60thKZZSVsocaCZm/JaGw3tdbR+0UI7TUAfwkKnLRJntGZ6uCO57NWSP0ZPRJqRTT4bnX4wU7ldwtkmlInjIjhbpSX8kpFhTQYcD21UnVSwvmwmwhzJxnMS4QvMRLZyMqB5CDvOBzTu83JspYOyJCP1mRpmZKto9YXM7EQT0dVaDhTViL8EGWlfauGj21qC0nWsSYrgwZhIZmbYWTpDQ9FOC1cCmLSodfLwAwN1KTIL1ri4N9NKxmkz3xcotLY0iNOrG0XRJTWLayLou0RDqM9I5einuPSKB+PhgHemeYnr4KLF4jgaptvo0tdL5NlYnvs6Fvl9C2lHJK5EybRWOzB/LpP9i4f6Azaeba1wKtiOaqNMr3My7aLMKD08JBx28nGWB7vLzrYmiWKnnqafrQw4ZWSZ55HYmxrQ/TyF3D95SM6PIqe5gqOFiwmTFK567fTd7aS+c2D9Aitz08LY5X8o5h4OZ9D3WnzJYaR20lM40MXdfDvnAnsKn2rh5UFhLBxlwF2mJS9cj26fnt27InnUO1BjkYISyLmop5v6ajefDI5mYHkMa05KPLbb2RU4z9jAWRwHZEZJCq+/pBBniOG9lGj6PS2juBR2FEfgXxHgwldahirB5C6wMyCgm6xbXia97ePo9BhiVD6S09z07RFF1dNOvip24giRWTPIyAdvyGzMMTFsaCgDDwZT/auR7PMGts92sfqLSLbt1TD6+SaxPF2F+U6lSIivFbfVlWJ2QeBuC6LJ+dXDZy0V1hUpLLzoYU+lmy3DfWS86mNFk4+i7GjazmxOu3YBLxnt4ZtPo9k/9Snajvdy6lAUXxyW0c2K5ee4KNICPnNNHcBlXBzVHeN5q+y+8NT6WJ8SQ+5N6Q9e+7/iaNNHYpHWxvYXTP7fxm68WPG7R37wwMWpLx0M3uPi5lYP0zu6mTUiDovHx7BCC0avRO41KyMnm9GoGsUSYSWpRCG9fzTBGTF8JSvMXubmTmksk3XLRb9PPaxPj2Pvxm3+ghkrRcqPARwq1ouzAe21nKHihaUPxc7UAOen3BTHFyVSMtJCQrmN3FnHxNaOu8SvXS+LrZdkzvvjxbgbuaJyzhkxR2ck7rUN4p9XK8Wbx++LvJFrRaEz8OacfYYT5ZGU7zLQZ0Uznq3JEc0v14vnk4JIHxHGmmlaxk9RE3/1rlhU/Z1Qznwn9pobunXaNKhblblKXPxknkhff0ZUXa/u+mnTEDGm4x6xOSXdX1F4TFx//FgoixtFmUFDeSD3Yq2K9Gv1Iqn/GfGwnZrjEWpc7/wo6hY1ioS9QXRZd01Ub1eRaT8m4so2iG7e+2LEcR2eR1pWhb0pXrI9EP3SakXTxX+J6lIV38adFH0mXhXeC98Lw6EbouXJShH2iYmI7DA6Zhj5rE6NulYiPkhH8dAwGgbUCKO3QXgO1Iv3VqrRjw3n7P4w0loZuLlbh6nSzLuWYMrsyhNeLNUdEaNDdLwVbPm/PLmd/arI32z/w5p9z/43x/IhKeJM5xB6zmopNmUv9+ccXiakUVW/88iXd0jssb0meoa4WNIn3v+xbsfvczejfuj25K+V2VnMdR71x2btFru/nS3+De7VaiI=TimeStep0.000000NIFTI_XFORM_UNKNOWNNIFTI_XFORM_UNKNOWN 1.000000 0.000000 0.000000 0.000000 + 0.000000 1.000000 0.000000 0.000000 + 0.000000 0.000000 1.000000 0.000000 + 0.000000 0.000000 0.000000 
1.000000
[Remaining base64-gzip GIFTI data array payloads from the added test data file omitted: several TimeStep=0.000000 arrays, each with NIFTI_XFORM_UNKNOWN data and transform spaces and an identity 4x4 affine.]
\ No newline at end of file
diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py
index f1285d441d..2d60482c59 100644
--- a/nibabel/gifti/tests/test_gifti.py
+++ b/nibabel/gifti/tests/test_gifti.py
@@ -6,55 +6,82 @@
 import numpy as np
-import nibabel as nib
-from six import string_types
-from nibabel.gifti import (GiftiImage, GiftiDataArray, GiftiLabel,
-                           GiftiLabelTable, GiftiMetaData, GiftiNVPairs,
-                           GiftiCoordSystem)
-from nibabel.gifti.gifti import data_tag
-from nibabel.nifti1 import data_type_codes
-from nibabel.fileholders import FileHolder
-
-from numpy.testing import
(assert_array_almost_equal, - assert_array_equal) -from nose.tools import (assert_true, assert_false, assert_equal, assert_raises) -from nibabel.testing import clear_and_catch_warnings +from ... import load +from .. import (GiftiImage, GiftiDataArray, GiftiLabel, + GiftiLabelTable, GiftiMetaData, GiftiNVPairs, + GiftiCoordSystem) +from ..gifti import data_tag +from ...nifti1 import data_type_codes +from ...fileholders import FileHolder + +from numpy.testing import assert_array_almost_equal, assert_array_equal +import pytest +from ...testing import clear_and_catch_warnings, test_data from .test_parse_gifti_fast import (DATA_FILE1, DATA_FILE2, DATA_FILE3, DATA_FILE4, DATA_FILE5, DATA_FILE6) +import itertools +def test_agg_data(): + surf_gii_img = load(test_data('gifti', 'ascii.gii')) + func_gii_img = load(test_data('gifti', 'task.func.gii')) + shape_gii_img = load(test_data('gifti', 'rh.shape.curv.gii')) + # add timeseries data with intent code ``none`` + + point_data = surf_gii_img.get_arrays_from_intent('pointset')[0].data + triangle_data = surf_gii_img.get_arrays_from_intent('triangle')[0].data + func_da = func_gii_img.get_arrays_from_intent('time series') + func_data = np.column_stack(tuple(da.data for da in func_da)) + shape_data = shape_gii_img.get_arrays_from_intent('shape')[0].data + + assert surf_gii_img.agg_data() == (point_data, triangle_data) + assert_array_equal(func_gii_img.agg_data(), func_data) + assert_array_equal(shape_gii_img.agg_data(), shape_data) + + assert_array_equal(surf_gii_img.agg_data('pointset'), point_data) + assert_array_equal(surf_gii_img.agg_data('triangle'), triangle_data) + assert_array_equal(func_gii_img.agg_data('time series'), func_data) + assert_array_equal(shape_gii_img.agg_data('shape'), shape_data) + + assert surf_gii_img.agg_data('time series') == () + assert func_gii_img.agg_data('triangle') == () + assert shape_gii_img.agg_data('pointset') == () + + assert surf_gii_img.agg_data(('pointset', 'triangle')) == (point_data, triangle_data) + assert surf_gii_img.agg_data(('triangle', 'pointset')) == (triangle_data, point_data) + def test_gifti_image(): # Check that we're not modifying the default empty list in the default # arguments. 
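[Editor's note, not part of the patch: a minimal sketch of the new ``agg_data`` API exercised by ``test_agg_data`` above. The surface file name is hypothetical; any GIFTI file holding pointset and triangle arrays would behave the same way.]

import nibabel as nib

surf = nib.load('lh.pial.gii')                # hypothetical surface GIFTI file
coords, faces = surf.agg_data()               # all arrays, grouped by intent
coords = surf.agg_data('pointset')            # select a single intent by name
faces, coords = surf.agg_data(('triangle', 'pointset'))  # order follows the request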
gi = GiftiImage() - assert_equal(gi.darrays, []) - assert_equal(gi.meta.metadata, {}) - assert_equal(gi.labeltable.labels, []) + assert gi.darrays == [] + assert gi.meta.metadata == {} + assert gi.labeltable.labels == [] arr = np.zeros((2, 3)) gi.darrays.append(arr) # Now check we didn't overwrite the default arg gi = GiftiImage() - assert_equal(gi.darrays, []) + assert gi.darrays == [] # Test darrays / numDA gi = GiftiImage() - assert_equal(gi.numDA, 0) + assert gi.numDA == 0 # Test from numpy numeric array data = np.random.random((5,)) da = GiftiDataArray(data) gi.add_gifti_data_array(da) - assert_equal(gi.numDA, 1) + assert gi.numDA == 1 assert_array_equal(gi.darrays[0].data, data) # Test removing gi.remove_gifti_data_array(0) - assert_equal(gi.numDA, 0) + assert gi.numDA == 0 # Remove from empty gi = GiftiImage() gi.remove_gifti_data_array_by_intent(0) - assert_equal(gi.numDA, 0) + assert gi.numDA == 0 # Remove one gi = GiftiImage() @@ -62,113 +89,112 @@ def test_gifti_image(): gi.add_gifti_data_array(da) gi.remove_gifti_data_array_by_intent(3) - assert_equal(gi.numDA, 1, "data array should exist on 'missed' remove") + assert gi.numDA == 1, "data array should exist on 'missed' remove" gi.remove_gifti_data_array_by_intent(da.intent) - assert_equal(gi.numDA, 0) + assert gi.numDA == 0 def test_gifti_image_bad_inputs(): img = GiftiImage() # Try to set a non-data-array - assert_raises(TypeError, img.add_gifti_data_array, 'not-a-data-array') + pytest.raises(TypeError, img.add_gifti_data_array, 'not-a-data-array') # Try to set to non-table def assign_labeltable(val): img.labeltable = val - assert_raises(TypeError, assign_labeltable, 'not-a-table') + pytest.raises(TypeError, assign_labeltable, 'not-a-table') # Try to set to non-table def assign_metadata(val): img.meta = val - assert_raises(TypeError, assign_metadata, 'not-a-meta') + pytest.raises(TypeError, assign_metadata, 'not-a-meta') def test_dataarray_empty(): # Test default initialization of DataArray null_da = GiftiDataArray() - assert_equal(null_da.data, None) - assert_equal(null_da.intent, 0) - assert_equal(null_da.datatype, 0) - assert_equal(null_da.encoding, 3) - assert_equal(null_da.endian, 2 if sys.byteorder == 'little' else 1) - assert_equal(null_da.coordsys.dataspace, 0) - assert_equal(null_da.coordsys.xformspace, 0) + assert null_da.data is None + assert null_da.intent == 0 + assert null_da.datatype == 0 + assert null_da.encoding == 3 + assert null_da.endian == (2 if sys.byteorder == 'little' else 1) + assert null_da.coordsys.dataspace == 0 + assert null_da.coordsys.xformspace == 0 assert_array_equal(null_da.coordsys.xform, np.eye(4)) - assert_equal(null_da.ind_ord, 1) - assert_equal(null_da.meta.metadata, {}) - assert_equal(null_da.ext_fname, '') - assert_equal(null_da.ext_offset, 0) + assert null_da.ind_ord == 1 + assert null_da.meta.metadata == {} + assert null_da.ext_fname == '' + assert null_da.ext_offset == 0 def test_dataarray_init(): # Test non-default dataarray initialization gda = GiftiDataArray # shortcut - assert_equal(gda(None).data, None) + assert gda(None).data is None arr = np.arange(12, dtype=np.float32).reshape((3, 4)) assert_array_equal(gda(arr).data, arr) # Intents - assert_raises(KeyError, gda, intent=1) # Invalid code - assert_raises(KeyError, gda, intent='not an intent') # Invalid string - assert_equal(gda(intent=2).intent, 2) - assert_equal(gda(intent='correlation').intent, 2) - assert_equal(gda(intent='NIFTI_INTENT_CORREL').intent, 2) + pytest.raises(KeyError, gda, intent=1) # Invalid code + 
pytest.raises(KeyError, gda, intent='not an intent') # Invalid string + assert gda(intent=2).intent == 2 + assert gda(intent='correlation').intent == 2 + assert gda(intent='NIFTI_INTENT_CORREL').intent == 2 # Datatype - assert_equal(gda(datatype=2).datatype, 2) - assert_equal(gda(datatype='uint8').datatype, 2) - assert_raises(KeyError, gda, datatype='not_datatype') + assert gda(datatype=2).datatype == 2 + assert gda(datatype='uint8').datatype == 2 + pytest.raises(KeyError, gda, datatype='not_datatype') # Float32 datatype comes from array if datatype not set - assert_equal(gda(arr).datatype, 16) + assert gda(arr).datatype == 16 # Can be overriden by init - assert_equal(gda(arr, datatype='uint8').datatype, 2) + assert gda(arr, datatype='uint8').datatype == 2 # Encoding - assert_equal(gda(encoding=1).encoding, 1) - assert_equal(gda(encoding='ASCII').encoding, 1) - assert_equal(gda(encoding='GIFTI_ENCODING_ASCII').encoding, 1) - assert_raises(KeyError, gda, encoding='not an encoding') + assert gda(encoding=1).encoding == 1 + assert gda(encoding='ASCII').encoding == 1 + assert gda(encoding='GIFTI_ENCODING_ASCII').encoding == 1 + pytest.raises(KeyError, gda, encoding='not an encoding') # Endian - assert_equal(gda(endian=1).endian, 1) - assert_equal(gda(endian='big').endian, 1) - assert_equal(gda(endian='GIFTI_ENDIAN_BIG').endian, 1) - assert_raises(KeyError, gda, endian='not endian code') + assert gda(endian=1).endian == 1 + assert gda(endian='big').endian == 1 + assert gda(endian='GIFTI_ENDIAN_BIG').endian == 1 + pytest.raises(KeyError, gda, endian='not endian code') # CoordSys aff = np.diag([2, 3, 4, 1]) cs = GiftiCoordSystem(1, 2, aff) da = gda(coordsys=cs) - assert_equal(da.coordsys.dataspace, 1) - assert_equal(da.coordsys.xformspace, 2) + assert da.coordsys.dataspace == 1 + assert da.coordsys.xformspace == 2 assert_array_equal(da.coordsys.xform, aff) # Ordering - assert_equal(gda(ordering=2).ind_ord, 2) - assert_equal(gda(ordering='F').ind_ord, 2) - assert_equal(gda(ordering='ColumnMajorOrder').ind_ord, 2) - assert_raises(KeyError, gda, ordering='not an ordering') + assert gda(ordering=2).ind_ord == 2 + assert gda(ordering='F').ind_ord == 2 + assert gda(ordering='ColumnMajorOrder').ind_ord == 2 + pytest.raises(KeyError, gda, ordering='not an ordering') # metadata meta_dict=dict(one=1, two=2) - assert_equal(gda(meta=GiftiMetaData.from_dict(meta_dict)).meta.metadata, - meta_dict) - assert_equal(gda(meta=meta_dict).meta.metadata, meta_dict) - assert_equal(gda(meta=None).meta.metadata, {}) + assert gda(meta=GiftiMetaData.from_dict(meta_dict)).meta.metadata == meta_dict + assert gda(meta=meta_dict).meta.metadata == meta_dict + assert gda(meta=None).meta.metadata == {} # ext_fname and ext_offset - assert_equal(gda(ext_fname='foo').ext_fname, 'foo') - assert_equal(gda(ext_offset=12).ext_offset, 12) + assert gda(ext_fname='foo').ext_fname == 'foo' + assert gda(ext_offset=12).ext_offset == 12 def test_dataarray_from_array(): with clear_and_catch_warnings() as w: warnings.filterwarnings('always', category=DeprecationWarning) da = GiftiDataArray.from_array(np.ones((3, 4))) - assert_equal(len(w), 1) + assert len(w) == 1 for dt_code in data_type_codes.value_set(): data_type = data_type_codes.type[dt_code] if data_type is np.void: # not supported continue arr = np.zeros((10, 3), dtype=data_type) da = GiftiDataArray.from_array(arr, 'triangle') - assert_equal(da.datatype, data_type_codes[arr.dtype]) + assert da.datatype == data_type_codes[arr.dtype] bs_arr = arr.byteswap().newbyteorder() da = 
GiftiDataArray.from_array(bs_arr, 'triangle') - assert_equal(da.datatype, data_type_codes[arr.dtype]) + assert da.datatype == data_type_codes[arr.dtype] def test_to_xml_open_close_deprecations(): @@ -176,35 +202,35 @@ def test_to_xml_open_close_deprecations(): da = GiftiDataArray(np.ones((1,)), 'triangle') with clear_and_catch_warnings() as w: warnings.filterwarnings('always', category=DeprecationWarning) - assert_true(isinstance(da.to_xml_open(), string_types)) - assert_equal(len(w), 1) + assert isinstance(da.to_xml_open(), str) + assert len(w) == 1 with clear_and_catch_warnings() as w: warnings.filterwarnings('once', category=DeprecationWarning) - assert_true(isinstance(da.to_xml_close(), string_types)) - assert_equal(len(w), 1) + assert isinstance(da.to_xml_close(), str) + assert len(w) == 1 def test_num_dim_deprecation(): da = GiftiDataArray(np.ones((2, 3, 4))) # num_dim is property, set automatically from len(da.dims) - assert_equal(da.num_dim, 3) + assert da.num_dim == 3 with clear_and_catch_warnings() as w: warnings.filterwarnings('always', category=DeprecationWarning) # OK setting num_dim to correct value, but raises DeprecationWarning da.num_dim = 3 - assert_equal(len(w), 1) + assert len(w) == 1 # Any other value gives a ValueError - assert_raises(ValueError, setattr, da, 'num_dim', 4) + pytest.raises(ValueError, setattr, da, 'num_dim', 4) def test_labeltable(): img = GiftiImage() - assert_equal(len(img.labeltable.labels), 0) + assert len(img.labeltable.labels) == 0 new_table = GiftiLabelTable() new_table.labels += ['test', 'me'] img.labeltable = new_table - assert_equal(len(img.labeltable.labels), 2) + assert len(img.labeltable.labels) == 2 # Test deprecations with clear_and_catch_warnings() as w: @@ -212,23 +238,23 @@ def test_labeltable(): newer_table = GiftiLabelTable() newer_table.labels += ['test', 'me', 'again'] img.set_labeltable(newer_table) - assert_equal(len(w), 1) - assert_equal(len(img.get_labeltable().labels), 3) - assert_equal(len(w), 2) + assert len(w) == 1 + assert len(img.get_labeltable().labels) == 3 + assert len(w) == 2 def test_metadata(): nvpair = GiftiNVPairs('key', 'value') md = GiftiMetaData(nvpair=nvpair) - assert_equal(md.data[0].name, 'key') - assert_equal(md.data[0].value, 'value') + assert md.data[0].name == 'key' + assert md.data[0].value == 'value' # Test deprecation with clear_and_catch_warnings() as w: warnings.filterwarnings('always', category=DeprecationWarning) - assert_equal(md.get_metadata(), dict(key='value')) - assert_equal(len(w), 1) - assert_equal(len(GiftiDataArray().get_metadata()), 0) - assert_equal(len(w), 2) + assert md.get_metadata() == dict(key='value') + assert len(w) == 1 + assert len(GiftiDataArray().get_metadata()) == 0 + assert len(w) == 2 def test_gifti_label_rgba(): @@ -239,44 +265,44 @@ def test_gifti_label_rgba(): assert_array_equal(rgba, gl1.rgba) gl1.red = 2 * gl1.red - assert_false(np.allclose(rgba, gl1.rgba)) # don't just store the list! + assert not np.allclose(rgba, gl1.rgba) # don't just store the list! gl2 = GiftiLabel() gl2.rgba = rgba assert_array_equal(rgba, gl2.rgba) gl2.blue = 2 * gl2.blue - assert_false(np.allclose(rgba, gl2.rgba)) # don't just store the list! + assert not np.allclose(rgba, gl2.rgba) # don't just store the list! 
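[Editor's note, not part of the patch: a rough usage sketch combining the constructor arguments covered above (intent, datatype, encoding) with the in-memory round trip that the dtype-coercion test added later in this file relies on.]

import numpy as np
from nibabel.gifti import GiftiImage, GiftiDataArray

da = GiftiDataArray(np.arange(10, dtype=np.int32),
                    intent='NIFTI_INTENT_NODE_INDEX',   # intent accepted as code or name
                    datatype='int32',
                    encoding='GIFTI_ENCODING_ASCII')
img = GiftiImage(darrays=[da])
img2 = GiftiImage.from_bytes(img.to_bytes())             # serialize and re-parse in memory
assert np.array_equal(img2.darrays[0].data, da.data)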
def assign_rgba(gl, val): gl.rgba = val gl3 = GiftiLabel(**kwargs) - assert_raises(ValueError, assign_rgba, gl3, rgba[:2]) - assert_raises(ValueError, assign_rgba, gl3, rgba.tolist() + rgba.tolist()) + pytest.raises(ValueError, assign_rgba, gl3, rgba[:2]) + pytest.raises(ValueError, assign_rgba, gl3, rgba.tolist() + rgba.tolist()) # Test deprecation with clear_and_catch_warnings() as w: warnings.filterwarnings('once', category=DeprecationWarning) - assert_equal(kwargs['red'], gl3.get_rgba()[0]) - assert_equal(len(w), 1) + assert kwargs['red'] == gl3.get_rgba()[0] + assert len(w) == 1 # Test default value gl4 = GiftiLabel() - assert_equal(len(gl4.rgba), 4) - assert_true(np.all([elem is None for elem in gl4.rgba])) + assert len(gl4.rgba) == 4 + assert np.all([elem is None for elem in gl4.rgba]) def test_print_summary(): for fil in [DATA_FILE1, DATA_FILE2, DATA_FILE3, DATA_FILE4, DATA_FILE5, DATA_FILE6]: - gimg = nib.load(fil) + gimg = load(fil) gimg.print_summary() def test_gifti_coord(): from ..gifti import GiftiCoordSystem gcs = GiftiCoordSystem() - assert_true(gcs.xform is not None) + assert gcs.xform is not None # Smoke test gcs.xform = None @@ -288,7 +314,7 @@ def test_data_tag_deprecated(): with clear_and_catch_warnings() as w: warnings.filterwarnings('once', category=DeprecationWarning) data_tag(np.array([]), 'ASCII', '%i', 1) - assert_equal(len(w), 1) + assert len(w) == 1 def test_gifti_round_trip(): @@ -400,3 +426,20 @@ def test_data_array_round_trip(): gio = GiftiImage.from_file_map(fmap) vertices = gio.darrays[0].data assert_array_equal(vertices, verts) + + +def test_darray_dtype_coercion_failures(): + dtypes = (np.uint8, np.int32, np.int64, np.float32, np.float64) + encodings = ('ASCII', 'B64BIN', 'B64GZ') + for data_dtype, darray_dtype, encoding in itertools.product(dtypes, + dtypes, + encodings): + da = GiftiDataArray(np.arange(10).astype(data_dtype), + encoding=encoding, + intent='NIFTI_INTENT_NODE_INDEX', + datatype=darray_dtype) + gii = GiftiImage(darrays=[da]) + gii_copy = GiftiImage.from_bytes(gii.to_bytes()) + da_copy = gii_copy.darrays[0] + assert np.dtype(da_copy.data.dtype) == np.dtype(darray_dtype) + assert_array_equal(da_copy.data, da.data) diff --git a/nibabel/gifti/tests/test_giftiio.py b/nibabel/gifti/tests/test_giftiio.py index 90fe3e4d37..8269618b0c 100644 --- a/nibabel/gifti/tests/test_giftiio.py +++ b/nibabel/gifti/tests/test_giftiio.py @@ -7,33 +7,16 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -import warnings +from ..giftiio import read, write +from .test_parse_gifti_fast import DATA_FILE1 -from nose.tools import (assert_true, assert_false, assert_equal, - assert_raises) -from nibabel.testing import clear_and_catch_warnings -from nibabel.tmpdirs import InTemporaryDirectory +import pytest -from .test_parse_gifti_fast import (DATA_FILE1, DATA_FILE2, DATA_FILE3, - DATA_FILE4, DATA_FILE5, DATA_FILE6) - - -class TestGiftiIO(object): - - def setUp(self): - with clear_and_catch_warnings() as w: - warnings.simplefilter('always', DeprecationWarning) - assert_equal(len(w), 1) - - -def test_read_deprecated(): - with clear_and_catch_warnings() as w: - warnings.simplefilter('always', DeprecationWarning) - from nibabel.gifti.giftiio import read, write - +def test_read_deprecated(tmp_path): + with pytest.deprecated_call(): img = read(DATA_FILE1) - assert_equal(len(w), 1) - with InTemporaryDirectory(): - write(img, 'test.gii') - assert_equal(len(w), 2) + + fname = tmp_path / 'test.gii' + with pytest.deprecated_call(): + write(img, 
fname) diff --git a/nibabel/gifti/tests/test_parse_gifti_fast.py b/nibabel/gifti/tests/test_parse_gifti_fast.py index 9adc03d8fd..54d8e78621 100644 --- a/nibabel/gifti/tests/test_parse_gifti_fast.py +++ b/nibabel/gifti/tests/test_parse_gifti_fast.py @@ -6,7 +6,6 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -from __future__ import division, print_function, absolute_import from os.path import join as pjoin, dirname import sys @@ -14,17 +13,16 @@ import numpy as np -import nibabel.gifti as gi -from nibabel.gifti.util import gifti_endian_codes -from nibabel.gifti.parse_gifti_fast import Outputter, parse_gifti_file -from nibabel.loadsave import load, save -from nibabel.nifti1 import xform_codes -from nibabel.tmpdirs import InTemporaryDirectory +from .. import gifti as gi +from ..util import gifti_endian_codes +from ..parse_gifti_fast import Outputter, parse_gifti_file +from ...loadsave import load, save +from ...nifti1 import xform_codes +from ...tmpdirs import InTemporaryDirectory from numpy.testing import assert_array_almost_equal -from nose.tools import (assert_true, assert_false, assert_equal, - assert_raises) +import pytest from ...testing import clear_and_catch_warnings @@ -107,9 +105,9 @@ def assert_default_types(loaded): if defaulttype is type(None): continue loadedtype = type(getattr(loaded, attr)) - assert_equal(loadedtype, defaulttype, - "Type mismatch for attribute: {} ({!s} != {!s})".format( - attr, loadedtype, defaulttype)) + assert loadedtype == defaulttype, ( + "Type mismatch for attribute: {} ({!s} != {!s})".format( + attr, loadedtype, defaulttype)) def test_default_types(): @@ -143,18 +141,18 @@ def test_read_ordering(): # read another image first (DATA_FILE2) then the shape is wrong # Read an image img2 = load(DATA_FILE2) - assert_equal(img2.darrays[0].data.shape, (143479, 1)) + assert img2.darrays[0].data.shape == (143479, 1) # Read image for which we know output shape img = load(DATA_FILE1) - assert_equal(img.darrays[0].data.shape, (3, 3)) + assert img.darrays[0].data.shape == (3, 3) def test_load_metadata(): for i, dat in enumerate(datafiles): img = load(dat) img.meta - assert_equal(numDA[i], img.numDA) - assert_equal(img.version, '1.0') + assert numDA[i] == img.numDA + assert img.version == '1.0' def test_metadata_deprecations(): @@ -164,12 +162,12 @@ def test_metadata_deprecations(): # Test deprecation with clear_and_catch_warnings() as w: warnings.filterwarnings('once', category=DeprecationWarning) - assert_equal(me, img.get_meta()) + assert me == img.get_meta() with clear_and_catch_warnings() as w: warnings.filterwarnings('once', category=DeprecationWarning) img.set_metadata(me) - assert_equal(me, img.meta) + assert me == img.meta def test_load_dataarray1(): @@ -182,14 +180,14 @@ def test_load_dataarray1(): assert_array_almost_equal(img.darrays[0].data, DATA_FILE1_darr1) assert_array_almost_equal(img.darrays[1].data, DATA_FILE1_darr2) me = img.darrays[0].meta.metadata - assert_true('AnatomicalStructurePrimary' in me) - assert_true('AnatomicalStructureSecondary' in me) - assert_equal(me['AnatomicalStructurePrimary'], 'CortexLeft') + assert 'AnatomicalStructurePrimary' in me + assert 'AnatomicalStructureSecondary' in me + me['AnatomicalStructurePrimary'] == 'CortexLeft' assert_array_almost_equal(img.darrays[0].coordsys.xform, np.eye(4, 4)) - assert_equal(xform_codes.niistring[img.darrays[ - 0].coordsys.dataspace], 'NIFTI_XFORM_TALAIRACH') - assert_equal(xform_codes.niistring[img.darrays[ - 
0].coordsys.xformspace], 'NIFTI_XFORM_TALAIRACH') + assert xform_codes.niistring[ + img.darrays[0].coordsys.dataspace] == 'NIFTI_XFORM_TALAIRACH' + assert xform_codes.niistring[img.darrays[ + 0].coordsys.xformspace] == 'NIFTI_XFORM_TALAIRACH' def test_load_dataarray2(): @@ -224,7 +222,7 @@ def test_load_dataarray4(): def test_dataarray5(): img5 = load(DATA_FILE5) for da in img5.darrays: - assert_equal(gifti_endian_codes.byteorder[da.endian], 'little') + gifti_endian_codes.byteorder[da.endian] == 'little' assert_array_almost_equal(img5.darrays[0].data, DATA_FILE5_darr1) assert_array_almost_equal(img5.darrays[1].data, DATA_FILE5_darr2) # Round trip tested below @@ -235,24 +233,24 @@ def test_base64_written(): with open(DATA_FILE5, 'rb') as fobj: contents = fobj.read() # Confirm the bad tags are still in the file - assert_true(b'GIFTI_ENCODING_B64BIN' in contents) - assert_true(b'GIFTI_ENDIAN_LITTLE' in contents) + assert b'GIFTI_ENCODING_B64BIN' in contents + assert b'GIFTI_ENDIAN_LITTLE' in contents # The good ones are missing - assert_false(b'Base64Binary' in contents) - assert_false(b'LittleEndian' in contents) + assert b'Base64Binary' not in contents + assert b'LittleEndian' not in contents # Round trip img5 = load(DATA_FILE5) save(img5, 'fixed.gii') with open('fixed.gii', 'rb') as fobj: contents = fobj.read() # The bad codes have gone, replaced by the good ones - assert_false(b'GIFTI_ENCODING_B64BIN' in contents) - assert_false(b'GIFTI_ENDIAN_LITTLE' in contents) - assert_true(b'Base64Binary' in contents) + assert b'GIFTI_ENCODING_B64BIN' not in contents + assert b'GIFTI_ENDIAN_LITTLE' not in contents + assert b'Base64Binary' in contents if sys.byteorder == 'little': - assert_true(b'LittleEndian' in contents) + assert b'LittleEndian' in contents else: - assert_true(b'BigEndian' in contents) + assert b'BigEndian' in contents img5_fixed = load('fixed.gii') darrays = img5_fixed.darrays assert_array_almost_equal(darrays[0].data, DATA_FILE5_darr1) @@ -264,10 +262,17 @@ def test_readwritedata(): with InTemporaryDirectory(): save(img, 'test.gii') img2 = load('test.gii') - assert_equal(img.numDA, img2.numDA) + assert img.numDA == img2.numDA assert_array_almost_equal(img.darrays[0].data, img2.darrays[0].data) +def test_modify_darray(): + for fname in (DATA_FILE1, DATA_FILE2, DATA_FILE5): + img = load(fname) + darray = img.darrays[0] + darray.data[:] = 0 + assert np.array_equiv(darray.data, 0) + def test_write_newmetadata(): img = gi.GiftiImage() @@ -275,32 +280,32 @@ def test_write_newmetadata(): newmeta = gi.GiftiMetaData(attr) img.meta = newmeta myme = img.meta.metadata - assert_true('mykey' in myme) + assert 'mykey' in myme newmeta = gi.GiftiMetaData.from_dict({'mykey1': 'val2'}) img.meta = newmeta myme = img.meta.metadata - assert_true('mykey1' in myme) - assert_false('mykey' in myme) + assert 'mykey1' in myme + assert 'mykey' not in myme def test_load_getbyintent(): img = load(DATA_FILE1) da = img.get_arrays_from_intent("NIFTI_INTENT_POINTSET") - assert_equal(len(da), 1) + assert len(da) == 1 with clear_and_catch_warnings() as w: warnings.filterwarnings('once', category=DeprecationWarning) da = img.getArraysFromIntent("NIFTI_INTENT_POINTSET") - assert_equal(len(da), 1) - assert_equal(len(w), 1) - assert_equal(w[0].category, DeprecationWarning) + assert len(da) == 1 + assert len(w) == 1 + w[0].category == DeprecationWarning da = img.get_arrays_from_intent("NIFTI_INTENT_TRIANGLE") - assert_equal(len(da), 1) + assert len(da) == 1 da = img.get_arrays_from_intent("NIFTI_INTENT_CORREL") - 
assert_equal(len(da), 0) - assert_equal(da, []) + assert len(da) == 0 + assert da == [] def test_load_labeltable(): @@ -311,15 +316,15 @@ def test_load_labeltable(): bimg = load('test.gii') for img in (img6, bimg): assert_array_almost_equal(img.darrays[0].data[:3], DATA_FILE6_darr1) - assert_equal(len(img.labeltable.labels), 36) + assert len(img.labeltable.labels) == 36 labeldict = img.labeltable.get_labels_as_dict() - assert_true(660700 in labeldict) - assert_equal(labeldict[660700], 'entorhinal') - assert_equal(img.labeltable.labels[1].key, 2647065) - assert_equal(img.labeltable.labels[1].red, 0.0980392) - assert_equal(img.labeltable.labels[1].green, 0.392157) - assert_equal(img.labeltable.labels[1].blue, 0.156863) - assert_equal(img.labeltable.labels[1].alpha, 1) + assert 660700 in labeldict + assert labeldict[660700] == 'entorhinal' + assert img.labeltable.labels[1].key == 2647065 + assert img.labeltable.labels[1].red == 0.0980392 + assert img.labeltable.labels[1].green == 0.392157 + assert img.labeltable.labels[1].blue == 0.156863 + assert img.labeltable.labels[1].alpha == 1 def test_labeltable_deprecations(): @@ -329,14 +334,14 @@ def test_labeltable_deprecations(): # Test deprecation with clear_and_catch_warnings() as w: warnings.filterwarnings('always', category=DeprecationWarning) - assert_equal(lt, img.get_labeltable()) - assert_equal(len(w), 1) + assert lt == img.get_labeltable() + assert len(w) == 1 with clear_and_catch_warnings() as w: warnings.filterwarnings('always', category=DeprecationWarning) img.set_labeltable(lt) - assert_equal(len(w), 1) - assert_equal(lt, img.labeltable) + assert len(w) == 1 + assert lt == img.labeltable def test_parse_dataarrays(): @@ -355,8 +360,8 @@ def test_parse_dataarrays(): with clear_and_catch_warnings() as w: warnings.filterwarnings('once', category=UserWarning) load(fn) - assert_equal(len(w), 1) - assert_equal(img.numDA, 0) + assert len(w) == 1 + assert img.numDA == 0 def test_parse_deprecated(): @@ -365,16 +370,16 @@ def test_parse_deprecated(): with clear_and_catch_warnings() as w: warnings.filterwarnings('always', category=DeprecationWarning) op = Outputter() - assert_equal(len(w), 1) + assert len(w) == 1 op.initialize() # smoke test--no error. with clear_and_catch_warnings() as w: warnings.filterwarnings('always', category=DeprecationWarning) - assert_raises(ValueError, parse_gifti_file) - assert_equal(len(w), 1) + pytest.raises(ValueError, parse_gifti_file) + assert len(w) == 1 def test_parse_with_buffersize(): for buff_sz in [None, 1, 2**12]: img2 = load(DATA_FILE2, buffer_size=buff_sz) - assert_equal(img2.darrays[0].data.shape, (143479, 1)) + assert img2.darrays[0].data.shape == (143479, 1) diff --git a/nibabel/info.py b/nibabel/info.py index abe71735cd..bafc3f6adb 100644 --- a/nibabel/info.py +++ b/nibabel/info.py @@ -1,98 +1,26 @@ -""" Define distrubution parameters for nibabel, including package version +""" Define distribution parameters for nibabel, including package version -This file contains defines parameters for nibabel that we use to fill settings -in setup.py, the nibabel top-level docstring, and for building the docs. In -setup.py in particular, we exec this file, so it cannot import nibabel. +The long description parameter is used to fill settings in setup.py, the +nibabel top-level docstring, and in building the docs. +We exec this file in several places, so it cannot import nibabel or use +relative imports. """ -import re -from distutils.version import StrictVersion - -# nibabel version information. 
An empty _version_extra corresponds to a -# full release. *Any string* in `_version_extra` labels the version as -# pre-release. So, if `_version_extra` is not empty, the version is taken to -# be earlier than the same version where `_version_extra` is empty (see -# `cmp_pkg_version` below). -# -# We usually use `dev` as `_version_extra` to label this as a development -# (pre-release) version. -_version_major = 2 -_version_minor = 4 +# nibabel version information +# This is a fall-back for versioneer when installing from a git archive. +# This should be set to the intended next version + dev to indicate a +# development (pre-release) version. +_version_major = 3 +_version_minor = 1 _version_micro = 0 _version_extra = 'dev' -# _version_extra = '' # Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z" -__version__ = "%s.%s.%s%s" % (_version_major, - _version_minor, - _version_micro, - _version_extra) - - -def _parse_version(version_str): - """ Parse version string `version_str` in our format - """ - match = re.match(r'([0-9.]*\d)(.*)', version_str) - if match is None: - raise ValueError('Invalid version ' + version_str) - return match.groups() - - -def _cmp(a, b): - """ Implementation of ``cmp`` for Python 3 - """ - return (a > b) - (a < b) - - -def cmp_pkg_version(version_str, pkg_version_str=__version__): - """ Compare `version_str` to current package version - - To be valid, a version must have a numerical major version followed by a - dot, followed by a numerical minor version. It may optionally be followed - by a dot and a numerical micro version, and / or by an "extra" string. - *Any* extra string labels the version as pre-release, so `1.2.0somestring` - compares as prior to (pre-release for) `1.2.0`, where `somestring` can be - any string. - - Parameters - ---------- - version_str : str - Version string to compare to current package version - pkg_version_str : str, optional - Version of our package. Optional, set fom ``__version__`` by default. - - Returns - ------- - version_cmp : int - 1 if `version_str` is a later version than `pkg_version_str`, 0 if - same, -1 if earlier. - - Examples - -------- - >>> cmp_pkg_version('1.2.1', '1.2.0') - 1 - >>> cmp_pkg_version('1.2.0dev', '1.2.0') - -1 - """ - version, extra = _parse_version(version_str) - pkg_version, pkg_extra = _parse_version(pkg_version_str) - if version != pkg_version: - return _cmp(StrictVersion(version), StrictVersion(pkg_version)) - return (0 if extra == pkg_extra - else 1 if extra == '' - else -1 if pkg_extra == '' - else _cmp(extra, pkg_extra)) - - -CLASSIFIERS = ["Development Status :: 4 - Beta", - "Environment :: Console", - "Intended Audience :: Science/Research", - "License :: OSI Approved :: MIT License", - "Operating System :: OS Independent", - "Programming Language :: Python", - "Topic :: Scientific/Engineering"] - -description = 'Access a multitude of neuroimaging data formats' +VERSION = "%s.%s.%s%s" % (_version_major, + _version_minor, + _version_micro, + _version_extra) + # Note: this long_description is the canonical place to edit this text. # It also appears in README.rst, but it should get there by running @@ -108,14 +36,16 @@ def cmp_pkg_version(version_str, pkg_version_str=__version__): This package provides read +/- write access to some common medical and neuroimaging file formats, including: ANALYZE_ (plain, SPM99, SPM2 and later), -GIFTI_, NIfTI1_, NIfTI2_, MINC1_, MINC2_, MGH_ and ECAT_ as well as Philips -PAR/REC. 
We can read and write FreeSurfer_ geometry, annotation and -morphometry files. There is some very limited support for DICOM_. NiBabel is -the successor of PyNIfTI_. +GIFTI_, NIfTI1_, NIfTI2_, `CIFTI-2`_, MINC1_, MINC2_, `AFNI BRIK/HEAD`_, MGH_ and +ECAT_ as well as Philips PAR/REC. We can read and write FreeSurfer_ geometry, +annotation and morphometry files. There is some very limited support for +DICOM_. NiBabel is the successor of PyNIfTI_. .. _ANALYZE: http://www.grahamwideman.com/gw/brain/analyze/formatdoc.htm +.. _AFNI BRIK/HEAD: https://afni.nimh.nih.gov/pub/dist/src/README.attributes .. _NIfTI1: http://nifti.nimh.nih.gov/nifti-1/ .. _NIfTI2: http://nifti.nimh.nih.gov/nifti-2/ +.. _CIFTI-2: https://www.nitrc.org/projects/cifti/ .. _MINC1: https://en.wikibooks.org/wiki/MINC/Reference/MINC1_File_Format_Reference .. _MINC2: @@ -181,34 +111,3 @@ def cmp_pkg_version(version_str, pkg_version_str=__version__): .. _zenodo: https://zenodo.org .. _Digital Object Identifier: https://en.wikipedia.org/wiki/Digital_object_identifier """ - -# versions for dependencies. Check these against: -# doc/source/installation.rst -# requirements.txt -# .travis.yml -NUMPY_MIN_VERSION = '1.7.1' -PYDICOM_MIN_VERSION = '0.9.9' -SIX_MIN_VERSION = '1.3' - -# Main setup parameters -NAME = 'nibabel' -MAINTAINER = "Matthew Brett, Michael Hanke, Eric Larson, Chris Markiewicz" -MAINTAINER_EMAIL = "neuroimaging@python.org" -DESCRIPTION = description -LONG_DESCRIPTION = long_description -URL = "http://nipy.org/nibabel" -DOWNLOAD_URL = "https://github.com/nipy/nibabel" -LICENSE = "MIT license" -CLASSIFIERS = CLASSIFIERS -AUTHOR = "nibabel developers" -AUTHOR_EMAIL = "neuroimaging@python.org" -PLATFORMS = "OS Independent" -MAJOR = _version_major -MINOR = _version_minor -MICRO = _version_micro -ISRELEASE = _version_extra == '' -VERSION = __version__ -PROVIDES = ["nibabel", 'nisext'] -REQUIRES = ["numpy>=%s" % NUMPY_MIN_VERSION, - "six>=%s" % SIX_MIN_VERSION, - 'bz2file; python_version < "3.0"'] diff --git a/nibabel/keywordonly.py b/nibabel/keywordonly.py index 8cb4908c1e..198e70f2c9 100644 --- a/nibabel/keywordonly.py +++ b/nibabel/keywordonly.py @@ -2,6 +2,13 @@ """ from functools import wraps +import warnings + +warnings.warn("We will remove this module from nibabel 5.0. 
" + "Please use the built-in Python `*` argument to ensure " + "keyword-only parameters (see PEP 3102).", + DeprecationWarning, + stacklevel=2) def kw_only_func(n): diff --git a/nibabel/loadsave.py b/nibabel/loadsave.py index 8c3041e73c..f8c3e3be0b 100644 --- a/nibabel/loadsave.py +++ b/nibabel/loadsave.py @@ -12,21 +12,20 @@ import os import numpy as np -from .filename_parser import splitext_addext +from .filename_parser import splitext_addext, _stringify_path from .openers import ImageOpener from .filebasedimages import ImageFileError from .imageclasses import all_image_classes from .arrayproxy import is_proxy -from .py3k import FileNotFoundError from .deprecated import deprecate_with_version def load(filename, **kwargs): - ''' Load file given filename, guessing at file type + r''' Load file given filename, guessing at file type Parameters ---------- - filename : string + filename : str or os.PathLike specification of file to load \*\*kwargs : keyword arguments Keyword arguments to format-specific load @@ -36,12 +35,16 @@ def load(filename, **kwargs): img : ``SpatialImage`` Image of guessed type ''' + filename = _stringify_path(filename) + + # Check file exists and is not empty try: stat_result = os.stat(filename) except OSError: raise FileNotFoundError("No such file or no access: '%s'" % filename) if stat_result.st_size <= 0: raise ImageFileError("Empty file: '%s'" % filename) + sniff = None for image_klass in all_image_classes: is_valid, sniff = image_klass.path_maybe_image(filename, sniff) @@ -86,13 +89,14 @@ def save(img, filename): ---------- img : ``SpatialImage`` image to save - filename : str + filename : str or os.PathLike filename (often implying filenames) to which to save `img`. Returns ------- None ''' + filename = _stringify_path(filename) # Save the type as expected try: @@ -156,7 +160,7 @@ def read_img_data(img, prefer='scaled'): """ Read data from image associated with files If you want unscaled data, please use ``img.dataobj.get_unscaled()`` - instead. If you want scaled data, use ``img.get_data()`` (which will cache + instead. If you want scaled data, use ``img.get_fdata()`` (which will cache the loaded array) or ``np.array(img.dataobj)`` (which won't cache the array). If you want to load the data as for a modified header, save the image with the modified header, and reload. @@ -165,7 +169,7 @@ def read_img_data(img, prefer='scaled'): ---------- img : ``SpatialImage`` Image with valid image file in ``img.file_map``. Unlike the - ``img.get_data()`` method, this function returns the data read + ``img.get_fdata()`` method, this function returns the data read from the image file, as specified by the *current* image header and *current* image files. 
prefer : str, optional diff --git a/nibabel/minc.py b/nibabel/minc.py deleted file mode 100644 index 94e8da57fc..0000000000 --- a/nibabel/minc.py +++ /dev/null @@ -1,10 +0,0 @@ -""" Deprecated MINC1 module """ - -import warnings - -warnings.warn("We will remove this module from nibabel soon; " - "Please use the 'minc1' module instead", - FutureWarning, - stacklevel=2) - -from .minc1 import * # noqa diff --git a/nibabel/minc1.py b/nibabel/minc1.py index 57042f32f0..6dfe7dde67 100644 --- a/nibabel/minc1.py +++ b/nibabel/minc1.py @@ -17,7 +17,7 @@ from .spatialimages import SpatialHeader, SpatialImage from .fileslice import canonical_slicers -from .deprecated import FutureWarningMixin +from .deprecated import deprecate_with_version _dt_dict = { ('b', 'unsigned'): np.uint8, @@ -172,7 +172,7 @@ def _normalize(self, data, sliceobj=()): applied to `data` """ ddt = self.get_data_dtype() - if ddt.type in np.sctypes['float']: + if np.issubdtype(ddt.type, np.floating): return data image_max = self._image_max image_min = self._image_min @@ -260,9 +260,25 @@ def ndim(self): def is_proxy(self): return True - def __array__(self): - ''' Read of data from file ''' - return self.minc_file.get_scaled_data() + def __array__(self, dtype=None): + """ Read data from file and apply scaling, casting to ``dtype`` + + If ``dtype`` is unspecified, the dtype is automatically determined. + + Parameters + ---------- + dtype : numpy dtype specifier, optional + A numpy dtype specifier specifying the type of the returned array. + + Returns + ------- + array + Scaled image data with type `dtype`. + """ + arr = self.minc_file.get_scaled_data(sliceobj=()) + if dtype is not None: + arr = arr.astype(dtype, copy=False) + return arr def __getitem__(self, sliceobj): """ Read slice `sliceobj` of data from file """ @@ -310,7 +326,8 @@ class Minc1Image(SpatialImage): ImageArrayProxy = MincImageArrayProxy @classmethod - def from_file_map(klass, file_map): + def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): + # Note that mmap and keep_file_open are included for proper with file_map['image'].get_prepare_fileobj() as fobj: minc_file = Minc1File(netcdf_file(fobj)) affine = minc_file.get_affine() @@ -328,13 +345,13 @@ def from_file_map(klass, file_map): # Backwards compatibility -class MincFile(FutureWarningMixin, Minc1File): - """ Deprecated alternative name for Minc1File - """ - warn_message = 'MincFile is deprecated; please use Minc1File instead' +@deprecate_with_version('MincFile is deprecated; please use Minc1File instead', + since='2.0.0', until='3.0.0', warn_class=FutureWarning) +class MincFile(Minc1File): + pass -class MincImage(FutureWarningMixin, Minc1Image): - """ Deprecated alternative name for Minc1Image - """ - warn_message = 'MincImage is deprecated; please use Minc1Image instead' +@deprecate_with_version('MincImage is deprecated; please use Minc1Image instead', + since='2.0.0', until='3.0.0', warn_class=FutureWarning) +class MincImage(Minc1Image): + pass diff --git a/nibabel/minc2.py b/nibabel/minc2.py index f4ee3eab6b..90b039d8da 100644 --- a/nibabel/minc2.py +++ b/nibabel/minc2.py @@ -16,7 +16,7 @@ import nibabel as nib img = nib.load('my_funny.mnc') - data = img.get_data() + data = img.get_fdata() print(data.mean()) print(data.max()) print(data.min()) @@ -27,9 +27,6 @@ """ import numpy as np -from .optpkg import optional_package -h5py, have_h5py, setup_module = optional_package('h5py') - from .minc1 import Minc1File, MincHeader, Minc1Image, MincError @@ -108,7 +105,7 @@ def _get_valid_range(self): 
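[Editor's note, not part of the patch: the extended ``__array__(dtype=None)`` on the MINC array proxy means a scaled copy can be requested in a chosen dtype straight through numpy. Illustrative only; the file name is hypothetical.]

import numpy as np
import nibabel as nib

img = nib.load('example.mnc')                        # hypothetical MINC1/MINC2 file
data = np.asarray(img.dataobj, dtype=np.float32)     # scaled data, cast to float32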
def _get_scalar(self, var): """ Get scalar value from HDF5 scalar """ - return var.value + return var[()] def _get_array(self, var): """ Get array from HDF5 array """ @@ -158,7 +155,10 @@ class Minc2Image(Minc1Image): header_class = Minc2Header @classmethod - def from_file_map(klass, file_map): + def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): + # Import of h5py might take awhile for MPI-enabled builds + # So we are importing it here "on demand" + from ._h5py_compat import h5py holder = file_map['image'] if holder.filename is None: raise MincError('MINC2 needs filename for load') diff --git a/nibabel/mriutils.py b/nibabel/mriutils.py index 0f27544fae..b0f3f6a86f 100644 --- a/nibabel/mriutils.py +++ b/nibabel/mriutils.py @@ -9,7 +9,6 @@ """ Utilities for calculations related to MRI """ -from __future__ import division __all__ = ['calculate_dwell_time'] diff --git a/nibabel/nicom/ascconv.py b/nibabel/nicom/ascconv.py new file mode 100644 index 0000000000..bfdf313b74 --- /dev/null +++ b/nibabel/nicom/ascconv.py @@ -0,0 +1,207 @@ +# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +Parse the "ASCCONV" meta data format found in a variety of Siemens MR files. +""" +import re +import ast +from ..externals import OrderedDict + + +ASCCONV_RE = re.compile( + r'### ASCCONV BEGIN((?:\s*[^=\s]+=[^=\s]+)*) ###\n(.*?)\n### ASCCONV END ###', + flags=re.M | re.S) + + +class AscconvParseError(Exception): + """ Error parsing ascconv file """ + + +class Atom(object): + """ Object to hold operation, object type and object identifier + + An atom represents an element in an expression. For example:: + + a.b[0].c + + has four elements. We call these elements "atoms". + + We represent objects (like ``a``) as dicts for convenience. + + The last element (``.c``) is an ``op = ast.Attribute`` operation where the + object type (`obj_type`) of ``c`` is not constrained (we can't tell from + the operation what type it is). The `obj_id` is the name of the object -- + "c". + + The second to last element ``[0]``, is ``op = ast.Subscript``, with object type + dict (we know from the subsequent operation ``.c`` that this must be an + object, we represent the object by a dict). The `obj_id` is the index 0. + + Parameters + ---------- + op : {'name', 'attr', 'list'} + Assignment type. Assignment to name (root namespace), attribute or + list element. + obj_type : {list, dict, other} + Object type being assigned to. + obj_id : str or int + Key (``obj_type is dict``) or index (``obj_type is list``) + """ + + def __init__(self, op, obj_type, obj_id): + self.op = op + self.obj_type = obj_type + self.obj_id = obj_id + + +class NoValue(object): + """ Signals no value present """ + + +def assign2atoms(assign_ast, default_class=int): + """ Parse single assignment ast from ascconv line into atoms + + Parameters + ---------- + assign_ast : assignment statement ast + ast derived from single line of ascconv file. + default_class : class, optional + Class that will create an object where we cannot yet know the object + type in the assignment. + + Returns + ------- + atoms : list + List of :class:`atoms`. See docstring for :class:`atoms`. Defines + left to right sequence of assignment in `line_ast`. 
+ """ + if not len(assign_ast.targets) == 1: + raise AscconvParseError('Too many targets in assign') + target = assign_ast.targets[0] + atoms = [] + prev_target_type = default_class # Placeholder for any scalar value + while True: + if isinstance(target, ast.Name): + atoms.append(Atom(target, prev_target_type, target.id)) + break + if isinstance(target, ast.Attribute): + atoms.append(Atom(target, prev_target_type, target.attr)) + target = target.value + prev_target_type = OrderedDict + elif isinstance(target, ast.Subscript): + index = target.slice.value.n + atoms.append(Atom(target, prev_target_type, index)) + target = target.value + prev_target_type = list + else: + raise AscconvParseError( + 'Unexpected LHS element {0}'.format(target)) + return reversed(atoms) + + +def _create_obj_in(atom, root): + """ Create object defined in `atom` in dict-like given by `root` + + Return defined object. + """ + name = atom.obj_id + obj = root.get(name, NoValue) + if obj is not NoValue: + return obj + obj = atom.obj_type() + root[name] = obj + return obj + + +def _create_subscript_in(atom, root): + """ Create object defined in `atom` at index ``atom.obj_id`` in list `root` + + Return defined object. + """ + curr_n = len(root) + index = atom.obj_id + if curr_n > index: + return root[index] + obj = atom.obj_type() + root += [None] * (index - curr_n) + [obj] + return obj + + +def obj_from_atoms(atoms, namespace): + """ Return object defined by list `atoms` in dict-like `namespace` + + Parameters + ---------- + atoms : list + List of :class:`atoms` + namespace : dict-like + Namespace in which object will be defined. + + Returns + ------- + obj_root : object + Namespace such that we can set a desired value to the object defined in + `atoms` with ``obj_root[obj_key] = value``. + obj_key : str or int + Index into list or key into dictionary for `obj_root`. + """ + root_obj = namespace + for el in atoms: + prev_root = root_obj + if isinstance(el.op, (ast.Attribute, ast.Name)): + root_obj = _create_obj_in(el, root_obj) + else: + root_obj = _create_subscript_in(el, root_obj) + if not isinstance(root_obj, el.obj_type): + raise AscconvParseError( + 'Unexpected type for {0} in {1}'.format(el.obj_id, prev_root)) + return prev_root, el.obj_id + + +def _get_value(assign): + value = assign.value + if isinstance(value, ast.Num): + return value.n + if isinstance(value, ast.Str): + return value.s + if isinstance(value, ast.UnaryOp) and isinstance(value.op, ast.USub): + return -value.operand.n + raise AscconvParseError('Unexpected RHS of assignment: {0}'.format(value)) + + +def parse_ascconv(ascconv_str, str_delim='"'): + '''Parse the 'ASCCONV' format from `input_str`. + + Parameters + ---------- + ascconv_str : str + The string we are parsing + str_delim : str, optional + String delimiter. Typically '"' or '""' + + Returns + ------- + prot_dict : OrderedDict + Meta data pulled from the ASCCONV section. + attrs : OrderedDict + Any attributes stored in the 'ASCCONV BEGIN' line + + Raises + ------ + AsconvParseError + A line of the ASCCONV section could not be parsed. 
+ ''' + attrs, content = ASCCONV_RE.match(ascconv_str).groups() + attrs = OrderedDict((tuple(x.split('=')) for x in attrs.split())) + # Normalize string start / end markers to something Python understands + content = content.replace(str_delim, '"""') + # Use Python's own parser to parse modified ASCCONV assignments + tree = ast.parse(content) + + prot_dict = OrderedDict() + for assign in tree.body: + atoms = assign2atoms(assign) + obj_to_index, key = obj_from_atoms(atoms, prot_dict) + obj_to_index[key] = _get_value(assign) + + return prot_dict, attrs diff --git a/nibabel/nicom/csareader.py b/nibabel/nicom/csareader.py index 9847b72d28..1764e2878c 100644 --- a/nibabel/nicom/csareader.py +++ b/nibabel/nicom/csareader.py @@ -98,9 +98,10 @@ def read(csa_str): hdr_type = 1 csa_dict['type'] = hdr_type csa_dict['n_tags'], csa_dict['check'] = up_str.unpack('2I') - if not 0 < csa_dict['n_tags'] <= 128: + if not 0 < csa_dict['n_tags'] <= MAX_CSA_ITEMS: raise CSAReadError('Number of tags `t` should be ' - '0 < t <= 128') + '0 < t <= %d. Instead found %d tags.' + % (MAX_CSA_ITEMS, csa_dict['n_tags'])) for tag_no in range(csa_dict['n_tags']): name, vm, vr, syngodt, n_items, last3 = \ up_str.unpack('64si4s3i') diff --git a/nibabel/nicom/dicomreaders.py b/nibabel/nicom/dicomreaders.py index d18a43af96..ad8d9c6b64 100644 --- a/nibabel/nicom/dicomreaders.py +++ b/nibabel/nicom/dicomreaders.py @@ -1,4 +1,3 @@ -from __future__ import division, print_function, absolute_import from os.path import join as pjoin import glob diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index fc3fbe4905..f37d0323a8 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -11,9 +11,9 @@ processing that needs to raise an error, should be in a method, rather than in a property, or property-like thing. 
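[Editor's note, not part of the patch: an illustrative call into the new ``ascconv`` module on a tiny hand-written ASCCONV block. The protocol names and values are made up, and the parser targets the ``ast`` layout of the Python versions nibabel supported at the time; ``parse_ascconv`` returns the protocol dictionary plus any attributes from the BEGIN line.]

from nibabel.nicom.ascconv import parse_ascconv

ascconv_str = (
    '### ASCCONV BEGIN ###\n'
    'ulVersion = 0x14b44b6\n'
    'sSliceArray.lSize = 48\n'
    'sSliceArray.asSlice[0].dThickness = 2.5\n'
    '### ASCCONV END ###'
)
prot, attrs = parse_ascconv(ascconv_str)
print(prot['sSliceArray']['asSlice'][0]['dThickness'])   # -> 2.5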
""" -from __future__ import division import operator +import warnings import numpy as np @@ -21,7 +21,8 @@ from .dwiparams import B2q, nearest_pos_semi_def, q2bg from ..openers import ImageOpener from ..onetime import setattr_on_read as one_time -from ..pydicom_compat import tag_for_keyword +from ..pydicom_compat import tag_for_keyword, Sequence +from ..deprecated import deprecate_with_version class WrapperError(Exception): @@ -79,7 +80,13 @@ def wrapper_from_data(dcm_data): return MultiframeWrapper(dcm_data) # Check for Siemens DICOM format types # Only Siemens will have data for the CSA header - csa = csar.get_csa_header(dcm_data) + try: + csa = csar.get_csa_header(dcm_data) + except csar.CSAReadError as e: + warnings.warn('Error while attempting to read CSA header: ' + + str(e.args) + + '\n Ignoring Siemens private (CSA) header info.') + csa = None if csa is None: return Wrapper(dcm_data) if csar.is_mosaic(csa): @@ -94,7 +101,7 @@ class Wrapper(object): Methods: - * get_affine() + * get_affine() (deprecated, use affine property instead) * get_data() * get_pixel_array() * is_same_series(other) @@ -103,6 +110,7 @@ class Wrapper(object): Attributes and things that look like attributes: + * affine : (4, 4) array * dcm_data : object * image_shape : tuple * image_orient_patient : (3,2) array @@ -201,7 +209,7 @@ def voxel_sizes(self): zs = self.get('SpacingBetweenSlices') if zs is None: zs = self.get('SliceThickness') - if zs is None: + if zs is None or zs == '': zs = 1 # Protect from python decimals in pydicom 0.9.7 zs = float(zs) @@ -285,18 +293,19 @@ def get(self, key, default=None): """ Get values from underlying dicom data """ return self.dcm_data.get(key, default) + @deprecate_with_version('get_affine method is deprecated.\n' + 'Please use the ``img.affine`` property ' + 'instead.', + '2.5.1', '4.0') def get_affine(self): - """ Return mapping between voxel and DICOM coordinate system + return self.affine - Parameters - ---------- - None + @property + def affine(self): + """ Mapping between voxel and DICOM coordinate system - Returns - ------- - aff : (4,4) affine - Affine giving transformation between voxels in data array and - mm in the DICOM patient coordinate system. + (4, 4) affine matrix giving transformation between voxels in data array + and mm in the DICOM patient coordinate system. """ # rotation matrix already accounts for the ij transpose in the # DICOM image orientation patient transform. So. column 0 is @@ -502,8 +511,32 @@ def image_shape(self): rows, cols = self.get('Rows'), self.get('Columns') if None in (rows, cols): raise WrapperError("Rows and/or Columns are empty.") + # Check number of frames + first_frame = self.frames[0] n_frames = self.get('NumberOfFrames') + # some Philips may have derived images appended + has_derived = False + if hasattr(first_frame, 'get') and first_frame.get([0x18, 0x9117]): + # DWI image may include derived isotropic, ADC or trace volume + try: + self.frames = Sequence( + frame for frame in self.frames if + frame.MRDiffusionSequence[0].DiffusionDirectionality + != 'ISOTROPIC' + ) + except IndexError: + # Sequence tag is found but missing items! 
+ raise WrapperError("Diffusion file missing information") + except AttributeError: + # DiffusionDirectionality tag is not required + pass + else: + if n_frames != len(self.frames): + warnings.warn("Derived images found and removed") + n_frames = len(self.frames) + has_derived = True + assert len(self.frames) == n_frames frame_indices = np.array( [frame.FrameContentSequence[0].DimensionIndexValues @@ -522,6 +555,15 @@ def image_shape(self): if stackid_tag in dim_seq: stackid_dim_idx = dim_seq.index(stackid_tag) frame_indices = np.delete(frame_indices, stackid_dim_idx, axis=1) + dim_seq.pop(stackid_dim_idx) + if has_derived: + # derived volume is included + derived_tag = tag_for_keyword("DiffusionBValue") + if derived_tag not in dim_seq: + raise WrapperError("Missing information, cannot remove indices " + "with confidence.") + derived_dim_idx = dim_seq.index(derived_tag) + frame_indices = np.delete(frame_indices, derived_dim_idx, axis=1) # account for the 2 additional dimensions (row and column) not included # in the indices n_dim = frame_indices.shape[1] + 2 diff --git a/nibabel/nicom/dwiparams.py b/nibabel/nicom/dwiparams.py index 1fda89b0da..e9d05c0d57 100644 --- a/nibabel/nicom/dwiparams.py +++ b/nibabel/nicom/dwiparams.py @@ -21,7 +21,6 @@ ''' import numpy as np import numpy.linalg as npl -from .. import setup_test as setup_module # noqa def B2q(B, tol=None): diff --git a/nibabel/nicom/tests/__init__.py b/nibabel/nicom/tests/__init__.py index c7c3753010..127ad5a6e0 100644 --- a/nibabel/nicom/tests/__init__.py +++ b/nibabel/nicom/tests/__init__.py @@ -1 +1,4 @@ -# init to allow relative imports in tests +from ...pydicom_compat import have_dicom +import unittest + +dicom_test = unittest.skipUnless(have_dicom, "Could not import dicom or pydicom") diff --git a/nibabel/nicom/tests/data/ascconv_sample.txt b/nibabel/nicom/tests/data/ascconv_sample.txt new file mode 100644 index 0000000000..1fd78f788f --- /dev/null +++ b/nibabel/nicom/tests/data/ascconv_sample.txt @@ -0,0 +1,919 @@ +### ASCCONV BEGIN ### +ulVersion = 0x14b44b6 +tSequenceFileName = ""%SiemensSeq%\ep2d_diff"" +tProtocolName = ""CBU+AF8-DTI+AF8-64D+AF8-1A"" +tReferenceImage0 = ""1.3.12.2.1107.5.2.32.35119.2010011420070434054586384"" +tReferenceImage1 = ""1.3.12.2.1107.5.2.32.35119.2010011420070721803086388"" +tReferenceImage2 = ""1.3.12.2.1107.5.2.32.35119.201001142007109937386392"" +ucScanRegionPosValid = 0x1 +ucTablePositioningMode = 0x1 +sProtConsistencyInfo.tBaselineString = ""N4_VB17A_LATEST_20090307"" +sProtConsistencyInfo.tSystemType = ""092"" +sProtConsistencyInfo.flNominalB0 = 2.89362 +sProtConsistencyInfo.flGMax = 26 +sProtConsistencyInfo.flRiseTime = 5.88 +sProtConsistencyInfo.lMaximumNofRxReceiverChannels = 18 +sGRADSPEC.sEddyCompensationX.aflAmplitude[0] = 0.00141208 +sGRADSPEC.sEddyCompensationX.aflAmplitude[1] = 0.000569241 +sGRADSPEC.sEddyCompensationX.aflAmplitude[2] = -0.000514958 +sGRADSPEC.sEddyCompensationX.aflAmplitude[3] = 0.000499075 +sGRADSPEC.sEddyCompensationX.aflAmplitude[4] = 0.000821246 +sGRADSPEC.sEddyCompensationX.aflTimeConstant[0] = 1.81531 +sGRADSPEC.sEddyCompensationX.aflTimeConstant[1] = 0.995025 +sGRADSPEC.sEddyCompensationX.aflTimeConstant[2] = 0.0492598 +sGRADSPEC.sEddyCompensationX.aflTimeConstant[3] = 0.0194645 +sGRADSPEC.sEddyCompensationX.aflTimeConstant[4] = 0.000499659 +sGRADSPEC.sEddyCompensationY.aflAmplitude[0] = 0.00112797 +sGRADSPEC.sEddyCompensationY.aflAmplitude[1] = -0.000565372 +sGRADSPEC.sEddyCompensationY.aflAmplitude[2] = -0.00182913 
+sGRADSPEC.sEddyCompensationY.aflAmplitude[3] = -2.65859e-005 +sGRADSPEC.sEddyCompensationY.aflAmplitude[4] = 0.000601077 +sGRADSPEC.sEddyCompensationY.aflTimeConstant[0] = 1.09142 +sGRADSPEC.sEddyCompensationY.aflTimeConstant[1] = 0.661632 +sGRADSPEC.sEddyCompensationY.aflTimeConstant[2] = 0.446457 +sGRADSPEC.sEddyCompensationY.aflTimeConstant[3] = 0.0118729 +sGRADSPEC.sEddyCompensationY.aflTimeConstant[4] = 0.00134346 +sGRADSPEC.sEddyCompensationZ.aflAmplitude[0] = 0.00221038 +sGRADSPEC.sEddyCompensationZ.aflAmplitude[1] = 0.00592667 +sGRADSPEC.sEddyCompensationZ.aflAmplitude[2] = 0.000254437 +sGRADSPEC.sEddyCompensationZ.aflAmplitude[3] = -8.35135e-005 +sGRADSPEC.sEddyCompensationZ.aflAmplitude[4] = -4.25678e-005 +sGRADSPEC.sEddyCompensationZ.aflTimeConstant[0] = 4.32108 +sGRADSPEC.sEddyCompensationZ.aflTimeConstant[1] = 0.923398 +sGRADSPEC.sEddyCompensationZ.aflTimeConstant[2] = 0.0379209 +sGRADSPEC.sEddyCompensationZ.aflTimeConstant[3] = 0.0104227 +sGRADSPEC.sEddyCompensationZ.aflTimeConstant[4] = 0.00199944 +sGRADSPEC.bEddyCompensationValid = 1 +sGRADSPEC.sB0CompensationX.aflAmplitude[0] = -0.0494045 +sGRADSPEC.sB0CompensationX.aflAmplitude[1] = 0.0730311 +sGRADSPEC.sB0CompensationX.aflAmplitude[2] = -0.00670347 +sGRADSPEC.sB0CompensationX.aflTimeConstant[0] = 0.618983 +sGRADSPEC.sB0CompensationX.aflTimeConstant[1] = 0.341914 +sGRADSPEC.sB0CompensationX.aflTimeConstant[2] = 0.002 +sGRADSPEC.sB0CompensationY.aflAmplitude[0] = 0.136281 +sGRADSPEC.sB0CompensationY.aflAmplitude[1] = 0.0376382 +sGRADSPEC.sB0CompensationY.aflAmplitude[2] = -0.0500779 +sGRADSPEC.sB0CompensationY.aflTimeConstant[0] = 0.71999 +sGRADSPEC.sB0CompensationY.aflTimeConstant[1] = 0.00341892 +sGRADSPEC.sB0CompensationY.aflTimeConstant[2] = 0.002 +sGRADSPEC.sB0CompensationZ.aflAmplitude[0] = 0.0776537 +sGRADSPEC.sB0CompensationZ.aflAmplitude[1] = 0.0168151 +sGRADSPEC.sB0CompensationZ.aflAmplitude[2] = -0.0550622 +sGRADSPEC.sB0CompensationZ.aflTimeConstant[0] = 0.669998 +sGRADSPEC.sB0CompensationZ.aflTimeConstant[1] = 0.0213343 +sGRADSPEC.sB0CompensationZ.aflTimeConstant[2] = 0.00186002 +sGRADSPEC.bB0CompensationValid = 1 +sGRADSPEC.sCrossTermCompensationXY.aflAmplitude[0] = -0.00049613 +sGRADSPEC.sCrossTermCompensationXY.aflTimeConstant[0] = 0.562233 +sGRADSPEC.sCrossTermCompensationXZ.aflAmplitude[0] = -0.000499641 +sGRADSPEC.sCrossTermCompensationXZ.aflTimeConstant[0] = 0.693605 +sGRADSPEC.sCrossTermCompensationYX.aflAmplitude[0] = 5.35458e-005 +sGRADSPEC.sCrossTermCompensationYX.aflTimeConstant[0] = 0.598216 +sGRADSPEC.sCrossTermCompensationYZ.aflAmplitude[0] = 0.0004678 +sGRADSPEC.sCrossTermCompensationYZ.aflTimeConstant[0] = 0.705977 +sGRADSPEC.sCrossTermCompensationZX.aflAmplitude[0] = -0.000529382 +sGRADSPEC.sCrossTermCompensationZX.aflTimeConstant[0] = 0.551175 +sGRADSPEC.sCrossTermCompensationZY.aflAmplitude[0] = 8.74925e-005 +sGRADSPEC.sCrossTermCompensationZY.aflTimeConstant[0] = 0.890761 +sGRADSPEC.bCrossTermCompensationValid = 1 +sGRADSPEC.lOffsetX = -7806 +sGRADSPEC.lOffsetY = -8833 +sGRADSPEC.lOffsetZ = -2097 +sGRADSPEC.bOffsetValid = 1 +sGRADSPEC.lDelayX = 14 +sGRADSPEC.lDelayY = 14 +sGRADSPEC.lDelayZ = 10 +sGRADSPEC.bDelayValid = 1 +sGRADSPEC.flSensitivityX = 7.95149e-005 +sGRADSPEC.flSensitivityY = 7.82833e-005 +sGRADSPEC.flSensitivityZ = 9.09015e-005 +sGRADSPEC.bSensitivityValid = 1 +sGRADSPEC.flGSWDMinRiseTime = 9.88 +sGRADSPEC.alShimCurrent[0] = 867 +sGRADSPEC.alShimCurrent[1] = 80 +sGRADSPEC.alShimCurrent[2] = -61 +sGRADSPEC.alShimCurrent[3] = -4 +sGRADSPEC.alShimCurrent[4] = -16 
+sGRADSPEC.bShimCurrentValid = 1 +sGRADSPEC.ucMode = 0x11 +sTXSPEC.asNucleusInfo[0].tNucleus = ""1H"" +sTXSPEC.asNucleusInfo[0].lFrequency = 123251815 +sTXSPEC.asNucleusInfo[0].bFrequencyValid = 1 +sTXSPEC.asNucleusInfo[0].flReferenceAmplitude = 384.855 +sTXSPEC.asNucleusInfo[0].bReferenceAmplitudeValid = 1 +sTXSPEC.asNucleusInfo[0].flAmplitudeCorrection = 1 +sTXSPEC.asNucleusInfo[0].bAmplitudeCorrectionValid = 1 +sTXSPEC.asNucleusInfo[0].bRFPAIndexValid = 1 +sTXSPEC.asNucleusInfo[1].bFrequencyValid = 1 +sTXSPEC.asNucleusInfo[1].bReferenceAmplitudeValid = 1 +sTXSPEC.asNucleusInfo[1].flAmplitudeCorrection = 1 +sTXSPEC.asNucleusInfo[1].bAmplitudeCorrectionValid = 1 +sTXSPEC.asNucleusInfo[1].lRFPAIndex = -1 +sTXSPEC.asNucleusInfo[1].bRFPAIndexValid = 1 +sTXSPEC.aRFPULSE[0].tName = ""ExtExciteRF"" +sTXSPEC.aRFPULSE[0].bAmplitudeValid = 0x1 +sTXSPEC.aRFPULSE[0].flAmplitude = 357.891 +sTXSPEC.aRFPULSE[1].tName = ""CSatCSatNS"" +sTXSPEC.aRFPULSE[1].bAmplitudeValid = 0x1 +sTXSPEC.aRFPULSE[1].flAmplitude = 94.871 +sTXSPEC.aRFPULSE[2].tName = ""SLoopFCSatNS"" +sTXSPEC.aRFPULSE[2].bAmplitudeValid = 0x1 +sTXSPEC.aRFPULSE[2].flAmplitude = 94.871 +sTXSPEC.lNoOfTraPulses = 3 +sTXSPEC.lBCExcitationMode = 1 +sTXSPEC.lBCSeqExcitationMode = 4 +sTXSPEC.flKDynMagnitudeMin = 0.5 +sTXSPEC.flKDynMagnitudeMax = 1.5 +sTXSPEC.flKDynMagnitudeClipLow = 1 +sTXSPEC.flKDynMagnitudeClipHigh = 1 +sTXSPEC.flKDynPhaseMax = 0.698132 +sTXSPEC.flKDynPhaseClip = 0.174533 +sTXSPEC.bKDynValid = 1 +sTXSPEC.ucRFPulseType = 0x2 +sTXSPEC.ucExcitMode = 0x1 +sTXSPEC.ucSimultaneousExcitation = 0x1 +sTXSPEC.ucBCExcitationModeValid = 0x1 +sRXSPEC.lGain = 1 +sRXSPEC.bGainValid = 1 +sRXSPEC.alDwellTime[0] = 2800 +sAdjData.uiAdjFreMode = 0x1 +sAdjData.uiAdjShimMode = 0x2 +sAdjData.uiAdjWatSupMode = 0x1 +sAdjData.uiAdjRFMapMode = 0x1 +sAdjData.uiAdjMDSMode = 0x1 +sAdjData.uiAdjTableTolerance = 0x1 +sAdjData.uiAdjProtID = 0x56 +sAdjData.uiAdjFreProtRelated = 0x1 +sAdjData.sAdjVolume.sPosition.dCor = -19.66101724 +sAdjData.sAdjVolume.sPosition.dTra = -8.81356001 +sAdjData.sAdjVolume.sNormal.dCor = 0.005235963828 +sAdjData.sAdjVolume.sNormal.dTra = 0.9999862922 +sAdjData.sAdjVolume.dThickness = 144 +sAdjData.sAdjVolume.dPhaseFOV = 230 +sAdjData.sAdjVolume.dReadoutFOV = 230 +ucEnableNoiseAdjust = 0x1 +alTR[0] = 6600000 +alTI[0] = 2500000 +lContrasts = 1 +alTE[0] = 93000 +acFlowComp[0] = 1 +lCombinedEchoes = 1 +sSliceArray.asSlice[0].sPosition.dCor = -20.03015269 +sSliceArray.asSlice[0].sPosition.dTra = -79.31259361 +sSliceArray.asSlice[0].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[0].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[0].dThickness = 2.5 +sSliceArray.asSlice[0].dPhaseFOV = 230 +sSliceArray.asSlice[0].dReadoutFOV = 230 +sSliceArray.asSlice[1].sPosition.dCor = -20.0144448 +sSliceArray.asSlice[1].sPosition.dTra = -76.31263473 +sSliceArray.asSlice[1].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[1].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[1].dThickness = 2.5 +sSliceArray.asSlice[1].dPhaseFOV = 230 +sSliceArray.asSlice[1].dReadoutFOV = 230 +sSliceArray.asSlice[2].sPosition.dCor = -19.99873691 +sSliceArray.asSlice[2].sPosition.dTra = -73.31267586 +sSliceArray.asSlice[2].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[2].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[2].dThickness = 2.5 +sSliceArray.asSlice[2].dPhaseFOV = 230 +sSliceArray.asSlice[2].dReadoutFOV = 230 +sSliceArray.asSlice[3].sPosition.dCor = -19.98302902 +sSliceArray.asSlice[3].sPosition.dTra = -70.31271698 +sSliceArray.asSlice[3].sNormal.dCor = 
0.005235963828 +sSliceArray.asSlice[3].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[3].dThickness = 2.5 +sSliceArray.asSlice[3].dPhaseFOV = 230 +sSliceArray.asSlice[3].dReadoutFOV = 230 +sSliceArray.asSlice[4].sPosition.dCor = -19.96732113 +sSliceArray.asSlice[4].sPosition.dTra = -67.3127581 +sSliceArray.asSlice[4].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[4].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[4].dThickness = 2.5 +sSliceArray.asSlice[4].dPhaseFOV = 230 +sSliceArray.asSlice[4].dReadoutFOV = 230 +sSliceArray.asSlice[5].sPosition.dCor = -19.95161324 +sSliceArray.asSlice[5].sPosition.dTra = -64.31279923 +sSliceArray.asSlice[5].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[5].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[5].dThickness = 2.5 +sSliceArray.asSlice[5].dPhaseFOV = 230 +sSliceArray.asSlice[5].dReadoutFOV = 230 +sSliceArray.asSlice[6].sPosition.dCor = -19.93590535 +sSliceArray.asSlice[6].sPosition.dTra = -61.31284035 +sSliceArray.asSlice[6].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[6].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[6].dThickness = 2.5 +sSliceArray.asSlice[6].dPhaseFOV = 230 +sSliceArray.asSlice[6].dReadoutFOV = 230 +sSliceArray.asSlice[7].sPosition.dCor = -19.92019745 +sSliceArray.asSlice[7].sPosition.dTra = -58.31288147 +sSliceArray.asSlice[7].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[7].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[7].dThickness = 2.5 +sSliceArray.asSlice[7].dPhaseFOV = 230 +sSliceArray.asSlice[7].dReadoutFOV = 230 +sSliceArray.asSlice[8].sPosition.dCor = -19.90448956 +sSliceArray.asSlice[8].sPosition.dTra = -55.3129226 +sSliceArray.asSlice[8].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[8].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[8].dThickness = 2.5 +sSliceArray.asSlice[8].dPhaseFOV = 230 +sSliceArray.asSlice[8].dReadoutFOV = 230 +sSliceArray.asSlice[9].sPosition.dCor = -19.88878167 +sSliceArray.asSlice[9].sPosition.dTra = -52.31296372 +sSliceArray.asSlice[9].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[9].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[9].dThickness = 2.5 +sSliceArray.asSlice[9].dPhaseFOV = 230 +sSliceArray.asSlice[9].dReadoutFOV = 230 +sSliceArray.asSlice[10].sPosition.dCor = -19.87307378 +sSliceArray.asSlice[10].sPosition.dTra = -49.31300484 +sSliceArray.asSlice[10].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[10].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[10].dThickness = 2.5 +sSliceArray.asSlice[10].dPhaseFOV = 230 +sSliceArray.asSlice[10].dReadoutFOV = 230 +sSliceArray.asSlice[11].sPosition.dCor = -19.85736589 +sSliceArray.asSlice[11].sPosition.dTra = -46.31304597 +sSliceArray.asSlice[11].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[11].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[11].dThickness = 2.5 +sSliceArray.asSlice[11].dPhaseFOV = 230 +sSliceArray.asSlice[11].dReadoutFOV = 230 +sSliceArray.asSlice[12].sPosition.dCor = -19.841658 +sSliceArray.asSlice[12].sPosition.dTra = -43.31308709 +sSliceArray.asSlice[12].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[12].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[12].dThickness = 2.5 +sSliceArray.asSlice[12].dPhaseFOV = 230 +sSliceArray.asSlice[12].dReadoutFOV = 230 +sSliceArray.asSlice[13].sPosition.dCor = -19.8259501 +sSliceArray.asSlice[13].sPosition.dTra = -40.31312821 +sSliceArray.asSlice[13].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[13].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[13].dThickness = 2.5 +sSliceArray.asSlice[13].dPhaseFOV = 230 
+sSliceArray.asSlice[13].dReadoutFOV = 230 +sSliceArray.asSlice[14].sPosition.dCor = -19.81024221 +sSliceArray.asSlice[14].sPosition.dTra = -37.31316934 +sSliceArray.asSlice[14].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[14].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[14].dThickness = 2.5 +sSliceArray.asSlice[14].dPhaseFOV = 230 +sSliceArray.asSlice[14].dReadoutFOV = 230 +sSliceArray.asSlice[15].sPosition.dCor = -19.79453432 +sSliceArray.asSlice[15].sPosition.dTra = -34.31321046 +sSliceArray.asSlice[15].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[15].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[15].dThickness = 2.5 +sSliceArray.asSlice[15].dPhaseFOV = 230 +sSliceArray.asSlice[15].dReadoutFOV = 230 +sSliceArray.asSlice[16].sPosition.dCor = -19.77882643 +sSliceArray.asSlice[16].sPosition.dTra = -31.31325158 +sSliceArray.asSlice[16].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[16].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[16].dThickness = 2.5 +sSliceArray.asSlice[16].dPhaseFOV = 230 +sSliceArray.asSlice[16].dReadoutFOV = 230 +sSliceArray.asSlice[17].sPosition.dCor = -19.76311854 +sSliceArray.asSlice[17].sPosition.dTra = -28.31329271 +sSliceArray.asSlice[17].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[17].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[17].dThickness = 2.5 +sSliceArray.asSlice[17].dPhaseFOV = 230 +sSliceArray.asSlice[17].dReadoutFOV = 230 +sSliceArray.asSlice[18].sPosition.dCor = -19.74741065 +sSliceArray.asSlice[18].sPosition.dTra = -25.31333383 +sSliceArray.asSlice[18].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[18].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[18].dThickness = 2.5 +sSliceArray.asSlice[18].dPhaseFOV = 230 +sSliceArray.asSlice[18].dReadoutFOV = 230 +sSliceArray.asSlice[19].sPosition.dCor = -19.73170276 +sSliceArray.asSlice[19].sPosition.dTra = -22.31337495 +sSliceArray.asSlice[19].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[19].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[19].dThickness = 2.5 +sSliceArray.asSlice[19].dPhaseFOV = 230 +sSliceArray.asSlice[19].dReadoutFOV = 230 +sSliceArray.asSlice[20].sPosition.dCor = -19.71599486 +sSliceArray.asSlice[20].sPosition.dTra = -19.31341608 +sSliceArray.asSlice[20].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[20].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[20].dThickness = 2.5 +sSliceArray.asSlice[20].dPhaseFOV = 230 +sSliceArray.asSlice[20].dReadoutFOV = 230 +sSliceArray.asSlice[21].sPosition.dCor = -19.70028697 +sSliceArray.asSlice[21].sPosition.dTra = -16.3134572 +sSliceArray.asSlice[21].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[21].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[21].dThickness = 2.5 +sSliceArray.asSlice[21].dPhaseFOV = 230 +sSliceArray.asSlice[21].dReadoutFOV = 230 +sSliceArray.asSlice[22].sPosition.dCor = -19.68457908 +sSliceArray.asSlice[22].sPosition.dTra = -13.31349832 +sSliceArray.asSlice[22].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[22].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[22].dThickness = 2.5 +sSliceArray.asSlice[22].dPhaseFOV = 230 +sSliceArray.asSlice[22].dReadoutFOV = 230 +sSliceArray.asSlice[23].sPosition.dCor = -19.66887119 +sSliceArray.asSlice[23].sPosition.dTra = -10.31353945 +sSliceArray.asSlice[23].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[23].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[23].dThickness = 2.5 +sSliceArray.asSlice[23].dPhaseFOV = 230 +sSliceArray.asSlice[23].dReadoutFOV = 230 +sSliceArray.asSlice[24].sPosition.dCor = -19.6531633 
+sSliceArray.asSlice[24].sPosition.dTra = -7.313580571 +sSliceArray.asSlice[24].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[24].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[24].dThickness = 2.5 +sSliceArray.asSlice[24].dPhaseFOV = 230 +sSliceArray.asSlice[24].dReadoutFOV = 230 +sSliceArray.asSlice[25].sPosition.dCor = -19.63745541 +sSliceArray.asSlice[25].sPosition.dTra = -4.313621695 +sSliceArray.asSlice[25].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[25].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[25].dThickness = 2.5 +sSliceArray.asSlice[25].dPhaseFOV = 230 +sSliceArray.asSlice[25].dReadoutFOV = 230 +sSliceArray.asSlice[26].sPosition.dCor = -19.62174752 +sSliceArray.asSlice[26].sPosition.dTra = -1.313662818 +sSliceArray.asSlice[26].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[26].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[26].dThickness = 2.5 +sSliceArray.asSlice[26].dPhaseFOV = 230 +sSliceArray.asSlice[26].dReadoutFOV = 230 +sSliceArray.asSlice[27].sPosition.dCor = -19.60603962 +sSliceArray.asSlice[27].sPosition.dTra = 1.686296059 +sSliceArray.asSlice[27].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[27].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[27].dThickness = 2.5 +sSliceArray.asSlice[27].dPhaseFOV = 230 +sSliceArray.asSlice[27].dReadoutFOV = 230 +sSliceArray.asSlice[28].sPosition.dCor = -19.59033173 +sSliceArray.asSlice[28].sPosition.dTra = 4.686254935 +sSliceArray.asSlice[28].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[28].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[28].dThickness = 2.5 +sSliceArray.asSlice[28].dPhaseFOV = 230 +sSliceArray.asSlice[28].dReadoutFOV = 230 +sSliceArray.asSlice[29].sPosition.dCor = -19.57462384 +sSliceArray.asSlice[29].sPosition.dTra = 7.686213812 +sSliceArray.asSlice[29].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[29].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[29].dThickness = 2.5 +sSliceArray.asSlice[29].dPhaseFOV = 230 +sSliceArray.asSlice[29].dReadoutFOV = 230 +sSliceArray.asSlice[30].sPosition.dCor = -19.55891595 +sSliceArray.asSlice[30].sPosition.dTra = 10.68617269 +sSliceArray.asSlice[30].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[30].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[30].dThickness = 2.5 +sSliceArray.asSlice[30].dPhaseFOV = 230 +sSliceArray.asSlice[30].dReadoutFOV = 230 +sSliceArray.asSlice[31].sPosition.dCor = -19.54320806 +sSliceArray.asSlice[31].sPosition.dTra = 13.68613156 +sSliceArray.asSlice[31].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[31].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[31].dThickness = 2.5 +sSliceArray.asSlice[31].dPhaseFOV = 230 +sSliceArray.asSlice[31].dReadoutFOV = 230 +sSliceArray.asSlice[32].sPosition.dCor = -19.52750017 +sSliceArray.asSlice[32].sPosition.dTra = 16.68609044 +sSliceArray.asSlice[32].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[32].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[32].dThickness = 2.5 +sSliceArray.asSlice[32].dPhaseFOV = 230 +sSliceArray.asSlice[32].dReadoutFOV = 230 +sSliceArray.asSlice[33].sPosition.dCor = -19.51179228 +sSliceArray.asSlice[33].sPosition.dTra = 19.68604932 +sSliceArray.asSlice[33].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[33].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[33].dThickness = 2.5 +sSliceArray.asSlice[33].dPhaseFOV = 230 +sSliceArray.asSlice[33].dReadoutFOV = 230 +sSliceArray.asSlice[34].sPosition.dCor = -19.49608438 +sSliceArray.asSlice[34].sPosition.dTra = 22.68600819 +sSliceArray.asSlice[34].sNormal.dCor = 0.005235963828 
+sSliceArray.asSlice[34].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[34].dThickness = 2.5 +sSliceArray.asSlice[34].dPhaseFOV = 230 +sSliceArray.asSlice[34].dReadoutFOV = 230 +sSliceArray.asSlice[35].sPosition.dCor = -19.48037649 +sSliceArray.asSlice[35].sPosition.dTra = 25.68596707 +sSliceArray.asSlice[35].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[35].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[35].dThickness = 2.5 +sSliceArray.asSlice[35].dPhaseFOV = 230 +sSliceArray.asSlice[35].dReadoutFOV = 230 +sSliceArray.asSlice[36].sPosition.dCor = -19.4646686 +sSliceArray.asSlice[36].sPosition.dTra = 28.68592595 +sSliceArray.asSlice[36].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[36].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[36].dThickness = 2.5 +sSliceArray.asSlice[36].dPhaseFOV = 230 +sSliceArray.asSlice[36].dReadoutFOV = 230 +sSliceArray.asSlice[37].sPosition.dCor = -19.44896071 +sSliceArray.asSlice[37].sPosition.dTra = 31.68588482 +sSliceArray.asSlice[37].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[37].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[37].dThickness = 2.5 +sSliceArray.asSlice[37].dPhaseFOV = 230 +sSliceArray.asSlice[37].dReadoutFOV = 230 +sSliceArray.asSlice[38].sPosition.dCor = -19.43325282 +sSliceArray.asSlice[38].sPosition.dTra = 34.6858437 +sSliceArray.asSlice[38].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[38].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[38].dThickness = 2.5 +sSliceArray.asSlice[38].dPhaseFOV = 230 +sSliceArray.asSlice[38].dReadoutFOV = 230 +sSliceArray.asSlice[39].sPosition.dCor = -19.41754493 +sSliceArray.asSlice[39].sPosition.dTra = 37.68580258 +sSliceArray.asSlice[39].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[39].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[39].dThickness = 2.5 +sSliceArray.asSlice[39].dPhaseFOV = 230 +sSliceArray.asSlice[39].dReadoutFOV = 230 +sSliceArray.asSlice[40].sPosition.dCor = -19.40183703 +sSliceArray.asSlice[40].sPosition.dTra = 40.68576145 +sSliceArray.asSlice[40].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[40].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[40].dThickness = 2.5 +sSliceArray.asSlice[40].dPhaseFOV = 230 +sSliceArray.asSlice[40].dReadoutFOV = 230 +sSliceArray.asSlice[41].sPosition.dCor = -19.38612914 +sSliceArray.asSlice[41].sPosition.dTra = 43.68572033 +sSliceArray.asSlice[41].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[41].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[41].dThickness = 2.5 +sSliceArray.asSlice[41].dPhaseFOV = 230 +sSliceArray.asSlice[41].dReadoutFOV = 230 +sSliceArray.asSlice[42].sPosition.dCor = -19.37042125 +sSliceArray.asSlice[42].sPosition.dTra = 46.68567921 +sSliceArray.asSlice[42].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[42].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[42].dThickness = 2.5 +sSliceArray.asSlice[42].dPhaseFOV = 230 +sSliceArray.asSlice[42].dReadoutFOV = 230 +sSliceArray.asSlice[43].sPosition.dCor = -19.35471336 +sSliceArray.asSlice[43].sPosition.dTra = 49.68563808 +sSliceArray.asSlice[43].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[43].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[43].dThickness = 2.5 +sSliceArray.asSlice[43].dPhaseFOV = 230 +sSliceArray.asSlice[43].dReadoutFOV = 230 +sSliceArray.asSlice[44].sPosition.dCor = -19.33900547 +sSliceArray.asSlice[44].sPosition.dTra = 52.68559696 +sSliceArray.asSlice[44].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[44].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[44].dThickness = 2.5 
+sSliceArray.asSlice[44].dPhaseFOV = 230 +sSliceArray.asSlice[44].dReadoutFOV = 230 +sSliceArray.asSlice[45].sPosition.dCor = -19.32329758 +sSliceArray.asSlice[45].sPosition.dTra = 55.68555584 +sSliceArray.asSlice[45].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[45].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[45].dThickness = 2.5 +sSliceArray.asSlice[45].dPhaseFOV = 230 +sSliceArray.asSlice[45].dReadoutFOV = 230 +sSliceArray.asSlice[46].sPosition.dCor = -19.30758969 +sSliceArray.asSlice[46].sPosition.dTra = 58.68551471 +sSliceArray.asSlice[46].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[46].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[46].dThickness = 2.5 +sSliceArray.asSlice[46].dPhaseFOV = 230 +sSliceArray.asSlice[46].dReadoutFOV = 230 +sSliceArray.asSlice[47].sPosition.dCor = -19.29188179 +sSliceArray.asSlice[47].sPosition.dTra = 61.68547359 +sSliceArray.asSlice[47].sNormal.dCor = 0.005235963828 +sSliceArray.asSlice[47].sNormal.dTra = 0.9999862922 +sSliceArray.asSlice[47].dThickness = 2.5 +sSliceArray.asSlice[47].dPhaseFOV = 230 +sSliceArray.asSlice[47].dReadoutFOV = 230 +sSliceArray.anAsc[1] = 1 +sSliceArray.anAsc[2] = 2 +sSliceArray.anAsc[3] = 3 +sSliceArray.anAsc[4] = 4 +sSliceArray.anAsc[5] = 5 +sSliceArray.anAsc[6] = 6 +sSliceArray.anAsc[7] = 7 +sSliceArray.anAsc[8] = 8 +sSliceArray.anAsc[9] = 9 +sSliceArray.anAsc[10] = 10 +sSliceArray.anAsc[11] = 11 +sSliceArray.anAsc[12] = 12 +sSliceArray.anAsc[13] = 13 +sSliceArray.anAsc[14] = 14 +sSliceArray.anAsc[15] = 15 +sSliceArray.anAsc[16] = 16 +sSliceArray.anAsc[17] = 17 +sSliceArray.anAsc[18] = 18 +sSliceArray.anAsc[19] = 19 +sSliceArray.anAsc[20] = 20 +sSliceArray.anAsc[21] = 21 +sSliceArray.anAsc[22] = 22 +sSliceArray.anAsc[23] = 23 +sSliceArray.anAsc[24] = 24 +sSliceArray.anAsc[25] = 25 +sSliceArray.anAsc[26] = 26 +sSliceArray.anAsc[27] = 27 +sSliceArray.anAsc[28] = 28 +sSliceArray.anAsc[29] = 29 +sSliceArray.anAsc[30] = 30 +sSliceArray.anAsc[31] = 31 +sSliceArray.anAsc[32] = 32 +sSliceArray.anAsc[33] = 33 +sSliceArray.anAsc[34] = 34 +sSliceArray.anAsc[35] = 35 +sSliceArray.anAsc[36] = 36 +sSliceArray.anAsc[37] = 37 +sSliceArray.anAsc[38] = 38 +sSliceArray.anAsc[39] = 39 +sSliceArray.anAsc[40] = 40 +sSliceArray.anAsc[41] = 41 +sSliceArray.anAsc[42] = 42 +sSliceArray.anAsc[43] = 43 +sSliceArray.anAsc[44] = 44 +sSliceArray.anAsc[45] = 45 +sSliceArray.anAsc[46] = 46 +sSliceArray.anAsc[47] = 47 +sSliceArray.anPos[1] = 1 +sSliceArray.anPos[2] = 2 +sSliceArray.anPos[3] = 3 +sSliceArray.anPos[4] = 4 +sSliceArray.anPos[5] = 5 +sSliceArray.anPos[6] = 6 +sSliceArray.anPos[7] = 7 +sSliceArray.anPos[8] = 8 +sSliceArray.anPos[9] = 9 +sSliceArray.anPos[10] = 10 +sSliceArray.anPos[11] = 11 +sSliceArray.anPos[12] = 12 +sSliceArray.anPos[13] = 13 +sSliceArray.anPos[14] = 14 +sSliceArray.anPos[15] = 15 +sSliceArray.anPos[16] = 16 +sSliceArray.anPos[17] = 17 +sSliceArray.anPos[18] = 18 +sSliceArray.anPos[19] = 19 +sSliceArray.anPos[20] = 20 +sSliceArray.anPos[21] = 21 +sSliceArray.anPos[22] = 22 +sSliceArray.anPos[23] = 23 +sSliceArray.anPos[24] = 24 +sSliceArray.anPos[25] = 25 +sSliceArray.anPos[26] = 26 +sSliceArray.anPos[27] = 27 +sSliceArray.anPos[28] = 28 +sSliceArray.anPos[29] = 29 +sSliceArray.anPos[30] = 30 +sSliceArray.anPos[31] = 31 +sSliceArray.anPos[32] = 32 +sSliceArray.anPos[33] = 33 +sSliceArray.anPos[34] = 34 +sSliceArray.anPos[35] = 35 +sSliceArray.anPos[36] = 36 +sSliceArray.anPos[37] = 37 +sSliceArray.anPos[38] = 38 +sSliceArray.anPos[39] = 39 +sSliceArray.anPos[40] = 40 +sSliceArray.anPos[41] = 41 
+sSliceArray.anPos[42] = 42 +sSliceArray.anPos[43] = 43 +sSliceArray.anPos[44] = 44 +sSliceArray.anPos[45] = 45 +sSliceArray.anPos[46] = 46 +sSliceArray.anPos[47] = 47 +sSliceArray.lSize = 48 +sSliceArray.lConc = 1 +sSliceArray.ucMode = 0x2 +sSliceArray.sTSat.dThickness = 50 +sGroupArray.asGroup[0].nSize = 48 +sGroupArray.asGroup[0].dDistFact = 0.2 +sGroupArray.anMember[1] = 1 +sGroupArray.anMember[2] = 2 +sGroupArray.anMember[3] = 3 +sGroupArray.anMember[4] = 4 +sGroupArray.anMember[5] = 5 +sGroupArray.anMember[6] = 6 +sGroupArray.anMember[7] = 7 +sGroupArray.anMember[8] = 8 +sGroupArray.anMember[9] = 9 +sGroupArray.anMember[10] = 10 +sGroupArray.anMember[11] = 11 +sGroupArray.anMember[12] = 12 +sGroupArray.anMember[13] = 13 +sGroupArray.anMember[14] = 14 +sGroupArray.anMember[15] = 15 +sGroupArray.anMember[16] = 16 +sGroupArray.anMember[17] = 17 +sGroupArray.anMember[18] = 18 +sGroupArray.anMember[19] = 19 +sGroupArray.anMember[20] = 20 +sGroupArray.anMember[21] = 21 +sGroupArray.anMember[22] = 22 +sGroupArray.anMember[23] = 23 +sGroupArray.anMember[24] = 24 +sGroupArray.anMember[25] = 25 +sGroupArray.anMember[26] = 26 +sGroupArray.anMember[27] = 27 +sGroupArray.anMember[28] = 28 +sGroupArray.anMember[29] = 29 +sGroupArray.anMember[30] = 30 +sGroupArray.anMember[31] = 31 +sGroupArray.anMember[32] = 32 +sGroupArray.anMember[33] = 33 +sGroupArray.anMember[34] = 34 +sGroupArray.anMember[35] = 35 +sGroupArray.anMember[36] = 36 +sGroupArray.anMember[37] = 37 +sGroupArray.anMember[38] = 38 +sGroupArray.anMember[39] = 39 +sGroupArray.anMember[40] = 40 +sGroupArray.anMember[41] = 41 +sGroupArray.anMember[42] = 42 +sGroupArray.anMember[43] = 43 +sGroupArray.anMember[44] = 44 +sGroupArray.anMember[45] = 45 +sGroupArray.anMember[46] = 46 +sGroupArray.anMember[47] = 47 +sGroupArray.anMember[48] = -1 +sGroupArray.lSize = 1 +sGroupArray.sPSat.dThickness = 50 +sGroupArray.sPSat.dGap = 10 +sAutoAlign.dAAMatrix[0] = 1 +sAutoAlign.dAAMatrix[5] = 1 +sAutoAlign.dAAMatrix[10] = 1 +sAutoAlign.dAAMatrix[15] = 1 +sNavigatorPara.lBreathHoldMeas = 1 +sNavigatorPara.lRespComp = 4 +sNavigatorPara.alFree[22] = 2 +sNavigatorPara.adFree[13] = 150000 +sBladePara.dBladeCoverage = 100 +sBladePara.ucMotionCorr = 0x2 +sPrepPulses.ucFatSat = 0x1 +sPrepPulses.ucWaterSat = 0x4 +sPrepPulses.ucInversion = 0x4 +sPrepPulses.ucSatRecovery = 0x1 +sPrepPulses.ucT2Prep = 0x1 +sPrepPulses.ucTIScout = 0x1 +sPrepPulses.ucFatSatMode = 0x2 +sPrepPulses.dDarkBloodThickness = 200 +sPrepPulses.dDarkBloodFlipAngle = 200 +sPrepPulses.dT2PrepDuration = 40 +sPrepPulses.dIRPulseThicknessFactor = 0.77 +sKSpace.dPhaseResolution = 1 +sKSpace.dSliceResolution = 1 +sKSpace.dAngioDynCentralRegionA = 20 +sKSpace.dAngioDynSamplingDensityB = 25 +sKSpace.lBaseResolution = 128 +sKSpace.lPhaseEncodingLines = 128 +sKSpace.lPartitions = 64 +sKSpace.lImagesPerSlab = 64 +sKSpace.lRadialViews = 64 +sKSpace.lRadialInterleavesPerImage = 2 +sKSpace.lLinesPerShot = 1 +sKSpace.unReordering = 0x1 +sKSpace.dSeqPhasePartialFourierForSNR = 1 +sKSpace.ucPhasePartialFourier = 0x4 +sKSpace.ucSlicePartialFourier = 0x10 +sKSpace.ucAveragingMode = 0x2 +sKSpace.ucMultiSliceMode = 0x2 +sKSpace.ucDimension = 0x2 +sKSpace.ucTrajectory = 0x1 +sKSpace.ucViewSharing = 0x1 +sKSpace.ucAsymmetricEchoMode = 0x1 +sKSpace.ucPOCS = 0x1 +sFastImaging.lEPIFactor = 128 +sFastImaging.lTurboFactor = 1 +sFastImaging.lSliceTurboFactor = 1 +sFastImaging.lSegments = 1 +sFastImaging.ulEnableRFSpoiling = 0x1 +sFastImaging.ucSegmentationMode = 0x1 +sFastImaging.lShots = 1 
+sFastImaging.lEchoTrainDuration = 700 +sPhysioImaging.lSignal1 = 1 +sPhysioImaging.lMethod1 = 1 +sPhysioImaging.lSignal2 = 1 +sPhysioImaging.lMethod2 = 1 +sPhysioImaging.lPhases = 1 +sPhysioImaging.lRetroGatedImages = 16 +sPhysioImaging.sPhysioECG.lTriggerPulses = 1 +sPhysioImaging.sPhysioECG.lTriggerWindow = 5 +sPhysioImaging.sPhysioECG.lArrhythmiaDetection = 1 +sPhysioImaging.sPhysioECG.lCardiacGateOnThreshold = 100000 +sPhysioImaging.sPhysioECG.lCardiacGateOffThreshold = 700000 +sPhysioImaging.sPhysioECG.lTriggerIntervals = 1 +sPhysioImaging.sPhysioPulse.lTriggerPulses = 1 +sPhysioImaging.sPhysioPulse.lTriggerWindow = 5 +sPhysioImaging.sPhysioPulse.lArrhythmiaDetection = 1 +sPhysioImaging.sPhysioPulse.lCardiacGateOnThreshold = 100000 +sPhysioImaging.sPhysioPulse.lCardiacGateOffThreshold = 700000 +sPhysioImaging.sPhysioPulse.lTriggerIntervals = 1 +sPhysioImaging.sPhysioExt.lTriggerPulses = 1 +sPhysioImaging.sPhysioExt.lTriggerWindow = 5 +sPhysioImaging.sPhysioExt.lArrhythmiaDetection = 1 +sPhysioImaging.sPhysioExt.lCardiacGateOnThreshold = 100000 +sPhysioImaging.sPhysioExt.lCardiacGateOffThreshold = 700000 +sPhysioImaging.sPhysioExt.lTriggerIntervals = 1 +sPhysioImaging.sPhysioResp.lRespGateThreshold = 20 +sPhysioImaging.sPhysioResp.lRespGatePhase = 2 +sPhysioImaging.sPhysioResp.dGatingRatio = 0.3 +sPhysioImaging.sPhysioNative.ucMode = 0x1 +sPhysioImaging.sPhysioNative.ucFlowSenMode = 0x1 +sSpecPara.lPhaseCyclingType = 1 +sSpecPara.lPhaseEncodingType = 1 +sSpecPara.lRFExcitationBandwidth = 1 +sSpecPara.ucRemoveOversampling = 0x1 +sSpecPara.lAutoRefScanNo = 1 +sSpecPara.lDecouplingType = 1 +sSpecPara.lNOEType = 1 +sSpecPara.lExcitationType = 1 +sSpecPara.lSpecAppl = 1 +sSpecPara.lSpectralSuppression = 1 +sDiffusion.lDiffWeightings = 2 +sDiffusion.alBValue[1] = 1000 +sDiffusion.lNoiseLevel = 40 +sDiffusion.lDiffDirections = 64 +sDiffusion.ulMode = 0x100 +sAngio.ucPCFlowMode = 0x2 +sAngio.ucTOFInflow = 0x4 +sAngio.lDynamicReconMode = 1 +sAngio.lTemporalInterpolation = 1 +sRawFilter.lSlope_256 = 25 +sRawFilter.ucOn = 0x1 +sRawFilter.ucMode = 0x1 +sDistortionCorrFilter.ucMode = 0x1 +sPat.lAccelFactPE = 2 +sPat.lAccelFact3D = 1 +sPat.lRefLinesPE = 38 +sPat.ucPATMode = 0x2 +sPat.ucRefScanMode = 0x4 +sPat.ucTPatAverageAllFrames = 0x1 +sMDS.ulMdsModeMask = 0x1 +sMDS.ulMdsVariableResolution = 0x1 +sMDS.lTableSpeedNumerator = 1 +sMDS.lmdsLinesPerSegment = 15 +sMDS.sMdsEndPosSBCS_mm.dTra = 600 +sMDS.ulMdsReconMode = 0x1 +sMDS.dMdsRangeExtension = 600 +ucEnableIntro = 0x1 +ucDisableChangeStoreImages = 0x1 +ucAAMode = 0x1 +ucAARegionMode = 1 +ucAARefMode = 1 +ucReconstructionMode = 0x1 +ucOneSeriesForAllMeas = 0x1 +ucPHAPSMode = 0x1 +ucDixon = 0x1 +ucDixonSaveOriginal = 0x1 +ucWaitForPrepareCompletion = 0x1 +lAverages = 1 +dAveragesDouble = 1 +adFlipAngleDegree[0] = 90 +lScanTimeSec = 449 +lTotalScanTimeSec = 450 +dRefSNR = 33479.60771 +dRefSNR_VOI = 33479.60771 +tdefaultEVAProt = ""%SiemensEvaDefProt%\DTI\DTI.evp"" +asCoilSelectMeas[0].tNucleus = ""1H"" +asCoilSelectMeas[0].iUsedRFactor = 3 +asCoilSelectMeas[0].asList[0].sCoilElementID.tCoilID = ""HeadMatrix"" +asCoilSelectMeas[0].asList[0].sCoilElementID.lCoilCopy = 1 +asCoilSelectMeas[0].asList[0].sCoilElementID.tElement = ""H3P"" +asCoilSelectMeas[0].asList[0].lElementSelected = 1 +asCoilSelectMeas[0].asList[0].lRxChannelConnected = 1 +asCoilSelectMeas[0].asList[1].sCoilElementID.tCoilID = ""HeadMatrix"" +asCoilSelectMeas[0].asList[1].sCoilElementID.lCoilCopy = 1 +asCoilSelectMeas[0].asList[1].sCoilElementID.tElement = ""H4P"" 
+asCoilSelectMeas[0].asList[1].lElementSelected = 1 +asCoilSelectMeas[0].asList[1].lRxChannelConnected = 2 +asCoilSelectMeas[0].asList[2].sCoilElementID.tCoilID = ""HeadMatrix"" +asCoilSelectMeas[0].asList[2].sCoilElementID.lCoilCopy = 1 +asCoilSelectMeas[0].asList[2].sCoilElementID.tElement = ""H4S"" +asCoilSelectMeas[0].asList[2].lElementSelected = 1 +asCoilSelectMeas[0].asList[2].lRxChannelConnected = 3 +asCoilSelectMeas[0].asList[3].sCoilElementID.tCoilID = ""HeadMatrix"" +asCoilSelectMeas[0].asList[3].sCoilElementID.lCoilCopy = 1 +asCoilSelectMeas[0].asList[3].sCoilElementID.tElement = ""H4T"" +asCoilSelectMeas[0].asList[3].lElementSelected = 1 +asCoilSelectMeas[0].asList[3].lRxChannelConnected = 4 +asCoilSelectMeas[0].asList[4].sCoilElementID.tCoilID = ""HeadMatrix"" +asCoilSelectMeas[0].asList[4].sCoilElementID.lCoilCopy = 1 +asCoilSelectMeas[0].asList[4].sCoilElementID.tElement = ""H3S"" +asCoilSelectMeas[0].asList[4].lElementSelected = 1 +asCoilSelectMeas[0].asList[4].lRxChannelConnected = 5 +asCoilSelectMeas[0].asList[5].sCoilElementID.tCoilID = ""HeadMatrix"" +asCoilSelectMeas[0].asList[5].sCoilElementID.lCoilCopy = 1 +asCoilSelectMeas[0].asList[5].sCoilElementID.tElement = ""H3T"" +asCoilSelectMeas[0].asList[5].lElementSelected = 1 +asCoilSelectMeas[0].asList[5].lRxChannelConnected = 6 +asCoilSelectMeas[0].asList[6].sCoilElementID.tCoilID = ""HeadMatrix"" +asCoilSelectMeas[0].asList[6].sCoilElementID.lCoilCopy = 1 +asCoilSelectMeas[0].asList[6].sCoilElementID.tElement = ""H1P"" +asCoilSelectMeas[0].asList[6].lElementSelected = 1 +asCoilSelectMeas[0].asList[6].lRxChannelConnected = 7 +asCoilSelectMeas[0].asList[7].sCoilElementID.tCoilID = ""HeadMatrix"" +asCoilSelectMeas[0].asList[7].sCoilElementID.lCoilCopy = 1 +asCoilSelectMeas[0].asList[7].sCoilElementID.tElement = ""H2P"" +asCoilSelectMeas[0].asList[7].lElementSelected = 1 +asCoilSelectMeas[0].asList[7].lRxChannelConnected = 8 +asCoilSelectMeas[0].asList[8].sCoilElementID.tCoilID = ""HeadMatrix"" +asCoilSelectMeas[0].asList[8].sCoilElementID.lCoilCopy = 1 +asCoilSelectMeas[0].asList[8].sCoilElementID.tElement = ""H2S"" +asCoilSelectMeas[0].asList[8].lElementSelected = 1 +asCoilSelectMeas[0].asList[8].lRxChannelConnected = 9 +asCoilSelectMeas[0].asList[9].sCoilElementID.tCoilID = ""HeadMatrix"" +asCoilSelectMeas[0].asList[9].sCoilElementID.lCoilCopy = 1 +asCoilSelectMeas[0].asList[9].sCoilElementID.tElement = ""H2T"" +asCoilSelectMeas[0].asList[9].lElementSelected = 1 +asCoilSelectMeas[0].asList[9].lRxChannelConnected = 10 +asCoilSelectMeas[0].asList[10].sCoilElementID.tCoilID = ""HeadMatrix"" +asCoilSelectMeas[0].asList[10].sCoilElementID.lCoilCopy = 1 +asCoilSelectMeas[0].asList[10].sCoilElementID.tElement = ""H1S"" +asCoilSelectMeas[0].asList[10].lElementSelected = 1 +asCoilSelectMeas[0].asList[10].lRxChannelConnected = 11 +asCoilSelectMeas[0].asList[11].sCoilElementID.tCoilID = ""HeadMatrix"" +asCoilSelectMeas[0].asList[11].sCoilElementID.lCoilCopy = 1 +asCoilSelectMeas[0].asList[11].sCoilElementID.tElement = ""H1T"" +asCoilSelectMeas[0].asList[11].lElementSelected = 1 +asCoilSelectMeas[0].asList[11].lRxChannelConnected = 12 +asCoilSelectMeas[0].sCOILPLUGS.aulPlugId[0] = 0xff +asCoilSelectMeas[0].sCOILPLUGS.aulPlugId[1] = 0xee +asCoilSelectMeas[0].sCOILPLUGS.aulPlugId[2] = 0xee +asCoilSelectMeas[0].sCOILPLUGS.aulPlugId[3] = 0xad +asCoilSelectMeas[0].sCOILPLUGS.aulPlugId[4] = 0xee +asCoilSelectMeas[0].sCOILPLUGS.aulPlugId[5] = 0xee +asCoilSelectMeas[0].sCOILPLUGS.aulPlugId[6] = 0x5d 
+asCoilSelectMeas[0].sCOILPLUGS.aulPlugId[7] = 0xb1 +asCoilSelectMeas[0].sCOILPLUGS.aulPlugId[8] = 0xee +asCoilSelectMeas[0].sCOILPLUGS.aulPlugId[9] = 0xb2 +asCoilSelectMeas[0].sCOILPLUGS.aulPlugId[10] = 0xee +asCoilSelectMeas[0].sCOILPLUGS.auiNmbrOfNibbles[0] = 0x2 +asCoilSelectMeas[0].sCOILPLUGS.auiNmbrOfNibbles[1] = 0x2 +asCoilSelectMeas[0].sCOILPLUGS.auiNmbrOfNibbles[2] = 0x2 +asCoilSelectMeas[0].sCOILPLUGS.auiNmbrOfNibbles[3] = 0x2 +asCoilSelectMeas[0].sCOILPLUGS.auiNmbrOfNibbles[4] = 0x2 +asCoilSelectMeas[0].sCOILPLUGS.auiNmbrOfNibbles[5] = 0x2 +asCoilSelectMeas[0].sCOILPLUGS.auiNmbrOfNibbles[6] = 0x2 +asCoilSelectMeas[0].sCOILPLUGS.auiNmbrOfNibbles[7] = 0x2 +asCoilSelectMeas[0].sCOILPLUGS.auiNmbrOfNibbles[8] = 0x2 +asCoilSelectMeas[0].sCOILPLUGS.auiNmbrOfNibbles[9] = 0x2 +asCoilSelectMeas[0].sCOILPLUGS.auiNmbrOfNibbles[10] = 0x2 +asCoilSelectMeas[0].aFFT_SCALE[0].flFactor = 3.77259 +asCoilSelectMeas[0].aFFT_SCALE[0].bValid = 1 +asCoilSelectMeas[0].aFFT_SCALE[0].lRxChannel = 1 +asCoilSelectMeas[0].aFFT_SCALE[1].flFactor = 3.83164 +asCoilSelectMeas[0].aFFT_SCALE[1].bValid = 1 +asCoilSelectMeas[0].aFFT_SCALE[1].lRxChannel = 2 +asCoilSelectMeas[0].aFFT_SCALE[2].flFactor = 3.7338 +asCoilSelectMeas[0].aFFT_SCALE[2].bValid = 1 +asCoilSelectMeas[0].aFFT_SCALE[2].lRxChannel = 3 +asCoilSelectMeas[0].aFFT_SCALE[3].flFactor = 4.08449 +asCoilSelectMeas[0].aFFT_SCALE[3].bValid = 1 +asCoilSelectMeas[0].aFFT_SCALE[3].lRxChannel = 4 +asCoilSelectMeas[0].aFFT_SCALE[4].flFactor = 3.82172 +asCoilSelectMeas[0].aFFT_SCALE[4].bValid = 1 +asCoilSelectMeas[0].aFFT_SCALE[4].lRxChannel = 5 +asCoilSelectMeas[0].aFFT_SCALE[5].flFactor = 3.86816 +asCoilSelectMeas[0].aFFT_SCALE[5].bValid = 1 +asCoilSelectMeas[0].aFFT_SCALE[5].lRxChannel = 6 +asCoilSelectMeas[0].aFFT_SCALE[6].flFactor = 4.48252 +asCoilSelectMeas[0].aFFT_SCALE[6].bValid = 1 +asCoilSelectMeas[0].aFFT_SCALE[6].lRxChannel = 7 +asCoilSelectMeas[0].aFFT_SCALE[7].flFactor = 4.39406 +asCoilSelectMeas[0].aFFT_SCALE[7].bValid = 1 +asCoilSelectMeas[0].aFFT_SCALE[7].lRxChannel = 8 +asCoilSelectMeas[0].aFFT_SCALE[8].flFactor = 4.50498 +asCoilSelectMeas[0].aFFT_SCALE[8].bValid = 1 +asCoilSelectMeas[0].aFFT_SCALE[8].lRxChannel = 9 +asCoilSelectMeas[0].aFFT_SCALE[9].flFactor = 4.57011 +asCoilSelectMeas[0].aFFT_SCALE[9].bValid = 1 +asCoilSelectMeas[0].aFFT_SCALE[9].lRxChannel = 10 +asCoilSelectMeas[0].aFFT_SCALE[10].flFactor = 4.6211 +asCoilSelectMeas[0].aFFT_SCALE[10].bValid = 1 +asCoilSelectMeas[0].aFFT_SCALE[10].lRxChannel = 11 +asCoilSelectMeas[0].aFFT_SCALE[11].flFactor = 4.69845 +asCoilSelectMeas[0].aFFT_SCALE[11].bValid = 1 +asCoilSelectMeas[0].aFFT_SCALE[11].lRxChannel = 12 +sEFISPEC.bEFIDataValid = 1 +ucCineMode = 0x1 +ucSequenceType = 0x4 +ucCoilCombineMode = 0x2 +ucFlipAngleMode = 0x1 +lTOM = 1 +lProtID = -434 +ucReadOutMode = 0x1 +ucBold3dPace = 0x1 +ucForcePositioningOnNDIS = 0x1 +ucInternalTablePosValid = 0x1 +sParametricMapping.ucParametricMap = 0x1 +sIR.lScanNumber = 1 +sAsl.ulMode = 0x1 +WaitForUserStart = 0x1 +ucAutoAlignInit = 0x1 +### ASCCONV END ### \ No newline at end of file diff --git a/nibabel/nicom/tests/data/slicethickness_empty_string.dcm b/nibabel/nicom/tests/data/slicethickness_empty_string.dcm new file mode 100644 index 0000000000..c2718cfdeb Binary files /dev/null and b/nibabel/nicom/tests/data/slicethickness_empty_string.dcm differ diff --git a/nibabel/nicom/tests/test_ascconv.py b/nibabel/nicom/tests/test_ascconv.py new file mode 100644 index 0000000000..974b917415 --- /dev/null +++ b/nibabel/nicom/tests/test_ascconv.py @@ -0,0 
+1,63 @@ +""" Testing Siemens "ASCCONV" parser +""" + +from os.path import join as pjoin, dirname + +import numpy as np + +from .. import ascconv +from ...externals import OrderedDict + +from numpy.testing import assert_array_equal, assert_array_almost_equal + +DATA_PATH = pjoin(dirname(__file__), 'data') +ASCCONV_INPUT = pjoin(DATA_PATH, 'ascconv_sample.txt') + + +def test_ascconv_parse(): + with open(ASCCONV_INPUT, 'rt') as fobj: + contents = fobj.read() + ascconv_dict, attrs = ascconv.parse_ascconv(contents, str_delim='""') + assert attrs == OrderedDict() + assert len(ascconv_dict) == 72 + assert ascconv_dict['tProtocolName'] == 'CBU+AF8-DTI+AF8-64D+AF8-1A' + assert ascconv_dict['ucScanRegionPosValid'] == 1 + assert_array_almost_equal(ascconv_dict['sProtConsistencyInfo']['flNominalB0'], + 2.89362) + assert ascconv_dict['sProtConsistencyInfo']['flGMax'] == 26 + assert (list(ascconv_dict['sSliceArray'].keys()) == + ['asSlice', 'anAsc', 'anPos', 'lSize', 'lConc', 'ucMode', + 'sTSat']) + slice_arr = ascconv_dict['sSliceArray'] + as_slice = slice_arr['asSlice'] + assert_array_equal([e['dPhaseFOV'] for e in as_slice], 230) + assert_array_equal([e['dReadoutFOV'] for e in as_slice], 230) + assert_array_equal([e['dThickness'] for e in as_slice], 2.5) + # Some lists defined starting at 1, so have None as first element + assert slice_arr['anAsc'] == [None] + list(range(1, 48)) + assert slice_arr['anPos'] == [None] + list(range(1, 48)) + # A top level list + assert len(ascconv_dict['asCoilSelectMeas']) == 1 + as_list = ascconv_dict['asCoilSelectMeas'][0]['asList'] + # This lower-level list does start indexing at 0 + assert len(as_list) == 12 + for i, el in enumerate(as_list): + assert (list(el.keys()) == + ['sCoilElementID', 'lElementSelected', 'lRxChannelConnected']) + assert el['lElementSelected'] == 1 + assert el['lRxChannelConnected'] == i + 1 + # Test negative number + assert_array_almost_equal(as_slice[0]['sPosition']['dCor'], -20.03015269) + + +def test_ascconv_w_attrs(): + in_str = ("### ASCCONV BEGIN object=MrProtDataImpl@MrProtocolData " + "version=41340006 " + "converter=%MEASCONST%/ConverterList/Prot_Converter.txt ###\n" + "test = \"hello\"\n" + "### ASCCONV END ###") + ascconv_dict, attrs = ascconv.parse_ascconv(in_str, '""') + assert attrs['object'] == 'MrProtDataImpl@MrProtocolData' + assert attrs['version'] == '41340006' + assert attrs['converter'] == '%MEASCONST%/ConverterList/Prot_Converter.txt' + assert ascconv_dict['test'] == 'hello' diff --git a/nibabel/nicom/tests/test_csareader.py b/nibabel/nicom/tests/test_csareader.py index 592dd2ba54..1692aad622 100644 --- a/nibabel/nicom/tests/test_csareader.py +++ b/nibabel/nicom/tests/test_csareader.py @@ -7,15 +7,13 @@ import numpy as np +from ...pydicom_compat import pydicom from .. import csareader as csa from .. import dwiparams as dwp -from nose.tools import (assert_true, assert_false, assert_equal, assert_raises) - -from ...testing import skipif - -from nibabel.pydicom_compat import dicom_test, pydicom -from .test_dicomwrappers import (IO_DATA_PATH, DATA) +import pytest +from . 
import dicom_test +from .test_dicomwrappers import IO_DATA_PATH, DATA CSA2_B0 = open(pjoin(IO_DATA_PATH, 'csa2_b0.bin'), 'rb').read() CSA2_B1000 = open(pjoin(IO_DATA_PATH, 'csa2_b1000.bin'), 'rb').read() @@ -27,59 +25,61 @@ @dicom_test def test_csa_header_read(): hdr = csa.get_csa_header(DATA, 'image') - assert_equal(hdr['n_tags'], 83) - assert_equal(csa.get_csa_header(DATA, 'series')['n_tags'], 65) - assert_raises(ValueError, csa.get_csa_header, DATA, 'xxxx') - assert_true(csa.is_mosaic(hdr)) + assert hdr['n_tags'] == 83 + assert csa.get_csa_header(DATA, 'series')['n_tags'] == 65 + with pytest.raises(ValueError): + csa.get_csa_header(DATA, 'xxxx') + assert csa.is_mosaic(hdr) # Get a shallow copy of the data, lacking the CSA marker # Need to do it this way because del appears broken in pydicom 0.9.7 data2 = pydicom.dataset.Dataset() for element in DATA: if (element.tag.group, element.tag.elem) != (0x29, 0x10): data2.add(element) - assert_equal(csa.get_csa_header(data2, 'image'), None) + assert csa.get_csa_header(data2, 'image') is None # Add back the marker - CSA works again data2[(0x29, 0x10)] = DATA[(0x29, 0x10)] - assert_true(csa.is_mosaic(csa.get_csa_header(data2, 'image'))) + assert csa.is_mosaic(csa.get_csa_header(data2, 'image')) def test_csas0(): for csa_str in (CSA2_B0, CSA2_B1000): csa_info = csa.read(csa_str) - assert_equal(csa_info['type'], 2) - assert_equal(csa_info['n_tags'], 83) + assert csa_info['type'] == 2 + assert csa_info['n_tags'] == 83 tags = csa_info['tags'] - assert_equal(len(tags), 83) + assert len(tags) == 83 n_o_m = tags['NumberOfImagesInMosaic'] - assert_equal(n_o_m['items'], [48]) + assert n_o_m['items'] == [48] csa_info = csa.read(CSA2_B1000) b_matrix = csa_info['tags']['B_matrix'] - assert_equal(len(b_matrix['items']), 6) + assert len(b_matrix['items']) == 6 b_value = csa_info['tags']['B_value'] - assert_equal(b_value['items'], [1000]) + assert b_value['items'] == [1000] def test_csa_len0(): # We did get a failure for item with item_len of 0 - gh issue #92 csa_info = csa.read(CSA2_0len) - assert_equal(csa_info['type'], 2) - assert_equal(csa_info['n_tags'], 44) + assert csa_info['type'] == 2 + assert csa_info['n_tags'] == 44 tags = csa_info['tags'] - assert_equal(len(tags), 44) + assert len(tags) == 44 def test_csa_nitem(): # testing csa.read's ability to raise an error when n_items >= 200 - assert_raises(csa.CSAReadError, csa.read, CSA_STR_1001n_items) + with pytest.raises(csa.CSAReadError): + csa.read(CSA_STR_1001n_items) # OK when < 1000 csa_info = csa.read(CSA_STR_valid) - assert_equal(len(csa_info['tags']), 1) + assert len(csa_info['tags']) == 1 # OK after changing module global n_items_thresh = csa.MAX_CSA_ITEMS try: csa.MAX_CSA_ITEMS = 2000 csa_info = csa.read(CSA_STR_1001n_items) - assert_equal(len(csa_info['tags']), 1) + assert len(csa_info['tags']) == 1 finally: csa.MAX_CSA_ITEMS = n_items_thresh @@ -88,32 +88,30 @@ def test_csa_params(): for csa_str in (CSA2_B0, CSA2_B1000): csa_info = csa.read(csa_str) n_o_m = csa.get_n_mosaic(csa_info) - assert_equal(n_o_m, 48) + assert n_o_m == 48 snv = csa.get_slice_normal(csa_info) - assert_equal(snv.shape, (3,)) - assert_true(np.allclose(1, - np.sqrt((snv * snv).sum()))) + assert snv.shape == (3,) + assert np.allclose(1, np.sqrt((snv * snv).sum())) amt = csa.get_acq_mat_txt(csa_info) - assert_equal(amt, '128p*128') + assert amt == '128p*128' csa_info = csa.read(CSA2_B0) b_matrix = csa.get_b_matrix(csa_info) - assert_equal(b_matrix, None) + assert b_matrix is None b_value = csa.get_b_value(csa_info) - 
assert_equal(b_value, 0) + assert b_value == 0 g_vector = csa.get_g_vector(csa_info) - assert_equal(g_vector, None) + assert g_vector is None csa_info = csa.read(CSA2_B1000) b_matrix = csa.get_b_matrix(csa_info) - assert_equal(b_matrix.shape, (3, 3)) + assert b_matrix.shape == (3, 3) # check (by absence of error) that the B matrix is positive # semi-definite. dwp.B2q(b_matrix) # no error b_value = csa.get_b_value(csa_info) - assert_equal(b_value, 1000) + assert b_value == 1000 g_vector = csa.get_g_vector(csa_info) - assert_equal(g_vector.shape, (3,)) - assert_true( - np.allclose(1, np.sqrt((g_vector * g_vector).sum()))) + assert g_vector.shape == (3,) + assert np.allclose(1, np.sqrt((g_vector * g_vector).sum())) def test_ice_dims(): @@ -124,14 +122,11 @@ def test_ice_dims(): for csa_str, ex_dims in ((CSA2_B0, ex_dims0), (CSA2_B1000, ex_dims1)): csa_info = csa.read(csa_str) - assert_equal(csa.get_ice_dims(csa_info), - ex_dims) - assert_equal(csa.get_ice_dims({}), None) + assert csa.get_ice_dims(csa_info) == ex_dims + assert csa.get_ice_dims({}) is None @dicom_test -@skipif(sys.version_info < (2,7) and pydicom.__version__ < '1.0', - 'Known issue for python 2.6 and pydicom < 1.0') def test_missing_csa_elem(): # Test that we get None instead of raising an Exception when the file has # the PrivateCreator element for the CSA dict but not the element with the @@ -140,4 +135,4 @@ def test_missing_csa_elem(): csa_tag = pydicom.dataset.Tag(0x29, 0x1010) del dcm[csa_tag] hdr = csa.get_csa_header(dcm, 'image') - assert_equal(hdr, None) + assert hdr is None diff --git a/nibabel/nicom/tests/test_dicomreaders.py b/nibabel/nicom/tests/test_dicomreaders.py index cb03aae74b..167cb26de6 100644 --- a/nibabel/nicom/tests/test_dicomreaders.py +++ b/nibabel/nicom/tests/test_dicomreaders.py @@ -2,20 +2,17 @@ """ -from os.path import join as pjoin, abspath +from os.path import join as pjoin import numpy as np from .. import dicomreaders as didr +from ...pydicom_compat import pydicom -from nibabel.pydicom_compat import dicom_test, pydicom +import pytest +from . 
import dicom_test -from .test_dicomwrappers import (EXPECTED_AFFINE, - EXPECTED_PARAMS, - IO_DATA_PATH, - DATA) - -from nose.tools import (assert_true, assert_false, assert_equal, assert_raises) +from .test_dicomwrappers import EXPECTED_AFFINE, EXPECTED_PARAMS, IO_DATA_PATH, DATA from numpy.testing import assert_array_equal, assert_array_almost_equal @@ -24,7 +21,7 @@ def test_read_dwi(): img = didr.mosaic_to_nii(DATA) arr = img.get_data() - assert_equal(arr.shape, (128, 128, 48)) + assert arr.shape == (128, 128, 48) assert_array_almost_equal(img.affine, EXPECTED_AFFINE) @@ -32,11 +29,12 @@ def test_read_dwi(): def test_read_dwis(): data, aff, bs, gs = didr.read_mosaic_dwi_dir(IO_DATA_PATH, 'siemens_dwi_*.dcm.gz') - assert_equal(data.ndim, 4) + assert data.ndim == 4 assert_array_almost_equal(aff, EXPECTED_AFFINE) assert_array_almost_equal(bs, (0, EXPECTED_PARAMS[0])) assert_array_almost_equal(gs, (np.zeros((3,)), EXPECTED_PARAMS[1])) - assert_raises(IOError, didr.read_mosaic_dwi_dir, 'improbable') + with pytest.raises(IOError): + didr.read_mosaic_dwi_dir('improbable') @dicom_test @@ -53,29 +51,21 @@ def test_passing_kwds(): dicom_kwargs=dict(force=True)) assert_array_equal(data, data2) # This should raise an error in pydicom.dicomio.read_file - assert_raises(TypeError, - func, - IO_DATA_PATH, - dwi_glob, - dicom_kwargs=dict(not_a_parameter=True)) + with pytest.raises(TypeError): + func(IO_DATA_PATH, dwi_glob, dicom_kwargs=dict(not_a_parameter=True)) # These are invalid dicoms, so will raise an error unless force=True - assert_raises(pydicom.filereader.InvalidDicomError, - func, - IO_DATA_PATH, - csa_glob) + with pytest.raises(pydicom.filereader.InvalidDicomError): + func(IO_DATA_PATH, csa_glob) # But here, we catch the error because the dicoms are in the wrong # format - assert_raises(didr.DicomReadError, - func, - IO_DATA_PATH, - csa_glob, - dicom_kwargs=dict(force=True)) + with pytest.raises(didr.DicomReadError): + func(IO_DATA_PATH, csa_glob, dicom_kwargs=dict(force=True)) @dicom_test def test_slices_to_series(): dicom_files = (pjoin(IO_DATA_PATH, "%d.dcm" % i) for i in range(2)) wrappers = [didr.wrapper_from_file(f) for f in dicom_files] series = didr.slices_to_series(wrappers) - assert_equal(len(series), 1) - assert_equal(len(series[0]), 2) + assert len(series) == 1 + assert len(series[0]) == 2 diff --git a/nibabel/nicom/tests/test_dicomwrappers.py b/nibabel/nicom/tests/test_dicomwrappers.py index 7e611c569d..0bb875002b 100755 --- a/nibabel/nicom/tests/test_dicomwrappers.py +++ b/nibabel/nicom/tests/test_dicomwrappers.py @@ -9,18 +9,18 @@ import numpy as np -from nibabel.pydicom_compat import (have_dicom, pydicom, read_file, dicom_test, - tag_for_keyword) +from nibabel.pydicom_compat import have_dicom, pydicom, read_file, tag_for_keyword from .. import dicomwrappers as didw from .. import dicomreaders as didr from ...volumeutils import endian_codes +import pytest from unittest import TestCase -from nose.tools import (assert_true, assert_false, assert_equal, - assert_not_equal, assert_raises) +from . 
import dicom_test from numpy.testing import assert_array_equal, assert_array_almost_equal +from ...tests.nibabel_data import get_nibabel_data, needs_nibabel_data IO_DATA_PATH = pjoin(dirname(__file__), 'data') DATA_FILE = pjoin(IO_DATA_PATH, 'siemens_dwi_1000.dcm.gz') @@ -35,6 +35,11 @@ DATA_FILE_SLC_NORM = pjoin(IO_DATA_PATH, 'csa_slice_norm.dcm') DATA_FILE_DEC_RSCL = pjoin(IO_DATA_PATH, 'decimal_rescale.dcm') DATA_FILE_4D = pjoin(IO_DATA_PATH, '4d_multiframe_test.dcm') +DATA_FILE_EMPTY_ST = pjoin(IO_DATA_PATH, 'slicethickness_empty_string.dcm') +DATA_FILE_4D_DERIVED = pjoin(get_nibabel_data(), 'nitest-dicom', + '4d_multiframe_with_derived.dcm') +DATA_FILE_CT = pjoin(get_nibabel_data(), 'nitest-dicom', + 'siemens_ct_header_csa.dcm') # This affine from our converted image was shown to match our image spatially # with an image from SPM DICOM conversion. We checked the matching with SPM @@ -65,31 +70,37 @@ def test_wrappers(): (didw.MosaicWrapper, ({}, None, 10)), (didw.MultiframeWrapper, (multi_minimal,))): dw = maker(*args) - assert_equal(dw.get('InstanceNumber'), None) - assert_equal(dw.get('AcquisitionNumber'), None) - assert_raises(KeyError, dw.__getitem__, 'not an item') - assert_raises(didw.WrapperError, dw.get_data) - assert_raises(didw.WrapperError, dw.get_affine) - assert_raises(TypeError, maker) + assert dw.get('InstanceNumber') is None + assert dw.get('AcquisitionNumber') is None + with pytest.raises(KeyError): + dw['not an item'] + with pytest.raises(didw.WrapperError): + dw.get_data() + with pytest.raises(didw.WrapperError): + dw.affine + with pytest.raises(TypeError): + maker() # Check default attributes if not maker is didw.MosaicWrapper: - assert_false(dw.is_mosaic) - assert_equal(dw.b_matrix, None) - assert_equal(dw.q_vector, None) + assert not dw.is_mosaic + assert dw.b_matrix is None + assert dw.q_vector is None for maker in (didw.wrapper_from_data, didw.Wrapper, didw.SiemensWrapper, didw.MosaicWrapper ): dw = maker(DATA) - assert_equal(dw.get('InstanceNumber'), 2) - assert_equal(dw.get('AcquisitionNumber'), 2) - assert_raises(KeyError, dw.__getitem__, 'not an item') + assert dw.get('InstanceNumber') == 2 + assert dw.get('AcquisitionNumber') == 2 + with pytest.raises(KeyError): + dw['not an item'] for maker in (didw.MosaicWrapper, didw.wrapper_from_data): dw = maker(DATA) - assert_true(dw.is_mosaic) + assert dw.is_mosaic # DATA is not a Multiframe DICOM file - assert_raises(didw.WrapperError, didw.MultiframeWrapper, DATA) + with pytest.raises(didw.WrapperError): + didw.MultiframeWrapper(DATA) def test_get_from_wrapper(): @@ -97,12 +108,13 @@ def test_get_from_wrapper(): # data dcm_data = {'some_key': 'some value'} dw = didw.Wrapper(dcm_data) - assert_equal(dw.get('some_key'), 'some value') - assert_equal(dw.get('some_other_key'), None) + assert dw.get('some_key') == 'some value' + assert dw.get('some_other_key') is None # Getitem uses the same dictionary access - assert_equal(dw['some_key'], 'some value') + assert dw['some_key'] == 'some value' # And raises a WrapperError for missing keys - assert_raises(KeyError, dw.__getitem__, 'some_other_key') + with pytest.raises(KeyError): + dw['some_other_key'] # Test we don't use attributes for get class FakeData(dict): @@ -110,7 +122,7 @@ class FakeData(dict): d = FakeData() d.some_key = 'another bit of data' dw = didw.Wrapper(d) - assert_equal(dw.get('some_key'), None) + assert dw.get('some_key') is None # Check get defers to dcm_data get class FakeData2(object): @@ -120,7 +132,7 @@ def get(self, key, default): d = FakeData2() 
d.some_key = 'another bit of data' dw = didw.Wrapper(d) - assert_equal(dw.get('some_key'), 1) + assert dw.get('some_key') == 1 @dicom_test @@ -128,36 +140,40 @@ def test_wrapper_from_data(): # test wrapper from data, wrapper from file for dw in (didw.wrapper_from_data(DATA), didw.wrapper_from_file(DATA_FILE)): - assert_equal(dw.get('InstanceNumber'), 2) - assert_equal(dw.get('AcquisitionNumber'), 2) - assert_raises(KeyError, dw.__getitem__, 'not an item') - assert_true(dw.is_mosaic) + assert dw.get('InstanceNumber') == 2 + assert dw.get('AcquisitionNumber') == 2 + with pytest.raises(KeyError): + dw['not an item'] + assert dw.is_mosaic assert_array_almost_equal( - np.dot(didr.DPCS_TO_TAL, dw.get_affine()), + np.dot(didr.DPCS_TO_TAL, dw.affine), EXPECTED_AFFINE) for dw in (didw.wrapper_from_data(DATA_PHILIPS), didw.wrapper_from_file(DATA_FILE_PHILIPS)): - assert_equal(dw.get('InstanceNumber'), 1) - assert_equal(dw.get('AcquisitionNumber'), 3) - assert_raises(KeyError, dw.__getitem__, 'not an item') - assert_true(dw.is_multiframe) + assert dw.get('InstanceNumber') == 1 + assert dw.get('AcquisitionNumber') == 3 + with pytest.raises(KeyError): + dw['not an item'] + assert dw.is_multiframe # Another CSA file dw = didw.wrapper_from_file(DATA_FILE_SLC_NORM) - assert_true(dw.is_mosaic) + assert dw.is_mosaic # Check that multiframe requires minimal set of DICOM tags fake_data = dict() fake_data['SOPClassUID'] = '1.2.840.10008.5.1.4.1.1.4.2' dw = didw.wrapper_from_data(fake_data) - assert_false(dw.is_multiframe) + assert not dw.is_multiframe # use the correct SOPClassUID fake_data['SOPClassUID'] = '1.2.840.10008.5.1.4.1.1.4.1' - assert_raises(didw.WrapperError, didw.wrapper_from_data, fake_data) + with pytest.raises(didw.WrapperError): + didw.wrapper_from_data(fake_data) fake_data['PerFrameFunctionalGroupsSequence'] = [None] - assert_raises(didw.WrapperError, didw.wrapper_from_data, fake_data) + with pytest.raises(didw.WrapperError): + didw.wrapper_from_data(fake_data) fake_data['SharedFunctionalGroupsSequence'] = [None] # minimal set should now be met dw = didw.wrapper_from_data(fake_data) - assert_true(dw.is_multiframe) + assert dw.is_multiframe @dicom_test @@ -173,19 +189,18 @@ def test_wrapper_args_kwds(): assert_array_equal(data, dcm2.get_data()) # Trying to read non-dicom file raises pydicom error, usually csa_fname = pjoin(IO_DATA_PATH, 'csa2_b0.bin') - assert_raises(pydicom.filereader.InvalidDicomError, - didw.wrapper_from_file, - csa_fname) + with pytest.raises(pydicom.filereader.InvalidDicomError): + didw.wrapper_from_file(csa_fname) # We can force the read, in which case rubbish returns dcm_malo = didw.wrapper_from_file(csa_fname, force=True) - assert_false(dcm_malo.is_mosaic) + assert not dcm_malo.is_mosaic @dicom_test def test_dwi_params(): dw = didw.wrapper_from_data(DATA) b_matrix = dw.b_matrix - assert_equal(b_matrix.shape, (3, 3)) + assert b_matrix.shape == (3, 3) q = dw.q_vector b = np.sqrt(np.sum(q * q)) # vector norm g = q / b @@ -198,9 +213,9 @@ def test_q_vector_etc(): # Test diffusion params in wrapper classes # Default is no q_vector, b_value, b_vector dw = didw.Wrapper(DATA) - assert_equal(dw.q_vector, None) - assert_equal(dw.b_value, None) - assert_equal(dw.b_vector, None) + assert dw.q_vector is None + assert dw.b_value is None + assert dw.b_vector is None for pos in range(3): q_vec = np.zeros((3,)) q_vec[pos] = 10. 
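For reference, the q-vector tests above exercise the relationship b = ||q|| and g = q / b, with numerically tiny q treated as b = 0. A minimal sketch of that relationship (illustrative only; the tested implementation lives in nibabel.nicom.dwiparams):

```python
import numpy as np

def q_to_bg(q_vec, tol=1e-5):
    """Split a q vector into b value (vector norm) and unit gradient direction.

    Hypothetical helper mirroring what the tests above check; not the
    library implementation.
    """
    q_vec = np.asarray(q_vec, dtype=float)
    b = np.sqrt(np.sum(q_vec * q_vec))  # vector norm
    if b < tol:
        # Numerically tiny q vectors are treated as b=0 with no direction
        return 0.0, np.zeros(3)
    return b, q_vec / b

b, g = q_to_bg([0.0, 10.0, 0.0])
assert b == 10.0 and np.allclose(g, [0.0, 1.0, 0.0])
b, g = q_to_bg([0.0, 0.0, 1e-6])
assert b == 0.0 and np.allclose(g, np.zeros(3))
```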
@@ -208,12 +223,12 @@ def test_q_vector_etc(): dw = didw.Wrapper(DATA) dw.q_vector = q_vec assert_array_equal(dw.q_vector, q_vec) - assert_equal(dw.b_value, 10) + assert dw.b_value == 10 assert_array_equal(dw.b_vector, q_vec / 10.) # Reset wrapped dicom to refresh one_time property dw = didw.Wrapper(DATA) dw.q_vector = np.array([0, 0, 1e-6]) - assert_equal(dw.b_value, 0) + assert dw.b_value == 0 assert_array_equal(dw.b_vector, np.zeros((3,))) # Test MosaicWrapper sdw = didw.MosaicWrapper(DATA) @@ -224,7 +239,7 @@ def test_q_vector_etc(): # Reset wrapped dicom to refresh one_time property sdw = didw.MosaicWrapper(DATA) sdw.q_vector = np.array([0, 0, 1e-6]) - assert_equal(sdw.b_value, 0) + assert sdw.b_value == 0 assert_array_equal(sdw.b_vector, np.zeros((3,))) @@ -232,51 +247,51 @@ def test_q_vector_etc(): def test_vol_matching(): # make the Siemens wrapper, check it compares True against itself dw_siemens = didw.wrapper_from_data(DATA) - assert_true(dw_siemens.is_mosaic) - assert_true(dw_siemens.is_csa) - assert_true(dw_siemens.is_same_series(dw_siemens)) + assert dw_siemens.is_mosaic + assert dw_siemens.is_csa + assert dw_siemens.is_same_series(dw_siemens) # make plain wrapper, compare against itself dw_plain = didw.Wrapper(DATA) - assert_false(dw_plain.is_mosaic) - assert_false(dw_plain.is_csa) - assert_true(dw_plain.is_same_series(dw_plain)) + assert not dw_plain.is_mosaic + assert not dw_plain.is_csa + assert dw_plain.is_same_series(dw_plain) # specific vs plain wrapper compares False, because the Siemens # wrapper has more non-empty information - assert_false(dw_plain.is_same_series(dw_siemens)) + assert not dw_plain.is_same_series(dw_siemens) # and this should be symmetric - assert_false(dw_siemens.is_same_series(dw_plain)) + assert not dw_siemens.is_same_series(dw_plain) # we can even make an empty wrapper. This compares True against # itself but False against the others dw_empty = didw.Wrapper({}) - assert_true(dw_empty.is_same_series(dw_empty)) - assert_false(dw_empty.is_same_series(dw_plain)) - assert_false(dw_plain.is_same_series(dw_empty)) + assert dw_empty.is_same_series(dw_empty) + assert not dw_empty.is_same_series(dw_plain) + assert not dw_plain.is_same_series(dw_empty) # Just to check the interface, make a pretend signature-providing # object. class C(object): series_signature = {} - assert_true(dw_empty.is_same_series(C())) + assert dw_empty.is_same_series(C()) # make the Philips wrapper, check it compares True against itself dw_philips = didw.wrapper_from_data(DATA_PHILIPS) - assert_true(dw_philips.is_multiframe) - assert_true(dw_philips.is_same_series(dw_philips)) + assert dw_philips.is_multiframe + assert dw_philips.is_same_series(dw_philips) # make plain wrapper, compare against itself dw_plain_philips = didw.Wrapper(DATA) - assert_false(dw_plain_philips.is_multiframe) - assert_true(dw_plain_philips.is_same_series(dw_plain_philips)) + assert not dw_plain_philips.is_multiframe + assert dw_plain_philips.is_same_series(dw_plain_philips) # specific vs plain wrapper compares False, because the Philips # wrapper has more non-empty information - assert_false(dw_plain_philips.is_same_series(dw_philips)) + assert not dw_plain_philips.is_same_series(dw_philips) # and this should be symmetric - assert_false(dw_philips.is_same_series(dw_plain_philips)) + assert not dw_philips.is_same_series(dw_plain_philips) # we can even make an empty wrapper. 
This compares True against # itself but False against the others dw_empty = didw.Wrapper({}) - assert_true(dw_empty.is_same_series(dw_empty)) - assert_false(dw_empty.is_same_series(dw_plain_philips)) - assert_false(dw_plain_philips.is_same_series(dw_empty)) + assert dw_empty.is_same_series(dw_empty) + assert not dw_empty.is_same_series(dw_plain_philips) + assert not dw_plain_philips.is_same_series(dw_empty) @dicom_test @@ -284,10 +299,10 @@ def test_slice_indicator(): dw_0 = didw.wrapper_from_file(DATA_FILE_B0) dw_1000 = didw.wrapper_from_data(DATA) z = dw_0.slice_indicator - assert_false(z is None) - assert_equal(z, dw_1000.slice_indicator) + assert not z is None + assert z == dw_1000.slice_indicator dw_empty = didw.Wrapper({}) - assert_true(dw_empty.slice_indicator is None) + assert dw_empty.slice_indicator is None @dicom_test @@ -295,7 +310,7 @@ def test_orthogonal(): # Test that the slice normal is sufficiently orthogonal dw = didw.wrapper_from_file(DATA_FILE_SLC_NORM) R = dw.rotation_matrix - assert_true(np.allclose(np.eye(3), np.dot(R, R.T), atol=1e-6)) + assert np.allclose(np.eye(3), np.dot(R, R.T), atol=1e-6) # Test the threshold for rotation matrix orthogonality d = {} @@ -307,7 +322,8 @@ def test_orthogonal(): assert_array_almost_equal(dw.rotation_matrix, np.eye(3), 5) d['ImageOrientationPatient'] = [1e-4, 1, 0, 1, 0, 0] dw = didw.wrapper_from_data(d) - assert_raises(didw.WrapperPrecisionError, getattr, dw, 'rotation_matrix') + with pytest.raises(didw.WrapperPrecisionError): + dw.rotation_matrix @dicom_test @@ -332,7 +348,7 @@ def test_use_csa_sign(): iop = dw.image_orient_patient dw.image_orient_patient = np.c_[iop[:, 1], iop[:, 0]] dw2 = didw.wrapper_from_file(DATA_FILE_SLC_NORM) - assert_true(np.allclose(dw.slice_normal, dw2.slice_normal)) + assert np.allclose(dw.slice_normal, dw2.slice_normal) @dicom_test @@ -341,7 +357,8 @@ def test_assert_parallel(): # slice normal are not parallel dw = didw.wrapper_from_file(DATA_FILE_SLC_NORM) dw.image_orient_patient = np.c_[[1., 0., 0.], [0., 1., 0.]] - assert_raises(AssertionError, dw.__getattribute__, 'slice_normal') + with pytest.raises(AssertionError): + dw.slice_normal @dicom_test @@ -349,7 +366,7 @@ def test_decimal_rescale(): # Test that we don't get back a data array with dtype np.object when our # rescale slope is a decimal dw = didw.wrapper_from_file(DATA_FILE_DEC_RSCL) - assert_not_equal(dw.get_data().dtype, np.object) + assert dw.get_data().dtype != np.object def fake_frames(seq_name, field_name, value_seq): @@ -443,84 +460,95 @@ def test_shape(self): MFW = self.WRAPCLASS dw = MFW(fake_mf) # No rows, cols, raise WrapperError - assert_raises(didw.WrapperError, getattr, dw, 'image_shape') + with pytest.raises(didw.WrapperError): + dw.image_shape fake_mf['Rows'] = 64 - assert_raises(didw.WrapperError, getattr, dw, 'image_shape') + with pytest.raises(didw.WrapperError): + dw.image_shape fake_mf.pop('Rows') fake_mf['Columns'] = 64 - assert_raises(didw.WrapperError, getattr, dw, 'image_shape') + with pytest.raises(didw.WrapperError): + dw.image_shape fake_mf['Rows'] = 32 # Missing frame data, raise AssertionError - assert_raises(AssertionError, getattr, dw, 'image_shape') + with pytest.raises(AssertionError): + dw.image_shape fake_mf['NumberOfFrames'] = 4 # PerFrameFunctionalGroupsSequence does not match NumberOfFrames - assert_raises(AssertionError, getattr, dw, 'image_shape') + with pytest.raises(AssertionError): + dw.image_shape # check 3D shape when StackID index is 0 div_seq = ((1, 1), (1, 2), (1, 3), (1, 4)) 
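test_orthogonal above checks that the derived rotation matrix satisfies R @ R.T ~= I within a tight tolerance, and that a slightly skewed ImageOrientationPatient trips a precision error. A standalone sketch of that check, using a hypothetical helper rather than the wrapper's rotation_matrix property:

```python
import numpy as np

def rotation_from_iop(iop, atol=1e-6):
    """Build a 3x3 matrix from the two ImageOrientationPatient columns plus
    their cross product, and insist it is orthonormal to within `atol`.

    Illustrative only; nibabel's wrappers raise WrapperPrecisionError here.
    """
    iop = np.asarray(iop, dtype=float)
    col0, col1 = iop[:3], iop[3:]
    R = np.column_stack([col0, col1, np.cross(col0, col1)])
    if not np.allclose(np.eye(3), R @ R.T, atol=atol):
        raise ValueError('rotation matrix not orthogonal to precision')
    return R

rotation_from_iop([0, 1, 0, 1, 0, 0])          # orthonormal: passes
try:
    rotation_from_iop([1e-4, 1, 0, 1, 0, 0])   # skewed: fails the tolerance
except ValueError:
    pass
```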
fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) - assert_equal(MFW(fake_mf).image_shape, (32, 64, 4)) + assert MFW(fake_mf).image_shape == (32, 64, 4) # Check stack number matching when StackID index is 0 div_seq = ((1, 1), (1, 2), (1, 3), (2, 4)) fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) - assert_raises(didw.WrapperError, getattr, MFW(fake_mf), 'image_shape') + with pytest.raises(didw.WrapperError): + MFW(fake_mf).image_shape # Make some fake frame data for 4D when StackID index is 0 div_seq = ((1, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2), (1, 1, 3), (1, 2, 3)) fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) - assert_equal(MFW(fake_mf).image_shape, (32, 64, 2, 3)) + assert MFW(fake_mf).image_shape == (32, 64, 2, 3) # Check stack number matching for 4D when StackID index is 0 div_seq = ((1, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2), (1, 1, 3), (2, 2, 3)) fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) - assert_raises(didw.WrapperError, getattr, MFW(fake_mf), 'image_shape') + with pytest.raises(didw.WrapperError): + MFW(fake_mf).image_shape # Check indices can be non-contiguous when StackID index is 0 div_seq = ((1, 1, 1), (1, 2, 1), (1, 1, 3), (1, 2, 3)) fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) - assert_equal(MFW(fake_mf).image_shape, (32, 64, 2, 2)) + assert MFW(fake_mf).image_shape == (32, 64, 2, 2) # Check indices can include zero when StackID index is 0 div_seq = ((1, 1, 0), (1, 2, 0), (1, 1, 3), (1, 2, 3)) fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) - assert_equal(MFW(fake_mf).image_shape, (32, 64, 2, 2)) + assert MFW(fake_mf).image_shape == (32, 64, 2, 2) # check 3D shape when there is no StackID index div_seq = ((1,), (2,), (3,), (4,)) sid_seq = (1, 1, 1, 1) fake_mf.update(fake_shape_dependents(div_seq, sid_seq=sid_seq)) - assert_equal(MFW(fake_mf).image_shape, (32, 64, 4)) + assert MFW(fake_mf).image_shape == (32, 64, 4) # check 3D stack number matching when there is no StackID index div_seq = ((1,), (2,), (3,), (4,)) sid_seq = (1, 1, 1, 2) fake_mf.update(fake_shape_dependents(div_seq, sid_seq=sid_seq)) - assert_raises(didw.WrapperError, getattr, MFW(fake_mf), 'image_shape') + with pytest.raises(didw.WrapperError): + MFW(fake_mf).image_shape # check 4D shape when there is no StackID index div_seq = ((1, 1), (2, 1), (1, 2), (2, 2), (1, 3), (2, 3)) sid_seq = (1, 1, 1, 1, 1, 1) fake_mf.update(fake_shape_dependents(div_seq, sid_seq=sid_seq)) - assert_equal(MFW(fake_mf).image_shape, (32, 64, 2, 3)) + assert MFW(fake_mf).image_shape == (32, 64, 2, 3) # check 4D stack number matching when there is no StackID index div_seq = ((1, 1), (2, 1), (1, 2), (2, 2), (1, 3), (2, 3)) sid_seq = (1, 1, 1, 1, 1, 2) fake_mf.update(fake_shape_dependents(div_seq, sid_seq=sid_seq)) - assert_raises(didw.WrapperError, getattr, MFW(fake_mf), 'image_shape') + with pytest.raises(didw.WrapperError): + MFW(fake_mf).image_shape # check 3D shape when StackID index is 1 div_seq = ((1, 1), (2, 1), (3, 1), (4, 1)) fake_mf.update(fake_shape_dependents(div_seq, sid_dim=1)) - assert_equal(MFW(fake_mf).image_shape, (32, 64, 4)) + assert MFW(fake_mf).image_shape == (32, 64, 4) # Check stack number matching when StackID index is 1 div_seq = ((1, 1), (2, 1), (3, 2), (4, 1)) fake_mf.update(fake_shape_dependents(div_seq, sid_dim=1)) - assert_raises(didw.WrapperError, getattr, MFW(fake_mf), 'image_shape') + with pytest.raises(didw.WrapperError): + MFW(fake_mf).image_shape # Make some fake frame data for 4D when StackID index is 1 div_seq = ((1, 1, 1), (2, 1, 
1), (1, 1, 2), (2, 1, 2), (1, 1, 3), (2, 1, 3)) fake_mf.update(fake_shape_dependents(div_seq, sid_dim=1)) - assert_equal(MFW(fake_mf).image_shape, (32, 64, 2, 3)) + assert MFW(fake_mf).image_shape == (32, 64, 2, 3) def test_iop(self): # Test Image orient patient for multiframe fake_mf = copy(self.MINIMAL_MF) MFW = self.WRAPCLASS dw = MFW(fake_mf) - assert_raises(didw.WrapperError, getattr, dw, 'image_orient_patient') + with pytest.raises(didw.WrapperError): + dw.image_orient_patient # Make a fake frame fake_frame = fake_frames('PlaneOrientationSequence', 'ImageOrientationPatient', @@ -529,8 +557,8 @@ def test_iop(self): assert_array_equal(MFW(fake_mf).image_orient_patient, [[0, 1], [1, 0], [0, 0]]) fake_mf['SharedFunctionalGroupsSequence'] = [None] - assert_raises(didw.WrapperError, - getattr, MFW(fake_mf), 'image_orient_patient') + with pytest.raises(didw.WrapperError): + MFW(fake_mf).image_orient_patient fake_mf['PerFrameFunctionalGroupsSequence'] = [fake_frame] assert_array_equal(MFW(fake_mf).image_orient_patient, [[0, 1], [1, 0], [0, 0]]) @@ -540,14 +568,16 @@ def test_voxel_sizes(self): fake_mf = copy(self.MINIMAL_MF) MFW = self.WRAPCLASS dw = MFW(fake_mf) - assert_raises(didw.WrapperError, getattr, dw, 'voxel_sizes') + with pytest.raises(didw.WrapperError): + dw.voxel_sizes # Make a fake frame fake_frame = fake_frames('PixelMeasuresSequence', 'PixelSpacing', [[2.1, 3.2]])[0] fake_mf['SharedFunctionalGroupsSequence'] = [fake_frame] # Still not enough, we lack information for slice distances - assert_raises(didw.WrapperError, getattr, MFW(fake_mf), 'voxel_sizes') + with pytest.raises(didw.WrapperError): + MFW(fake_mf).voxel_sizes # This can come from SpacingBetweenSlices or frame SliceThickness fake_mf['SpacingBetweenSlices'] = 4.3 assert_array_equal(MFW(fake_mf).voxel_sizes, [2.1, 3.2, 4.3]) @@ -559,7 +589,8 @@ def test_voxel_sizes(self): assert_array_equal(MFW(fake_mf).voxel_sizes, [2.1, 3.2, 5.4]) # Removing shared leads to error again fake_mf['SharedFunctionalGroupsSequence'] = [None] - assert_raises(didw.WrapperError, getattr, MFW(fake_mf), 'voxel_sizes') + with pytest.raises(didw.WrapperError): + MFW(fake_mf).voxel_sizes # Restoring to frames makes it work again fake_mf['PerFrameFunctionalGroupsSequence'] = [fake_frame] assert_array_equal(MFW(fake_mf).voxel_sizes, [2.1, 3.2, 5.4]) @@ -578,7 +609,8 @@ def test_image_position(self): fake_mf = copy(self.MINIMAL_MF) MFW = self.WRAPCLASS dw = MFW(fake_mf) - assert_raises(didw.WrapperError, getattr, dw, 'image_position') + with pytest.raises(didw.WrapperError): + dw.image_position # Make a fake frame fake_frame = fake_frames('PlanePositionSequence', 'ImagePositionPatient', @@ -586,21 +618,23 @@ def test_image_position(self): fake_mf['SharedFunctionalGroupsSequence'] = [fake_frame] assert_array_equal(MFW(fake_mf).image_position, [-2, 3, 7]) fake_mf['SharedFunctionalGroupsSequence'] = [None] - assert_raises(didw.WrapperError, - getattr, MFW(fake_mf), 'image_position') + with pytest.raises(didw.WrapperError): + MFW(fake_mf).image_position fake_mf['PerFrameFunctionalGroupsSequence'] = [fake_frame] assert_array_equal(MFW(fake_mf).image_position, [-2, 3, 7]) # Check lists of Decimals work fake_frame.PlanePositionSequence[0].ImagePositionPatient = [ Decimal(str(v)) for v in [-2, 3, 7]] assert_array_equal(MFW(fake_mf).image_position, [-2, 3, 7]) - assert_equal(MFW(fake_mf).image_position.dtype, float) + assert MFW(fake_mf).image_position.dtype == float @dicom_test def test_affine(self): # Make sure we find orientation/position/spacing info dw 
= didw.wrapper_from_file(DATA_FILE_4D) - dw.get_affine() + aff = dw.affine + with pytest.deprecated_call(): + assert np.array_equal(dw.get_affine(), aff) @dicom_test def test_data_real(self): @@ -613,8 +647,28 @@ def test_data_real(self): if endian_codes[data.dtype.byteorder] == '>': data = data.byteswap() dat_str = data.tostring() - assert_equal(sha1(dat_str).hexdigest(), - '149323269b0af92baa7508e19ca315240f77fa8c') + assert sha1(dat_str).hexdigest() == '149323269b0af92baa7508e19ca315240f77fa8c' + + @dicom_test + def test_slicethickness_fallback(self): + dw = didw.wrapper_from_file(DATA_FILE_EMPTY_ST) + assert dw.voxel_sizes[2] == 1.0 + + @dicom_test + @needs_nibabel_data('nitest-dicom') + def test_data_derived_shape(self): + # Test 4D diffusion data with an additional trace volume included + # Excludes the trace volume and generates the correct shape + dw = didw.wrapper_from_file(DATA_FILE_4D_DERIVED) + assert dw.image_shape == (96, 96, 60, 33) + + @dicom_test + @needs_nibabel_data('nitest-dicom') + def test_data_unreadable_private_headers(self): + # Test CT image with unreadable CSA tags + with pytest.warns(UserWarning): + dw = didw.wrapper_from_file(DATA_FILE_CT) + assert dw.image_shape == (512, 571) @dicom_test def test_data_fake(self): @@ -623,19 +677,22 @@ def test_data_fake(self): MFW = self.WRAPCLASS dw = MFW(fake_mf) # Fails - no shape - assert_raises(didw.WrapperError, dw.get_data) + with pytest.raises(didw.WrapperError): + dw.get_data() # Set shape by cheating dw.image_shape = (2, 3, 4) # Still fails - no data - assert_raises(didw.WrapperError, dw.get_data) + with pytest.raises(didw.WrapperError): + dw.get_data() # Make shape and indices fake_mf['Rows'] = 2 fake_mf['Columns'] = 3 dim_idxs = ((1, 1), (1, 2), (1, 3), (1, 4)) fake_mf.update(fake_shape_dependents(dim_idxs, sid_dim=0)) - assert_equal(MFW(fake_mf).image_shape, (2, 3, 4)) + assert MFW(fake_mf).image_shape == (2, 3, 4) # Still fails - no data - assert_raises(didw.WrapperError, dw.get_data) + with pytest.raises(didw.WrapperError): + dw.get_data() # Add data - 3D data = np.arange(24).reshape((2, 3, 4)) # Frames dim is first for some reason @@ -697,7 +754,8 @@ def test__scale_data(self): fake_mf['PerFrameFunctionalGroupsSequence'] = [fake_frame] # Lacking RescaleIntercept -> Error dw = MFW(fake_mf) - assert_raises(AttributeError, dw._scale_data, data) + with pytest.raises(AttributeError): + dw._scale_data(data) fake_frame.PixelValueTransformationSequence[0].RescaleIntercept = -2 assert_array_equal(data * 3 - 2, dw._scale_data(data)) # Decimals are OK diff --git a/nibabel/nicom/tests/test_dwiparams.py b/nibabel/nicom/tests/test_dwiparams.py index 3b02367951..d0d20e574a 100644 --- a/nibabel/nicom/tests/test_dwiparams.py +++ b/nibabel/nicom/tests/test_dwiparams.py @@ -6,10 +6,9 @@ from ..dwiparams import B2q, q2bg -from nose.tools import (assert_true, assert_false, assert_equal, assert_raises) +import pytest -from numpy.testing import (assert_array_equal, assert_array_almost_equal, - assert_equal as np_assert_equal) +from numpy.testing import (assert_array_almost_equal, assert_equal as np_assert_equal) def test_b2q(): @@ -27,17 +26,20 @@ def test_b2q(): assert_array_almost_equal(-q * s, B2q(B)) # Massive negative eigs B = np.eye(3) * -1 - assert_raises(ValueError, B2q, B) + with pytest.raises(ValueError): + B2q(B) # no error if we up the tolerance q = B2q(B, tol=1) # Less massive negativity, dropping tol B = np.diag([-1e-14, 10., 1]) - assert_raises(ValueError, B2q, B) + with pytest.raises(ValueError): + B2q(B) 
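The conversion applied throughout this patch swaps nose's assert_raises(Exc, func, *args) for pytest's context manager, which keeps the call syntax natural. A minimal illustration, where validate() is a hypothetical stand-in rather than a nibabel function:

```python
import pytest

def validate(value):
    # Hypothetical stand-in for any call expected to fail on bad input
    if value < 0:
        raise ValueError('negative values not allowed')
    return value

# nose style (removed by this patch):   assert_raises(ValueError, validate, -1)
# pytest style (added by this patch):
with pytest.raises(ValueError):
    validate(-1)
```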
assert_array_almost_equal(B2q(B, tol=5e-13), [0, 10, 0]) # Confirm that we assume symmetric B = np.eye(3) B[0, 1] = 1e-5 - assert_raises(ValueError, B2q, B) + with pytest.raises(ValueError): + B2q(B) def test_q2bg(): diff --git a/nibabel/nicom/tests/test_structreader.py b/nibabel/nicom/tests/test_structreader.py index 05461d18a0..6e58931559 100644 --- a/nibabel/nicom/tests/test_structreader.py +++ b/nibabel/nicom/tests/test_structreader.py @@ -5,9 +5,6 @@ from ..structreader import Unpacker -from nose.tools import (assert_true, assert_false, assert_equal, assert_raises) - - def test_unpacker(): s = b'1234\x00\x01' @@ -22,30 +19,30 @@ def test_unpacker(): swapped_int = le_int swapped_code = '<' up_str = Unpacker(s, endian='<') - assert_equal(up_str.read(4), b'1234') + assert up_str.read(4) == b'1234' up_str.ptr = 0 - assert_equal(up_str.unpack('4s'), (b'1234',)) - assert_equal(up_str.unpack('h'), (le_int,)) + assert up_str.unpack('4s') == (b'1234',) + assert up_str.unpack('h') == (le_int,) up_str = Unpacker(s, endian='>') - assert_equal(up_str.unpack('4s'), (b'1234',)) - assert_equal(up_str.unpack('h'), (be_int,)) + assert up_str.unpack('4s') == (b'1234',) + assert up_str.unpack('h') == (be_int,) # now test conflict of endian up_str = Unpacker(s, ptr=4, endian='>') - assert_equal(up_str.unpack('h'), (be_int,)) + assert up_str.unpack('>h') == (be_int,) up_str.ptr = 4 - assert_equal(up_str.unpack('@h'), (native_int,)) + assert up_str.unpack('@h') == (native_int,) # test -1 for read up_str.ptr = 2 - assert_equal(up_str.read(), b'34\x00\x01') + assert up_str.read() == b'34\x00\x01' # past end - assert_equal(up_str.read(), b'') + assert up_str.read() == b'' # with n_bytes up_str.ptr = 2 - assert_equal(up_str.read(2), b'34') - assert_equal(up_str.read(2), b'\x00\x01') + assert up_str.read(2) == b'34' + assert up_str.read(2) == b'\x00\x01' diff --git a/nibabel/nicom/tests/test_utils.py b/nibabel/nicom/tests/test_utils.py index a7ab9b6bdc..142daa3d16 100644 --- a/nibabel/nicom/tests/test_utils.py +++ b/nibabel/nicom/tests/test_utils.py @@ -2,64 +2,48 @@ """ import re - -from numpy.testing import (assert_almost_equal, - assert_array_equal) - -from nose.tools import (assert_true, assert_false, assert_raises, - assert_equal, assert_not_equal) - - from ..utils import find_private_section -from nibabel.pydicom_compat import dicom_test, pydicom -from .test_dicomwrappers import (DATA, DATA_PHILIPS) +from . 
import dicom_test +from ...pydicom_compat import pydicom +from .test_dicomwrappers import DATA, DATA_PHILIPS @dicom_test def test_find_private_section_real(): # Find section containing named private creator information # On real data first - assert_equal(find_private_section(DATA, 0x29, 'SIEMENS CSA HEADER'), - 0x1000) - assert_equal(find_private_section(DATA, 0x29, 'SIEMENS MEDCOM HEADER2'), - 0x1100) - assert_equal(find_private_section(DATA_PHILIPS, 0x29, 'SIEMENS CSA HEADER'), - None) + assert find_private_section(DATA, 0x29, 'SIEMENS CSA HEADER') == 0x1000 + assert find_private_section(DATA, 0x29, 'SIEMENS MEDCOM HEADER2') == 0x1100 + assert find_private_section(DATA_PHILIPS, 0x29, 'SIEMENS CSA HEADER') == None # Make fake datasets ds = pydicom.dataset.Dataset({}) ds.add_new((0x11, 0x10), 'LO', b'some section') - assert_equal(find_private_section(ds, 0x11, 'some section'), 0x1000) + assert find_private_section(ds, 0x11, 'some section') == 0x1000 ds.add_new((0x11, 0x11), 'LO', b'anther section') ds.add_new((0x11, 0x12), 'LO', b'third section') - assert_equal(find_private_section(ds, 0x11, 'third section'), 0x1200) + assert find_private_section(ds, 0x11, 'third section') == 0x1200 # Wrong 'OB' is acceptable for VM (should be 'LO') ds.add_new((0x11, 0x12), 'OB', b'third section') - assert_equal(find_private_section(ds, 0x11, 'third section'), 0x1200) + assert find_private_section(ds, 0x11, 'third section') == 0x1200 # Anything else not acceptable ds.add_new((0x11, 0x12), 'PN', b'third section') - assert_equal(find_private_section(ds, 0x11, 'third section'), None) + assert find_private_section(ds, 0x11, 'third section') is None # The input (DICOM value) can be a string insteal of bytes ds.add_new((0x11, 0x12), 'LO', 'third section') - assert_equal(find_private_section(ds, 0x11, 'third section'), 0x1200) + assert find_private_section(ds, 0x11, 'third section') == 0x1200 # Search can be bytes as well as string ds.add_new((0x11, 0x12), 'LO', b'third section') - assert_equal(find_private_section(ds, 0x11, b'third section'), 0x1200) + assert find_private_section(ds, 0x11, b'third section') == 0x1200 # Search with string or bytes must be exact - assert_equal(find_private_section(ds, 0x11, b'third sectio'), None) - assert_equal(find_private_section(ds, 0x11, 'hird sectio'), None) + assert find_private_section(ds, 0x11, b'third sectio') is None + assert find_private_section(ds, 0x11, 'hird sectio') is None # The search can be a regexp - assert_equal(find_private_section(ds, - 0x11, - re.compile(r'third\Wsectio[nN]')), - 0x1200) + assert find_private_section(ds, 0x11, re.compile(r'third\Wsectio[nN]')) == 0x1200 # No match -> None - assert_equal(find_private_section(ds, - 0x11, - re.compile(r'not third\Wsectio[nN]')), - None) + assert find_private_section(ds, 0x11, re.compile(r'not third\Wsectio[nN]')) is None # If there are gaps in the sequence before the one we want, that is OK ds.add_new((0x11, 0x13), 'LO', b'near section') - assert_equal(find_private_section(ds, 0x11, 'near section'), 0x1300) + assert find_private_section(ds, 0x11, 'near section') == 0x1300 ds.add_new((0x11, 0x15), 'LO', b'far section') - assert_equal(find_private_section(ds, 0x11, 'far section'), 0x1500) + assert find_private_section(ds, 0x11, 'far section') == 0x1500 diff --git a/nibabel/nicom/utils.py b/nibabel/nicom/utils.py index 66688b801b..f1d5810775 100644 --- a/nibabel/nicom/utils.py +++ b/nibabel/nicom/utils.py @@ -1,8 +1,7 @@ """ Utilities for working with DICOM datasets """ -from __future__ import division, 
print_function, absolute_import -from ..py3k import asstr +from numpy.compat.py3k import asstr def find_private_section(dcm_data, group_no, creator): diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 548ad34658..352837f86e 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -10,15 +10,14 @@ NIfTI1 format defined at http://nifti.nimh.nih.gov/nifti-1/ ''' -from __future__ import division, print_function import warnings from io import BytesIO -from six import string_types import numpy as np import numpy.linalg as npl +from numpy.compat.py3k import asstr -from .py3k import asstr +from .filebasedimages import SerializableImage from .volumeutils import Recoder, make_dt_codes, endian_codes from .spatialimages import HeaderDataError, ImageFileError from .batteryrunners import Report @@ -27,7 +26,6 @@ from .spm99analyze import SpmAnalyzeHeader from .casting import have_binary128 from .pydicom_compat import have_dicom, pydicom as pdcm -from . import setup_test # noqa # nifti1 flat header definition for Analyze-like first 348 bytes # first number in comments indicates offset in file header in bytes @@ -125,7 +123,9 @@ (1, 'scanner', "NIFTI_XFORM_SCANNER_ANAT"), (2, 'aligned', "NIFTI_XFORM_ALIGNED_ANAT"), (3, 'talairach', "NIFTI_XFORM_TALAIRACH"), - (4, 'mni', "NIFTI_XFORM_MNI_152")), fields=('code', 'label', 'niistring')) + (4, 'mni', "NIFTI_XFORM_MNI_152"), + (5, 'template', "NIFTI_XFORM_TEMPLATE_OTHER"), + ), fields=('code', 'label', 'niistring')) # unit codes unit_codes = Recoder(( # code, label @@ -1388,7 +1388,7 @@ def set_intent(self, code, params=(), name='', allow_unknown=False): if not known_intent: # We can set intent via an unknown integer code, but can't via an # unknown string label - if not allow_unknown or isinstance(code, string_types): + if not allow_unknown or isinstance(code, str): raise KeyError('Unknown intent code: ' + str(code)) if known_intent: icode = intent_codes.code[code] @@ -1775,18 +1775,18 @@ def __init__(self, dataobj, affine, header=None, self._affine2header() # Copy docstring __init__.__doc__ = analyze.AnalyzeImage.__init__.__doc__ + ''' - Notes - ----- - - If both a `header` and an `affine` are specified, and the `affine` does - not match the affine that is in the `header`, the `affine` will be used, - but the ``sform_code`` and ``qform_code`` fields in the header will be - re-initialised to their default values. This is performed on the basis - that, if you are changing the affine, you are likely to be changing the - space to which the affine is pointing. The :meth:`set_sform` and - :meth:`set_qform` methods can be used to update the codes after an image - has been created - see those methods, and the :ref:`manual - ` for more details. ''' + Notes + ----- + + If both a `header` and an `affine` are specified, and the `affine` does + not match the affine that is in the `header`, the `affine` will be used, + but the ``sform_code`` and ``qform_code`` fields in the header will be + re-initialised to their default values. This is performed on the basis + that, if you are changing the affine, you are likely to be changing the + space to which the affine is pointing. The :meth:`set_sform` and + :meth:`set_qform` methods can be used to update the codes after an image + has been created - see those methods, and the :ref:`manual + ` for more details. 
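The re-indented Nifti1Pair note above describes how passing an explicit affine resets sform_code and qform_code, and the Recoder change adds xform code 5 ('template'). A short usage sketch with the public nibabel API, assuming a nibabel build that includes this patch so the 'template' label is accepted:

```python
import numpy as np
import nibabel as nib

data = np.zeros((2, 3, 4), dtype=np.float32)
affine = np.diag([2.0, 2.0, 2.0, 1.0])

# Supplying an explicit affine re-initialises the sform/qform codes ...
img = nib.Nifti1Image(data, affine)
# ... so set them afterwards if a specific code is wanted.
img.set_qform(affine, code='scanner')
img.set_sform(affine, code='template')   # new label added by this patch
assert int(img.header['sform_code']) == 5
```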
''' def update_header(self): ''' Harmonize header with image data and affine @@ -2014,20 +2014,16 @@ def as_reoriented(self, ornt): return img # Also apply the transform to the dim_info fields - new_dim = list(img.header.get_dim_info()) - for idx, value in enumerate(new_dim): - # For each value, leave as None if it was that way, - # otherwise check where we have mapped it to - if value is None: - continue - new_dim[idx] = np.where(ornt[:, 0] == idx)[0] + new_dim = [ + None if orig_dim is None else int(ornt[orig_dim, 0]) + for orig_dim in img.header.get_dim_info()] img.header.set_dim_info(*new_dim) return img -class Nifti1Image(Nifti1Pair): +class Nifti1Image(Nifti1Pair, SerializableImage): """ Class for single file NIfTI1 format image """ header_class = Nifti1Header diff --git a/nibabel/onetime.py b/nibabel/onetime.py index 1f410b9a1e..a036d0d229 100644 --- a/nibabel/onetime.py +++ b/nibabel/onetime.py @@ -19,7 +19,6 @@ [2] Python data model, https://docs.python.org/reference/datamodel.html """ -from __future__ import division, print_function, absolute_import # ----------------------------------------------------------------------------- # Classes and Functions diff --git a/nibabel/openers.py b/nibabel/openers.py index e551404561..a658c65c0a 100644 --- a/nibabel/openers.py +++ b/nibabel/openers.py @@ -9,13 +9,8 @@ """ Context manager openers for various fileobject types """ -import sys -if sys.version_info[0] < 3: - from bz2file import BZ2File -else: - from bz2 import BZ2File +from bz2 import BZ2File import gzip -import sys import warnings from os.path import splitext from distutils.version import StrictVersion @@ -43,51 +38,6 @@ HAVE_INDEXED_GZIP = False -# The largest memory chunk that gzip can use for reads -GZIP_MAX_READ_CHUNK = 100 * 1024 * 1024 # 100Mb - - -class BufferedGzipFile(gzip.GzipFile): - """GzipFile able to readinto buffer >= 2**32 bytes. - - This class only differs from gzip.GzipFile - in Python 3.5.0. - - This works around a known issue in Python 3.5. - See https://bugs.python.org/issue25626 - """ - - # This helps avoid defining readinto in Python 2.6, - # where it is undefined on gzip.GzipFile. - # It also helps limit the exposure to this code. - if sys.version_info[:3] == (3, 5, 0): - def __init__(self, fileish, mode='rb', compresslevel=9, - buffer_size=2**32 - 1): - super(BufferedGzipFile, self).__init__(fileish, mode=mode, - compresslevel=compresslevel) - self.buffer_size = buffer_size - - def readinto(self, buf): - """Uses self.buffer_size to do a buffered read.""" - n_bytes = len(buf) - if n_bytes < 2 ** 32: - return super(BufferedGzipFile, self).readinto(buf) - - # This works around a known issue in Python 3.5. - # See https://bugs.python.org/issue25626 - mv = memoryview(buf) - n_read = 0 - max_read = 2 ** 32 - 1 # Max for unsigned 32-bit integer - while (n_read < n_bytes): - n_wanted = min(n_bytes - n_read, max_read) - n_got = super(BufferedGzipFile, self).readinto( - mv[n_read:n_read + n_wanted]) - n_read += n_got - if n_got != n_wanted: - break - return n_read - - def _gzip_open(filename, mode='rb', compresslevel=9, keep_open=False): # use indexed_gzip if possible for faster read access. 
If keep_open == @@ -96,22 +46,15 @@ def _gzip_open(filename, mode='rb', compresslevel=9, keep_open=False): if HAVE_INDEXED_GZIP and mode == 'rb': gzip_file = IndexedGzipFile(filename, drop_handles=not keep_open) - # Fall-back to built-in GzipFile (wrapped with the BufferedGzipFile class - # defined above) + # Fall-back to built-in GzipFile else: - gzip_file = BufferedGzipFile(filename, mode, compresslevel) - - # Speedup for #209, for versions of python < 3.5. Open gzip files with - # faster reads on large files using a larger read buffer. See - # https://github.com/nipy/nibabel/pull/210 for discussion - if hasattr(gzip_file, 'max_read_chunk'): - gzip_file.max_read_chunk = GZIP_MAX_READ_CHUNK + gzip_file = gzip.GzipFile(filename, mode, compresslevel) return gzip_file class Opener(object): - """ Class to accept, maybe open, and context-manage file-likes / filenames + r""" Class to accept, maybe open, and context-manage file-likes / filenames Provides context manager to close files that the constructor opened for you. diff --git a/nibabel/optpkg.py b/nibabel/optpkg.py index fb0e00179a..81dae3010c 100644 --- a/nibabel/optpkg.py +++ b/nibabel/optpkg.py @@ -1,18 +1,11 @@ """ Routines to support optional packages """ -import pkgutil from distutils.version import LooseVersion -from six import string_types from .tripwire import TripWire -if pkgutil.find_loader('nose'): - have_nose = True -else: - have_nose = False - def _check_pkg_version(pkg, min_version): # Default version checking function - if isinstance(min_version, string_types): + if isinstance(min_version, str): min_version = LooseVersion(min_version) try: return min_version <= pkg.__version__ @@ -117,8 +110,7 @@ def optional_package(name, trip_msg=None, min_version=None): pkg = TripWire(trip_msg) def setup_module(): - if have_nose: - import nose - raise nose.plugins.skip.SkipTest('No %s for these tests' - % name) + import unittest + raise unittest.SkipTest('No %s for these tests' % name) + return pkg, False, setup_module diff --git a/nibabel/orientations.py b/nibabel/orientations.py index 9f3bbfed4d..ddea3159d0 100644 --- a/nibabel/orientations.py +++ b/nibabel/orientations.py @@ -8,7 +8,6 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## ''' Utilities for calculating and applying affine orientations ''' -from __future__ import division, print_function, absolute_import import numpy as np import numpy.linalg as npl diff --git a/nibabel/parrec.py b/nibabel/parrec.py index 1dfa998394..e9ecc91cc4 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -121,7 +121,6 @@ utility via the option "--strict-sort". The dimension info can be exported to a CSV file by adding the option "--volume-info". 
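With BufferedGzipFile and GZIP_MAX_READ_CHUNK gone, _gzip_open above reduces to: prefer indexed_gzip for read access when it is importable, otherwise fall back to the standard library. A condensed restatement of that fallback, as a sketch mirroring the patched nibabel.openers logic:

```python
import gzip

try:
    from indexed_gzip import IndexedGzipFile
    HAVE_INDEXED_GZIP = True
except ImportError:
    HAVE_INDEXED_GZIP = False

def open_gz(filename, mode='rb', compresslevel=9, keep_open=False):
    """Prefer indexed_gzip for fast random read access; otherwise stdlib."""
    if HAVE_INDEXED_GZIP and mode == 'rb':
        return IndexedGzipFile(filename, drop_handles=not keep_open)
    return gzip.GzipFile(filename, mode, compresslevel)
```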
""" -from __future__ import print_function, division import warnings import numpy as np @@ -131,7 +130,6 @@ from locale import getpreferredencoding from collections import OrderedDict -from .keywordonly import kw_only_meth from .spatialimages import SpatialHeader, SpatialImage from .eulerangles import euler2mat from .volumeutils import Recoder, array_from_file @@ -585,8 +583,7 @@ def exts2pars(exts_source): class PARRECArrayProxy(object): - @kw_only_meth(2) - def __init__(self, file_like, header, mmap=True, scaling='dv'): + def __init__(self, file_like, header, *, mmap=True, scaling='dv'): """ Initialize PARREC array proxy Parameters @@ -634,38 +631,73 @@ def dtype(self): def is_proxy(self): return True - def get_unscaled(self): - with ImageOpener(self.file_like) as fileobj: - return _data_from_rec(fileobj, self._rec_shape, self._dtype, - self._slice_indices, self._shape, - mmap=self._mmap) - - def __array__(self): - with ImageOpener(self.file_like) as fileobj: - return _data_from_rec(fileobj, - self._rec_shape, - self._dtype, - self._slice_indices, - self._shape, - scalings=self._slice_scaling, - mmap=self._mmap) - - def __getitem__(self, slicer): + def _get_unscaled(self, slicer): indices = self._slice_indices - if indices[0] != 0 or np.any(np.diff(indices) != 1): + if slicer == (): + with ImageOpener(self.file_like) as fileobj: + rec_data = array_from_file(self._rec_shape, self._dtype, fileobj, mmap=self._mmap) + rec_data = rec_data[..., indices] + return rec_data.reshape(self._shape, order='F') + elif indices[0] != 0 or np.any(np.diff(indices) != 1): # We can't load direct from REC file, use inefficient slicing - return np.asanyarray(self)[slicer] + return self._get_unscaled(())[slicer] + # Slices all sequential from zero, can use fileslice # This gives more efficient volume by volume loading, for example with ImageOpener(self.file_like) as fileobj: - raw_data = fileslice(fileobj, slicer, self._shape, self._dtype, 0, - 'F') + return fileslice(fileobj, slicer, self._shape, self._dtype, 0, 'F') + + def _get_scaled(self, dtype, slicer): + raw_data = self._get_unscaled(slicer) + if self._slice_scaling is None: + if dtype is None: + return raw_data + final_type = np.promote_types(raw_data.dtype, dtype) + return raw_data.astype(final_type, copy=False) + # Broadcast scaling to shape of original data - slopes, inters = self._slice_scaling fake_data = strided_scalar(self._shape) - _, slopes, inters = np.broadcast_arrays(fake_data, slopes, inters) + _, slopes, inters = np.broadcast_arrays(fake_data, *self._slice_scaling) + + final_type = np.result_type(raw_data, slopes, inters) + if dtype is not None: + final_type = np.promote_types(final_type, dtype) + # Slice scaling to give output shape - return raw_data * slopes[slicer] + inters[slicer] + return raw_data * slopes[slicer].astype(final_type) + inters[slicer].astype(final_type) + + + def get_unscaled(self): + """ Read data from file + + This is an optional part of the proxy API + """ + return self._get_unscaled(slicer=()) + + def __array__(self, dtype=None): + """ Read data from file and apply scaling, casting to ``dtype`` + + If ``dtype`` is unspecified, the dtype of the returned array is the + narrowest dtype that can represent the data without overflow. + Generally, it is the wider of the dtypes of the slopes or intercepts. + + Parameters + ---------- + dtype : numpy dtype specifier, optional + A numpy dtype specifier specifying the type of the returned array. + + Returns + ------- + array + Scaled image data with type `dtype`. 
+ """ + arr = self._get_scaled(dtype=dtype, slicer=()) + if dtype is not None: + arr = arr.astype(dtype, copy=False) + return arr + + def __getitem__(self, slicer): + return self._get_scaled(dtype=None, slicer=slicer) class PARRECHeader(SpatialHeader): @@ -1239,8 +1271,7 @@ class PARRECImage(SpatialImage): ImageArrayProxy = PARRECArrayProxy @classmethod - @kw_only_meth(1) - def from_file_map(klass, file_map, mmap=True, permit_truncated=False, + def from_file_map(klass, file_map, *, mmap=True, permit_truncated=False, scaling='dv', strict_sort=False): """ Create PARREC image from file map `file_map` @@ -1280,8 +1311,7 @@ def from_file_map(klass, file_map, mmap=True, permit_truncated=False, file_map=file_map) @classmethod - @kw_only_meth(1) - def from_filename(klass, filename, mmap=True, permit_truncated=False, + def from_filename(klass, filename, *, mmap=True, permit_truncated=False, scaling='dv', strict_sort=False): """ Create PARREC image from filename `filename` diff --git a/nibabel/pkg_info.py b/nibabel/pkg_info.py index bc58c3bdc9..7be15315d2 100644 --- a/nibabel/pkg_info.py +++ b/nibabel/pkg_info.py @@ -1,31 +1,76 @@ -import os import sys -import subprocess -try: - from ConfigParser import RawConfigParser as ConfigParser -except ImportError: - from configparser import RawConfigParser as ConfigParser # python 3 +from packaging.version import Version +from . import _version -COMMIT_INFO_FNAME = 'COMMIT_INFO.txt' +__version__ = _version.get_versions()['version'] -def pkg_commit_hash(pkg_path): - ''' Get short form of commit hash given directory `pkg_path` +def _cmp(a, b): + """ Implementation of ``cmp`` for Python 3 + """ + return (a > b) - (a < b) - There should be a file called 'COMMIT_INFO.txt' in `pkg_path`. This is a - file in INI file format, with at least one section: ``commit hash``, and - two variables ``archive_subst_hash`` and ``install_hash``. The first has a - substitution pattern in it which may have been filled by the execution of - ``git archive`` if this is an archive generated that way. The second is - filled in by the installation, if the installation is from a git archive. - We get the commit hash from (in order of preference): +def cmp_pkg_version(version_str, pkg_version_str=__version__): + """ Compare ``version_str`` to current package version - * A substituted value in ``archive_subst_hash`` - * A written commit hash value in ``install_hash` - * git's output, if we are in a git repository + This comparator follows `PEP-440`_ conventions for determining version + ordering. - If all these fail, we return a not-found placeholder tuple + To be valid, a version must have a numerical major version. It may be + optionally followed by a dot and a numerical minor version, which may, + in turn, optionally be followed by a dot and a numerical micro version, + and / or by an "extra" string. + The extra string may further contain a "+". Any value to the left of a "+" + labels the version as pre-release, while values to the right indicate a + post-release relative to the values to the left. That is, + ``1.2.0+1`` is post-release for ``1.2.0``, while ``1.2.0rc1+1`` is + post-release for ``1.2.0rc1`` and pre-release for ``1.2.0``. + + Parameters + ---------- + version_str : str + Version string to compare to current package version + pkg_version_str : str, optional + Version of our package. Optional, set fom ``__version__`` by default. + + Returns + ------- + version_cmp : int + 1 if `version_str` is a later version than `pkg_version_str`, 0 if + same, -1 if earlier. 
+ + Examples + -------- + >>> cmp_pkg_version('1.2.1', '1.2.0') + 1 + >>> cmp_pkg_version('1.2.0dev', '1.2.0') + -1 + >>> cmp_pkg_version('1.2.0dev', '1.2.0rc1') + -1 + >>> cmp_pkg_version('1.2.0rc1', '1.2.0') + -1 + >>> cmp_pkg_version('1.2.0rc1+1', '1.2.0rc1') + 1 + >>> cmp_pkg_version('1.2.0rc1+1', '1.2.0') + -1 + >>> cmp_pkg_version('1.2.0.post1', '1.2.0') + 1 + + .. _`PEP-440`: https://www.python.org/dev/peps/pep-0440/ + """ + return _cmp(Version(version_str), Version(pkg_version_str)) + + +def pkg_commit_hash(pkg_path=None): + ''' Get short form of commit hash + + Versioneer placed a ``_version.py`` file in the package directory. This file + gets updated on installation or ``git archive``. + We inspect the contents of ``_version`` to detect whether we are in a + repository, an archive of the repository, or an installed package. + + If detection fails, we return a not-found placeholder tuple Parameters ---------- @@ -39,27 +84,17 @@ def pkg_commit_hash(pkg_path): hash_str : str short form of hash ''' - # Try and get commit from written commit text file - pth = os.path.join(pkg_path, COMMIT_INFO_FNAME) - if not os.path.isfile(pth): - raise IOError('Missing commit info file %s' % pth) - cfg_parser = ConfigParser() - cfg_parser.read(pth) - archive_subst = cfg_parser.get('commit hash', 'archive_subst_hash') - if not archive_subst.startswith('$Format'): # it has been substituted - return 'archive substitution', archive_subst - install_subst = cfg_parser.get('commit hash', 'install_hash') - if install_subst != '': - return 'installation', install_subst - # maybe we are in a repository - proc = subprocess.Popen('git rev-parse --short HEAD', - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - cwd=pkg_path, shell=True) - repo_commit, _ = proc.communicate() - if repo_commit: - return 'repository', repo_commit.strip() - return '(none found)', '' + versions = _version.get_versions() + hash_str = versions['full-revisionid'][:7] + if hasattr(_version, 'version_json'): + hash_from = 'installation' + elif not _version.get_keywords()['full'].startswith('$Format:'): + hash_from = 'archive substitution' + elif versions['version'] == '0+unknown': + hash_from, hash_str = '(none found)', '' + else: + hash_from = 'repository' + return hash_from, hash_str def get_pkg_info(pkg_path): @@ -75,7 +110,7 @@ def get_pkg_info(pkg_path): context : dict with named parameters of interest ''' - src, hsh = pkg_commit_hash(pkg_path) + src, hsh = pkg_commit_hash() import numpy return dict( pkg_path=pkg_path, diff --git a/nibabel/processing.py b/nibabel/processing.py index cf9f60c76c..0c5f921d87 100644 --- a/nibabel/processing.py +++ b/nibabel/processing.py @@ -14,7 +14,6 @@ Smoothing and resampling routines need scipy """ -from __future__ import print_function, division, absolute_import import numpy as np import numpy.linalg as npl @@ -234,7 +233,7 @@ def resample_to_output(in_img, # looks like when resampled into world coordinates if n_dim < 3: # Expand image to 3D, make voxel sizes match new_shape = in_shape + (1,) * (3 - n_dim) - data = in_img.get_data().reshape(new_shape) # 2D data should be small + data = np.asanyarray(in_img.dataobj).reshape(new_shape) # 2D data should be small in_img = out_class(data, in_img.affine, in_img.header) if voxel_sizes is not None and len(voxel_sizes) == n_dim: # Need to pad out voxel sizes to match new image dimensions diff --git a/nibabel/py3k.py b/nibabel/py3k.py index bd55158d30..02dd1f16e7 100644 --- a/nibabel/py3k.py +++ b/nibabel/py3k.py @@ -1,88 +1,9 @@ -""" -Python 3 
compatibility tools. +import warnings -Copied from numpy/compat/py3k. +warnings.warn("We no longer carry a copy of the 'py3k' module in nibabel; " + "Please import from the 'numpy.compat.py3k' module directly. " + "Full removal scheduled for nibabel 4.0.", + FutureWarning, + stacklevel=2) -Please prefer the routines in the six module when possible. - -BSD license -""" - -__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar', - 'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested', - 'asstr', 'open_latin1', 'StringIO', 'BytesIO'] - -import sys - -if sys.version_info[0] >= 3: - import io - StringIO = io.StringIO - BytesIO = io.BytesIO - bytes = bytes - unicode = str - asunicode = str - - def asbytes(s): - if isinstance(s, bytes): - return s - return s.encode('latin1') - - def asstr(s): - if isinstance(s, str): - return s - return s.decode('latin1') - - def isfileobj(f): - return isinstance(f, io.FileIO) - - def open_latin1(filename, mode='r'): - return open(filename, mode=mode, encoding='iso-8859-1') - strchar = 'U' - ints2bytes = lambda seq: bytes(seq) - ZEROB = bytes([0]) - FileNotFoundError = FileNotFoundError - import builtins -else: - import StringIO - StringIO = BytesIO = StringIO.StringIO - bytes = str - unicode = unicode - asbytes = str - asstr = str - strchar = 'S' - - def isfileobj(f): - return isinstance(f, file) - - def asunicode(s): - if isinstance(s, unicode): - return s - return s.decode('ascii') - - def open_latin1(filename, mode='r'): - return open(filename, mode=mode) - ints2bytes = lambda seq: ''.join(chr(i) for i in seq) - ZEROB = chr(0) - - class FileNotFoundError(IOError): - pass - - import __builtin__ as builtins # noqa - - -def getexception(): - return sys.exc_info()[1] - - -def asbytes_nested(x): - if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)): - return [asbytes_nested(y) for y in x] - else: - return asbytes(x) - - -def asunicode_nested(x): - if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)): - return [asunicode_nested(y) for y in x] - else: - return asunicode(x) +from numpy.compat.py3k import * # noqa diff --git a/nibabel/pydicom_compat.py b/nibabel/pydicom_compat.py index 7a8658cf47..9cca2a293d 100644 --- a/nibabel/pydicom_compat.py +++ b/nibabel/pydicom_compat.py @@ -12,21 +12,25 @@ else None; * tag_for_keyword : ``tag_for_keyword`` function if pydicom or dicom module is importable else None; + +A test decorator is available in nibabel.nicom.tests: + * dicom_test : test decorator that skips test if dicom not available. + +A deprecated copy is available here for backward compatibility. """ # Module has (apparently) unused imports; stop flake8 complaining # flake8: noqa import numpy as np +from .deprecated import deprecate_with_version have_dicom = True -pydicom = read_file = tag_for_keyword = None +pydicom = read_file = tag_for_keyword = Sequence = None try: import dicom as pydicom - # Values not imported by default - import dicom.values except ImportError: try: import pydicom @@ -34,9 +38,13 @@ have_dicom = False else: # pydicom module available from pydicom.dicomio import read_file + from pydicom.sequence import Sequence # Values not imported by default import pydicom.values else: # dicom module available + # Values not imported by default + import dicom.values + from dicom.sequence import Sequence read_file = pydicom.read_file if have_dicom: @@ -48,6 +56,9 @@ tag_for_keyword = pydicom.datadict.tag_for_name -# test decorator that skips test if dicom not available. 
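nibabel/py3k.py above is reduced to a shim that warns and re-exports from numpy.compat.py3k, so existing imports keep working until the module is removed. A small sketch of that pattern, assuming a NumPy version that still ships numpy.compat.py3k, as the patch itself does:

```python
import warnings

def load_shim():
    """Stand-in for the shim module body: warn, then re-export from the new home."""
    warnings.warn('this compatibility module is deprecated; import from '
                  'numpy.compat.py3k directly', FutureWarning, stacklevel=2)
    from numpy.compat.py3k import asstr  # one of the re-exported names
    return asstr

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    asstr = load_shim()

assert asstr(b'abc') == 'abc'
assert any(issubclass(w.category, FutureWarning) for w in caught)
```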
-dicom_test = np.testing.dec.skipif(not have_dicom, - 'could not import dicom or pydicom') +@deprecate_with_version("dicom_test has been moved to nibabel.nicom.tests", + since="3.1", until="5.0") +def dicom_test(func): + # Import locally to avoid circular dependency + from .nicom.tests import dicom_test + return dicom_test(func) diff --git a/nibabel/quaternions.py b/nibabel/quaternions.py index f9318a93f2..adc2367238 100644 --- a/nibabel/quaternions.py +++ b/nibabel/quaternions.py @@ -27,7 +27,6 @@ import math import numpy as np -from . import setup_test # noqa MAX_FLOAT = np.maximum_sctype(np.float) FLOAT_EPS = np.finfo(np.float).eps diff --git a/nibabel/rstutils.py b/nibabel/rstutils.py index 6a330174b1..d0bdb655b0 100644 --- a/nibabel/rstutils.py +++ b/nibabel/rstutils.py @@ -2,7 +2,6 @@ * Make ReST table given array of values """ -from __future__ import division import numpy as np diff --git a/nibabel/spaces.py b/nibabel/spaces.py index 393a8a216f..094f43dc77 100644 --- a/nibabel/spaces.py +++ b/nibabel/spaces.py @@ -112,7 +112,7 @@ def slice2volume(index, axis, shape=None): and then use ``whole_aff`` in ``scipy.ndimage.affine_transform``: rzs, trans = to_matvec(whole_aff) - data = img2.get_data() + data = img2.get_fdata() new_slice = scipy.ndimage.affine_transform(data, rzs, trans, slice_shape) Parameters diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index ede0820065..fd2795e96a 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -20,7 +20,8 @@ methods: - * .get_data() + * .get_fdata() + * .get_data() (deprecated, use get_fdata() instead) * .get_affine() (deprecated, use affine property instead) * .get_header() (deprecated, use header property instead) * .to_filename(fname) - writes data to filename(s) derived from @@ -69,7 +70,7 @@ You can get the data out again with:: - img.get_data() + img.get_fdata() Less commonly, for some image types that support it, you might want to fetch out the unscaled array via the object containing the data:: @@ -123,12 +124,12 @@ >>> img.to_file_map() >>> # read it back again from the written files >>> img2 = nib.AnalyzeImage.from_file_map(file_map) - >>> np.all(img2.get_data() == data) + >>> np.all(img2.get_fdata(dtype=np.float32) == data) True >>> # write, read it again >>> img2.to_file_map() >>> img3 = nib.AnalyzeImage.from_file_map(file_map) - >>> np.all(img3.get_data() == data) + >>> np.all(img3.get_fdata(dtype=np.float32) == data) True ''' @@ -586,7 +587,7 @@ def __getitem__(self, idx): "Cannot slice image objects; consider using `img.slicer[slice]` " "to generate a sliced image (see documentation for caveats) or " "slicing image array data with `img.dataobj[slice]` or " - "`img.get_data()[slice]`") + "`img.get_fdata()[slice]`") def orthoview(self): """Plot the image using OrthoSlicer3D @@ -630,7 +631,7 @@ def as_reoriented(self, ornt): if np.array_equal(ornt, [[0, 1], [1, 1], [2, 1]]): return self - t_arr = apply_orientation(self.get_data(), ornt) + t_arr = apply_orientation(np.asanyarray(self.dataobj), ornt) new_aff = self.affine.dot(inv_ornt_aff(ornt, self.shape)) return self.__class__(t_arr, new_aff, self.header) diff --git a/nibabel/spm99analyze.py b/nibabel/spm99analyze.py index 40ae4f44b8..2ae780ebde 100644 --- a/nibabel/spm99analyze.py +++ b/nibabel/spm99analyze.py @@ -16,7 +16,6 @@ from .batteryrunners import Report from . 
import analyze # module import -from .keywordonly import kw_only_meth from .optpkg import optional_package have_scipy = optional_package('scipy')[1] @@ -244,9 +243,12 @@ class Spm99AnalyzeImage(analyze.AnalyzeImage): rw = have_scipy @classmethod - @kw_only_meth(1) - def from_file_map(klass, file_map, mmap=True, keep_file_open=None): - '''class method to create image from mapping in `file_map `` + def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): + ''' Class method to create image from mapping in ``file_map`` + + .. deprecated:: 2.4.1 + ``keep_file_open='auto'`` is redundant with `False` and has + been deprecated. It raises an error as of nibabel 3.0. Parameters ---------- @@ -261,18 +263,14 @@ def from_file_map(klass, file_map, mmap=True, keep_file_open=None): `mmap` value of True gives the same behavior as ``mmap='c'``. If image data file cannot be memory-mapped, ignore `mmap` value and read array from file. - keep_file_open : { None, 'auto', True, False }, optional, keyword only + keep_file_open : { None, True, False }, optional, keyword only `keep_file_open` controls whether a new file handle is created every time the image is accessed, or a single file handle is created and used for the lifetime of this ``ArrayProxy``. If ``True``, a single file handle is created and used. If ``False``, - a new file handle is created every time the image is accessed. If - ``'auto'``, and the optional ``indexed_gzip`` dependency is - present, a single file handle is created and persisted. If - ``indexed_gzip`` is not available, behaviour is the same as if - ``keep_file_open is False``. If ``file_map`` refers to an open - file handle, this setting has no effect. The default value - (``None``) will result in the value of + a new file handle is created every time the image is accessed. + If ``file_map`` refers to an open file handle, this setting has no + effect. The default value (``None``) will result in the value of ``nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT`` being used. Returns diff --git a/nibabel/streamlines/__init__.py b/nibabel/streamlines/__init__.py index 84d810367e..102ad8fd01 100644 --- a/nibabel/streamlines/__init__.py +++ b/nibabel/streamlines/__init__.py @@ -2,7 +2,6 @@ """ import os import warnings -from six import string_types from .header import Field from .array_sequence import ArraySequence @@ -57,7 +56,7 @@ def detect_format(fileobj): except IOError: pass - if isinstance(fileobj, string_types): + if isinstance(fileobj, str): _, ext = os.path.splitext(fileobj) return FORMATS.get(ext.lower()) @@ -97,7 +96,7 @@ def load(fileobj, lazy_load=False): def save(tractogram, filename, **kwargs): - """ Saves a tractogram to a file. + r""" Saves a tractogram to a file. Parameters ---------- diff --git a/nibabel/streamlines/array_sequence.py b/nibabel/streamlines/array_sequence.py index 5e6df6bf26..71b4bcb3be 100644 --- a/nibabel/streamlines/array_sequence.py +++ b/nibabel/streamlines/array_sequence.py @@ -1,4 +1,3 @@ -from __future__ import division import numbers from operator import mul @@ -6,6 +5,8 @@ import numpy as np +from ..deprecated import deprecate_with_version + MEGABYTE = 1024 * 1024 @@ -54,6 +55,37 @@ def update_seq(self, arr_seq): arr_seq._lengths = np.array(self.lengths) +def _define_operators(cls): + """ Decorator which adds support for some Python operators. 
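Several hunks above (spaces.py, spatialimages.py, processing.py) move from img.get_data() to img.get_fdata() or np.asanyarray(img.dataobj). A short reminder of the difference between the two replacements, using the public API:

```python
import numpy as np
import nibabel as nib

img = nib.Nifti1Image(np.arange(24, dtype=np.int16).reshape(2, 3, 4), np.eye(4))

# get_fdata() always returns floating point data (float64 by default) ...
assert img.get_fdata().dtype == np.float64
# ... while dataobj preserves the underlying dtype, replacing get_data()
# where the raw values are wanted.
assert np.asanyarray(img.dataobj).dtype == np.int16
```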
""" + def _wrap(cls, op, inplace=False, unary=False): + + def fn_unary_op(self): + return self._op(op) + + def fn_binary_op(self, value): + return self._op(op, value, inplace=inplace) + + setattr(cls, op, fn_unary_op if unary else fn_binary_op) + fn = getattr(cls, op) + fn.__name__ = op + fn.__doc__ = getattr(np.ndarray, op).__doc__ + + for op in ["__add__", "__sub__", "__mul__", "__mod__", "__pow__", + "__floordiv__", "__truediv__", "__lshift__", "__rshift__", + "__or__", "__and__", "__xor__"]: + _wrap(cls, op=op, inplace=False) + _wrap(cls, op="__i{}__".format(op.strip("_")), inplace=True) + + for op in ["__eq__", "__ne__", "__lt__", "__le__", "__gt__", "__ge__"]: + _wrap(cls, op) + + for op in ["__neg__", "__abs__", "__invert__"]: + _wrap(cls, op, unary=True) + + return cls + + +@_define_operators class ArraySequence(object): """ Sequence of ndarrays having variable first dimension sizes. @@ -117,9 +149,42 @@ def total_nb_rows(self): return np.sum(self._lengths) @property + @deprecate_with_version("'ArraySequence.data' property is deprecated.\n" + "Please use the 'ArraySequence.get_data()' method instead", + '3.0', '4.0') def data(self): """ Elements in this array sequence. """ - return self._data + view = self._data.view() + view.setflags(write=False) + return view + + def get_data(self): + """ Returns a *copy* of the elements in this array sequence. + + Notes + ----- + To modify the data on this array sequence, one can use + in-place mathematical operators (e.g., `seq += ...`) or the use + assignment operator (i.e, `seq[...] = value`). + """ + return self.copy()._data + + def _check_shape(self, arrseq): + """ Check whether this array sequence is compatible with another. """ + msg = "cannot perform operation - array sequences have different" + if len(self._lengths) != len(arrseq._lengths): + msg += " lengths: {} vs. {}." + raise ValueError(msg.format(len(self._lengths), len(arrseq._lengths))) + + if self.total_nb_rows != arrseq.total_nb_rows: + msg += " amount of data: {} vs. {}." + raise ValueError(msg.format(self.total_nb_rows, arrseq.total_nb_rows)) + + if self.common_shape != arrseq.common_shape: + msg += " common shape: {} vs. {}." + raise ValueError(msg.format(self.common_shape, arrseq.common_shape)) + + return True def _get_next_offset(self): """ Offset in ``self._data`` at which to write next rowelement """ @@ -321,7 +386,7 @@ def __getitem__(self, idx): seq._lengths = self._lengths[off_idx] return seq - if isinstance(off_idx, list) or is_ndarray_of_int_or_bool(off_idx): + if isinstance(off_idx, (list, range)) or is_ndarray_of_int_or_bool(off_idx): # Fancy indexing seq._offsets = self._offsets[off_idx] seq._lengths = self._lengths[off_idx] @@ -330,6 +395,116 @@ def __getitem__(self, idx): raise TypeError("Index must be either an int, a slice, a list of int" " or a ndarray of bool! Not " + str(type(idx))) + def __setitem__(self, idx, elements): + """ Set sequence(s) through standard or advanced numpy indexing. + + Parameters + ---------- + idx : int or slice or list or ndarray + If int, index of the element to retrieve. + If slice, use slicing to retrieve elements. + If list, indices of the elements to retrieve. + If ndarray with dtype int, indices of the elements to retrieve. + If ndarray with dtype bool, only retrieve selected elements. + elements: ndarray or :class:`ArraySequence` + Data that will overwrite selected sequences. + If `idx` is an int, `elements` is expected to be a ndarray. + Otherwise, `elements` is expected a :class:`ArraySequence` object. 
+ """ + if isinstance(idx, (numbers.Integral, np.integer)): + start = self._offsets[idx] + self._data[start:start + self._lengths[idx]] = elements + return + + if isinstance(idx, tuple): + off_idx = idx[0] + data = self._data.__getitem__((slice(None),) + idx[1:]) + else: + off_idx = idx + data = self._data + + if isinstance(off_idx, slice): # Standard list slicing + offsets = self._offsets[off_idx] + lengths = self._lengths[off_idx] + + elif isinstance(off_idx, (list, range)) or is_ndarray_of_int_or_bool(off_idx): + # Fancy indexing + offsets = self._offsets[off_idx] + lengths = self._lengths[off_idx] + + else: + raise TypeError("Index must be either an int, a slice, a list of int" + " or a ndarray of bool! Not " + str(type(idx))) + + if is_array_sequence(elements): + if len(lengths) != len(elements): + msg = "Trying to set {} sequences with {} sequences." + raise ValueError(msg.format(len(lengths), len(elements))) + + if sum(lengths) != elements.total_nb_rows: + msg = "Trying to set {} points with {} points." + raise ValueError(msg.format(sum(lengths), elements.total_nb_rows)) + + for o1, l1, o2, l2 in zip(offsets, lengths, elements._offsets, elements._lengths): + data[o1:o1 + l1] = elements._data[o2:o2 + l2] + + elif isinstance(elements, numbers.Number): + for o1, l1 in zip(offsets, lengths): + data[o1:o1 + l1] = elements + + else: # Try to iterate over it. + for o1, l1, element in zip(offsets, lengths, elements): + data[o1:o1 + l1] = element + + def _op(self, op, value=None, inplace=False): + """ Applies some operator to this arraysequence. + + This handles both unary and binary operators with a scalar or another + array sequence. Operations are performed directly on the underlying + data, or a copy of it, which depends on the value of `inplace`. + + Parameters + ---------- + op : str + Name of the Python operator (e.g., `"__add__"`). + value : scalar or :class:`ArraySequence`, optional + If None, the operator is assumed to be unary. + Otherwise, that value is used in the binary operation. + inplace: bool, optional + If False, the operation is done on a copy of this array sequence. + Otherwise, this array sequence gets modified directly. + """ + seq = self if inplace else self.copy() + + if is_array_sequence(value) and seq._check_shape(value): + elements = zip(seq._offsets, seq._lengths, + self._offsets, self._lengths, + value._offsets, value._lengths) + + # Change seq.dtype to match the operation resulting type. + o0, l0, o1, l1, o2, l2 = next(elements) + tmp = getattr(self._data[o1:o1 + l1], op)(value._data[o2:o2 + l2]) + seq._data = seq._data.astype(tmp.dtype) + seq._data[o0:o0 + l0] = tmp + + for o0, l0, o1, l1, o2, l2 in elements: + seq._data[o0:o0 + l0] = getattr(self._data[o1:o1 + l1], op)(value._data[o2:o2 + l2]) + + else: + args = [] if value is None else [value] # Dealing with unary and binary ops. + elements = zip(seq._offsets, seq._lengths, self._offsets, self._lengths) + + # Change seq.dtype to match the operation resulting type. 
+ o0, l0, o1, l1 = next(elements) + tmp = getattr(self._data[o1:o1 + l1], op)(*args) + seq._data = seq._data.astype(tmp.dtype) + seq._data[o0:o0 + l0] = tmp + + for o0, l0, o1, l1 in elements: + seq._data[o0:o0 + l0] = getattr(self._data[o1:o1 + l1], op)(*args) + + return seq + def __iter__(self): if len(self._lengths) != len(self._offsets): raise ValueError("ArraySequence object corrupted:" @@ -372,7 +547,7 @@ def load(cls, filename): return seq -def create_arraysequences_from_generator(gen, n): +def create_arraysequences_from_generator(gen, n, buffer_sizes=None): """ Creates :class:`ArraySequence` objects from a generator yielding tuples Parameters @@ -382,8 +557,13 @@ def create_arraysequences_from_generator(gen, n): array sequences. n : int Number of :class:`ArraySequences` object to create. + buffer_sizes : list of float, optional + Sizes (in Mb) for each ArraySequence's buffer. """ - seqs = [ArraySequence() for _ in range(n)] + if buffer_sizes is None: + buffer_sizes = [4] * n + + seqs = [ArraySequence(buffer_size=size) for size in buffer_sizes] for data in gen: for i, seq in enumerate(seqs): if data[i].nbytes > 0: diff --git a/nibabel/streamlines/tck.py b/nibabel/streamlines/tck.py index 9b1888ebba..5decf9e831 100644 --- a/nibabel/streamlines/tck.py +++ b/nibabel/streamlines/tck.py @@ -3,15 +3,14 @@ TCK format is defined at http://mrtrix.readthedocs.io/en/latest/getting_started/image_data.html?highlight=format#tracks-file-format-tck """ -from __future__ import division import os import warnings import numpy as np +from numpy.compat.py3k import asbytes, asstr from nibabel.openers import Opener -from nibabel.py3k import asbytes, asstr from .array_sequence import ArraySequence from .tractogram_file import TractogramFile @@ -31,9 +30,9 @@ class TckFile(TractogramFile): ----- MRtrix (so its file format: TCK) considers streamlines coordinates to be in world space (RAS+ and mm space). MRtrix refers to that space - as the "real" or "scanner" space [1]_. + as the "real" or "scanner" space [#]_. - Moreover, when streamlines are mapped back to voxel space [2]_, a + Moreover, when streamlines are mapped back to voxel space [#]_, a streamline point located at an integer coordinate (i,j,k) is considered to be at the center of the corresponding voxel. This is in contrast with TRK's internal convention where it would have referred to a corner. @@ -41,10 +40,8 @@ class TckFile(TractogramFile): NiBabel's streamlines internal representation follows the same convention as MRtrix. - References - ---------- - [1] http://www.nitrc.org/pipermail/mrtrix-discussion/2014-January/000859.html - [2] http://nipy.org/nibabel/coordinate_systems.html#voxel-coordinates-are-in-voxel-space + .. [#] http://www.nitrc.org/pipermail/mrtrix-discussion/2014-January/000859.html + .. 
[#] http://nipy.org/nibabel/coordinate_systems.html#voxel-coordinates-are-in-voxel-space """ # Constants MAGIC_NUMBER = "mrtrix tracks" diff --git a/nibabel/streamlines/tests/test_array_sequence.py b/nibabel/streamlines/tests/test_array_sequence.py index 33421f45c7..06e19248f4 100644 --- a/nibabel/streamlines/tests/test_array_sequence.py +++ b/nibabel/streamlines/tests/test_array_sequence.py @@ -5,8 +5,8 @@ import itertools import numpy as np -from nose.tools import assert_equal, assert_raises, assert_true -from nibabel.testing import assert_arrays_equal +import pytest +from ...testing import assert_arrays_equal from numpy.testing import assert_array_equal from ..array_sequence import ArraySequence, is_array_sequence, concatenate @@ -15,7 +15,7 @@ SEQ_DATA = {} -def setup(): +def setup_module(): global SEQ_DATA rng = np.random.RandomState(42) SEQ_DATA['rng'] = rng @@ -24,28 +24,29 @@ def setup(): def generate_data(nb_arrays, common_shape, rng): - data = [rng.rand(*(rng.randint(3, 20),) + common_shape) + data = [rng.rand(*(rng.randint(3, 20),) + common_shape) * 100 for _ in range(nb_arrays)] return data def check_empty_arr_seq(seq): - assert_equal(len(seq), 0) - assert_equal(len(seq._offsets), 0) - assert_equal(len(seq._lengths), 0) + assert len(seq) == 0 + assert len(seq._offsets) == 0 + assert len(seq._lengths) == 0 # assert_equal(seq._data.ndim, 0) - assert_equal(seq._data.ndim, 1) - assert_true(seq.common_shape == ()) + assert seq._data.ndim == 1 + assert seq.common_shape == () def check_arr_seq(seq, arrays): lengths = list(map(len, arrays)) - assert_true(is_array_sequence(seq)) - assert_equal(len(seq), len(arrays)) - assert_equal(len(seq._offsets), len(arrays)) - assert_equal(len(seq._lengths), len(arrays)) - assert_equal(seq._data.shape[1:], arrays[0].shape[1:]) - assert_equal(seq.common_shape, arrays[0].shape[1:]) + assert is_array_sequence(seq) + assert len(seq) == len(arrays) + assert len(seq._offsets) == len(arrays) + assert len(seq._lengths) == len(arrays) + assert seq._data.shape[1:] == arrays[0].shape[1:] + assert seq.common_shape == arrays[0].shape[1:] + assert_arrays_equal(seq, arrays) # If seq is a view, then order of internal data is not guaranteed. 
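# --- Illustrative aside, not part of this patch ---------------------------
# A minimal sketch of the behaviour the new ArraySequence features add
# (element-wise operators, __setitem__ and get_data()); shapes are made up.
import numpy as np
from nibabel.streamlines import ArraySequence

seq = ArraySequence(np.arange(900.).reshape((50, 6, 3)))  # 50 sequences of 6 points
doubled = seq * 2            # operators return a new ArraySequence
doubled += 1                 # in-place variants modify the sequence directly
mask = seq > 30              # comparisons give boolean array sequences
seq[0:4] = seq[5:9]          # __setitem__ copies data between compatible slices
snapshot = seq.get_data()    # detached copy of the underlying flat array
snapshot[:] = 0              # ...so mutating it leaves `seq` untouched
# (The deprecated `seq.data` property now returns a read-only view instead.)
# ---------------------------------------------------------------------------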
@@ -54,18 +55,20 @@ def check_arr_seq(seq, arrays): assert_array_equal(sorted(seq._lengths), sorted(lengths)) else: seq.shrink_data() - assert_equal(seq._data.shape[0], sum(lengths)) + + assert seq._data.shape[0] == sum(lengths) + assert_array_equal(seq._data, np.concatenate(arrays, axis=0)) assert_array_equal(seq._offsets, np.r_[0, np.cumsum(lengths)[:-1]]) assert_array_equal(seq._lengths, lengths) def check_arr_seq_view(seq_view, seq): - assert_true(seq_view._is_view) - assert_true(seq_view is not seq) - assert_true(np.may_share_memory(seq_view._data, seq._data)) - assert_true(seq_view._offsets is not seq._offsets) - assert_true(seq_view._lengths is not seq._lengths) + assert seq_view._is_view + assert seq_view is not seq + assert np.may_share_memory(seq_view._data, seq._data) + assert seq_view._offsets is not seq._offsets + assert seq_view._lengths is not seq._lengths class TestArraySequence(unittest.TestCase): @@ -97,8 +100,8 @@ def test_creating_arraysequence_from_generator(self): seq_with_buffer = ArraySequence(gen_2, buffer_size=256) # Check buffer size effect - assert_equal(seq_with_buffer.data.shape, seq.data.shape) - assert_true(seq_with_buffer._buffer_size > seq._buffer_size) + assert seq_with_buffer.data.shape == seq.data.shape + assert seq_with_buffer._buffer_size > seq._buffer_size # Check generator result check_arr_seq(seq, SEQ_DATA['data']) @@ -121,26 +124,27 @@ def test_arraysequence_iter(self): # Try iterating through a corrupted ArraySequence object. seq = SEQ_DATA['seq'].copy() seq._lengths = seq._lengths[::2] - assert_raises(ValueError, list, seq) + with pytest.raises(ValueError): + list(seq) def test_arraysequence_copy(self): orig = SEQ_DATA['seq'] seq = orig.copy() n_rows = seq.total_nb_rows - assert_equal(n_rows, orig.total_nb_rows) + assert n_rows == orig.total_nb_rows assert_array_equal(seq._data, orig._data[:n_rows]) - assert_true(seq._data is not orig._data) + assert seq._data is not orig._data assert_array_equal(seq._offsets, orig._offsets) - assert_true(seq._offsets is not orig._offsets) + assert seq._offsets is not orig._offsets assert_array_equal(seq._lengths, orig._lengths) - assert_true(seq._lengths is not orig._lengths) - assert_equal(seq.common_shape, orig.common_shape) + assert seq._lengths is not orig._lengths + assert seq.common_shape == orig.common_shape # Taking a copy of an `ArraySequence` generated by slicing. # Only keep needed data. seq = orig[::2].copy() check_arr_seq(seq, SEQ_DATA['data'][::2]) - assert_true(seq._data is not orig._data) + assert seq._data is not orig._data def test_arraysequence_append(self): element = generate_data(nb_arrays=1, @@ -171,7 +175,8 @@ def test_arraysequence_append(self): element = generate_data(nb_arrays=1, common_shape=SEQ_DATA['seq'].common_shape*2, rng=SEQ_DATA['rng'])[0] - assert_raises(ValueError, seq.append, element) + with pytest.raises(ValueError): + seq.append(element) def test_arraysequence_extend(self): new_data = generate_data(nb_arrays=10, @@ -217,7 +222,8 @@ def test_arraysequence_extend(self): common_shape=SEQ_DATA['seq'].common_shape*2, rng=SEQ_DATA['rng']) seq = SEQ_DATA['seq'].copy() # Copy because of in-place modification. 
- assert_raises(ValueError, seq.extend, data) + with pytest.raises(ValueError): + seq.extend(data) # Extend after extracting some slice working_slice = seq[:2] @@ -228,9 +234,6 @@ def test_arraysequence_getitem(self): for i, e in enumerate(SEQ_DATA['seq']): assert_array_equal(SEQ_DATA['seq'][i], e) - if sys.version_info < (3,): - assert_array_equal(SEQ_DATA['seq'][long(i)], e) - # Get all items using indexing (creates a view). indices = list(range(len(SEQ_DATA['seq']))) seq_view = SEQ_DATA['seq'][indices] @@ -265,7 +268,8 @@ def test_arraysequence_getitem(self): for i, keep in enumerate(selection) if keep]) # Test invalid indexing - assert_raises(TypeError, SEQ_DATA['seq'].__getitem__, 'abc') + with pytest.raises(TypeError): + SEQ_DATA['seq']['abc'] # Get specific columns. seq_view = SEQ_DATA['seq'][:, 2] @@ -277,6 +281,180 @@ def test_arraysequence_getitem(self): check_arr_seq_view(seq_view, SEQ_DATA['seq']) check_arr_seq(seq_view, [d[:, 2] for d in SEQ_DATA['data'][::-2]]) + def test_arraysequence_setitem(self): + # Set one item + seq = SEQ_DATA['seq'] * 0 + for i, e in enumerate(SEQ_DATA['seq']): + seq[i] = e + + check_arr_seq(seq, SEQ_DATA['seq']) + + # Setitem with a scalar. + seq = SEQ_DATA['seq'].copy() + seq[:] = 0 + assert seq._data.sum() == 0 + + # Setitem with a list of ndarray. + seq = SEQ_DATA['seq'] * 0 + seq[:] = SEQ_DATA['data'] + check_arr_seq(seq, SEQ_DATA['data']) + + # Setitem using tuple indexing. + seq = ArraySequence(np.arange(900).reshape((50,6,3))) + seq[:, 0] = 0 + assert seq._data[:, 0].sum() == 0 + + # Setitem using tuple indexing. + seq = ArraySequence(np.arange(900).reshape((50,6,3))) + seq[range(len(seq))] = 0 + assert seq._data.sum() == 0 + + # Setitem of a slice using another slice. + seq = ArraySequence(np.arange(900).reshape((50,6,3))) + seq[0:4] = seq[5:9] + check_arr_seq(seq[0:4], seq[5:9]) + + # Setitem between array sequences with different number of sequences. + seq = ArraySequence(np.arange(900).reshape((50,6,3))) + with pytest.raises(ValueError): + seq[0:4] = seq[5:10] + + # Setitem between array sequences with different amount of points. + seq1 = ArraySequence(np.arange(10).reshape(5, 2)) + seq2 = ArraySequence(np.arange(15).reshape(5, 3)) + with pytest.raises(ValueError): + seq1[0:5] = seq2 + + # Setitem between array sequences with different common shape. + seq1 = ArraySequence(np.arange(12).reshape(2, 2, 3)) + seq2 = ArraySequence(np.arange(8).reshape(2, 2, 2)) + + with pytest.raises(ValueError): + seq1[0:2] = seq2 + + # Invalid index. + with pytest.raises(TypeError): + seq[object()] = None + + def test_arraysequence_operators(self): + # Disable division per zero warnings. + flags = np.seterr(divide='ignore', invalid='ignore') + SCALARS = [42, 0.5, True, -3, 0] + CMP_OPS = ["__eq__", "__ne__", "__lt__", "__le__", "__gt__", "__ge__"] + + seq = SEQ_DATA['seq'].copy() + seq_int = SEQ_DATA['seq'].copy() + seq_int._data = seq_int._data.astype(int) + seq_bool = SEQ_DATA['seq'].copy() > 30 + + ARRSEQS = [seq, seq_int, seq_bool] + VIEWS = [seq[::2], seq_int[::2], seq_bool[::2]] + + def _test_unary(op, arrseq): + orig = arrseq.copy() + seq = getattr(orig, op)() + assert seq is not orig + check_arr_seq(seq, [getattr(d, op)() for d in orig]) + + def _test_binary(op, arrseq, scalars, seqs, inplace=False): + for scalar in scalars: + orig = arrseq.copy() + seq = getattr(orig, op)(scalar) + assert (seq is orig) == inplace + + check_arr_seq(seq, [getattr(e, op)(scalar) for e in arrseq]) + + # Test math operators with another ArraySequence. 
+ for other in seqs: + orig = arrseq.copy() + seq = getattr(orig, op)(other) + assert seq is not SEQ_DATA['seq'] + check_arr_seq(seq, [getattr(e1, op)(e2) for e1, e2 in zip(arrseq, other)]) + + # Operations between array sequences of different lengths. + orig = arrseq.copy() + with pytest.raises(ValueError): + getattr(orig, op)(orig[::2]) + + # Operations between array sequences with different amount of data. + seq1 = ArraySequence(np.arange(10).reshape(5, 2)) + seq2 = ArraySequence(np.arange(15).reshape(5, 3)) + with pytest.raises(ValueError): + getattr(seq1, op)(seq2) + + # Operations between array sequences with different common shape. + seq1 = ArraySequence(np.arange(12).reshape(2, 2, 3)) + seq2 = ArraySequence(np.arange(8).reshape(2, 2, 2)) + with pytest.raises(ValueError): + getattr(seq1, op)(seq2) + + + + for op in ["__add__", "__sub__", "__mul__", "__mod__", + "__floordiv__", "__truediv__"] + CMP_OPS: + _test_binary(op, seq, SCALARS, ARRSEQS) + _test_binary(op, seq_int, SCALARS, ARRSEQS) + + # Test math operators with ArraySequence views. + _test_binary(op, seq[::2], SCALARS, VIEWS) + _test_binary(op, seq_int[::2], SCALARS, VIEWS) + + if op in CMP_OPS: + continue + + op = "__i{}__".format(op.strip("_")) + _test_binary(op, seq, SCALARS, ARRSEQS, inplace=True) + + if op == "__itruediv__": + continue # Going to deal with it separately. + + _test_binary(op, seq_int, [42, -3, True, 0], [seq_int, seq_bool, -seq_int], inplace=True) # int <-- int + + with pytest.raises(TypeError): + _test_binary(op, seq_int, [0.5], [], inplace=True) # int <-- float + with pytest.raises(TypeError): + _test_binary(op, seq_int, [], [seq], inplace=True) # int <-- float + + + # __pow__ : Integers to negative integer powers are not allowed. + _test_binary("__pow__", seq, [42, -3, True, 0], [seq_int, seq_bool, -seq_int]) + _test_binary("__ipow__", seq, [42, -3, True, 0], [seq_int, seq_bool, -seq_int], inplace=True) + + with pytest.raises(ValueError): + _test_binary("__pow__", seq_int, [-3], []) + with pytest.raises(ValueError): + _test_binary("__ipow__", seq_int, [-3], [], inplace=True) + + # __itruediv__ is only valid with float arrseq. + for scalar in SCALARS + ARRSEQS: + seq_int_cp = seq_int.copy() + with pytest.raises(TypeError): + seq_int_cp /= scalar + + # Bitwise operators + for op in ("__lshift__", "__rshift__", "__or__", "__and__", "__xor__"): + _test_binary(op, seq_bool, [42, -3, True, 0], [seq_int, seq_bool, -seq_int]) + + with pytest.raises(TypeError): + _test_binary(op, seq_bool, [0.5], []) + with pytest.raises(TypeError): + _test_binary(op, seq, [], [seq]) + + # Unary operators + for op in ["__neg__", "__abs__"]: + _test_unary(op, seq) + _test_unary(op, -seq) + _test_unary(op, seq_int) + _test_unary(op, -seq_int) + + _test_unary("__abs__", seq_bool) + _test_unary("__invert__", seq_bool) + with pytest.raises(TypeError): + _test_unary("__invert__", seq) + + # Restore flags. + np.seterr(**flags) + def test_arraysequence_repr(self): # Test that calling repr on a ArraySequence object is not falling. repr(SEQ_DATA['seq']) @@ -292,7 +470,7 @@ def test_arraysequence_repr(self): txt1 = repr(seq) np.set_printoptions(threshold=nb_arrays//2) txt2 = repr(seq) - assert_true(len(txt2) < len(txt1)) + assert len(txt2) < len(txt1) np.set_printoptions(threshold=bkp_threshold) def test_save_and_load_arraysequence(self): @@ -319,6 +497,15 @@ def test_save_and_load_arraysequence(self): # Make sure we can add new elements to it. 
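# --- Illustrative aside, not part of this patch ---------------------------
# Sketch of the save/load round trip the surrounding test exercises; the
# filename is hypothetical.
import numpy as np
from nibabel.streamlines import ArraySequence

seq = ArraySequence(np.arange(900.).reshape((50, 6, 3)))
seq.save('example_seq.npz')
loaded_seq = ArraySequence.load('example_seq.npz')
assert len(loaded_seq) == len(seq)
# As checked just below, the loaded sequence still accepts new elements.
# ---------------------------------------------------------------------------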
loaded_seq.append(SEQ_DATA['data'][0]) + def test_get_data(self): + seq_view = SEQ_DATA['seq'][::2] + check_arr_seq_view(seq_view, SEQ_DATA['seq']) + + # We make sure the array sequence data does not + # contain more elements than it is supposed to. + data = seq_view.get_data() + assert len(data) < len(seq_view._data) + def test_concatenate(): seq = SEQ_DATA['seq'].copy() # In case there is in-place modification. @@ -326,10 +513,10 @@ def test_concatenate(): new_seq = concatenate(seqs, axis=1) seq._data += 100 # Modifying the 'seq' shouldn't change 'new_seq'. check_arr_seq(new_seq, SEQ_DATA['data']) - assert_true(not new_seq._is_view) + assert new_seq._is_view is not True seq = SEQ_DATA['seq'] seqs = [seq[:, [i]] for i in range(seq.common_shape[0])] new_seq = concatenate(seqs, axis=0) - assert_true(len(new_seq), seq.common_shape[0] * len(seq)) + assert len(new_seq) == seq.common_shape[0] * len(seq) assert_array_equal(new_seq._data, seq._data.T.reshape((-1, 1))) diff --git a/nibabel/streamlines/tests/test_streamlines.py b/nibabel/streamlines/tests/test_streamlines.py index 90a18f5acf..9a2f803117 100644 --- a/nibabel/streamlines/tests/test_streamlines.py +++ b/nibabel/streamlines/tests/test_streamlines.py @@ -8,11 +8,9 @@ import nibabel as nib from io import BytesIO from nibabel.tmpdirs import InTemporaryDirectory -from nibabel.py3k import asbytes +from numpy.compat.py3k import asbytes -from nibabel.testing import data_path -from nibabel.testing import clear_and_catch_warnings -from nose.tools import assert_equal, assert_raises, assert_true, assert_false +from nibabel.testing import data_path, clear_and_catch_warnings from .test_tractogram import assert_tractogram_equal from ..tractogram import Tractogram, LazyTractogram @@ -82,50 +80,50 @@ def test_is_supported_detect_format(): # Test is_supported and detect_format functions # Empty file/string f = BytesIO() - assert_false(nib.streamlines.is_supported(f)) - assert_false(nib.streamlines.is_supported("")) - assert_true(nib.streamlines.detect_format(f) is None) - assert_true(nib.streamlines.detect_format("") is None) + assert not nib.streamlines.is_supported(f) + assert not nib.streamlines.is_supported("") + assert nib.streamlines.detect_format(f) is None + assert nib.streamlines.detect_format("") is None # Valid file without extension for tfile_cls in FORMATS.values(): f = BytesIO() f.write(asbytes(tfile_cls.MAGIC_NUMBER)) f.seek(0, os.SEEK_SET) - assert_true(nib.streamlines.is_supported(f)) - assert_true(nib.streamlines.detect_format(f) is tfile_cls) + assert nib.streamlines.is_supported(f) + assert nib.streamlines.detect_format(f) is tfile_cls # Wrong extension but right magic number for tfile_cls in FORMATS.values(): with tempfile.TemporaryFile(mode="w+b", suffix=".txt") as f: f.write(asbytes(tfile_cls.MAGIC_NUMBER)) f.seek(0, os.SEEK_SET) - assert_true(nib.streamlines.is_supported(f)) - assert_true(nib.streamlines.detect_format(f) is tfile_cls) + assert nib.streamlines.is_supported(f) + assert nib.streamlines.detect_format(f) is tfile_cls # Good extension but wrong magic number for ext, tfile_cls in FORMATS.items(): with tempfile.TemporaryFile(mode="w+b", suffix=ext) as f: f.write(b"pass") f.seek(0, os.SEEK_SET) - assert_false(nib.streamlines.is_supported(f)) - assert_true(nib.streamlines.detect_format(f) is None) + assert not nib.streamlines.is_supported(f) + assert nib.streamlines.detect_format(f) is None # Wrong extension, string only f = "my_tractogram.asd" - assert_false(nib.streamlines.is_supported(f)) - 
assert_true(nib.streamlines.detect_format(f) is None) + assert not nib.streamlines.is_supported(f) + assert nib.streamlines.detect_format(f) is None # Good extension, string only for ext, tfile_cls in FORMATS.items(): f = "my_tractogram" + ext - assert_true(nib.streamlines.is_supported(f)) - assert_equal(nib.streamlines.detect_format(f), tfile_cls) + assert nib.streamlines.is_supported(f) + assert nib.streamlines.detect_format(f) == tfile_cls # Extension should not be case-sensitive. for ext, tfile_cls in FORMATS.items(): f = "my_tractogram" + ext.upper() - assert_true(nib.streamlines.detect_format(f) is tfile_cls) + assert nib.streamlines.detect_format(f) is tfile_cls class TestLoadSave(unittest.TestCase): @@ -135,12 +133,12 @@ def test_load_empty_file(self): for empty_filename in DATA['empty_filenames']: tfile = nib.streamlines.load(empty_filename, lazy_load=lazy_load) - assert_true(isinstance(tfile, TractogramFile)) + assert isinstance(tfile, TractogramFile) if lazy_load: - assert_true(type(tfile.tractogram), Tractogram) + assert type(tfile.tractogram), Tractogram else: - assert_true(type(tfile.tractogram), LazyTractogram) + assert type(tfile.tractogram), LazyTractogram assert_tractogram_equal(tfile.tractogram, DATA['empty_tractogram']) @@ -150,12 +148,12 @@ def test_load_simple_file(self): for simple_filename in DATA['simple_filenames']: tfile = nib.streamlines.load(simple_filename, lazy_load=lazy_load) - assert_true(isinstance(tfile, TractogramFile)) + assert isinstance(tfile, TractogramFile) if lazy_load: - assert_true(type(tfile.tractogram), Tractogram) + assert type(tfile.tractogram), Tractogram else: - assert_true(type(tfile.tractogram), LazyTractogram) + assert type(tfile.tractogram), LazyTractogram assert_tractogram_equal(tfile.tractogram, DATA['simple_tractogram']) @@ -165,12 +163,12 @@ def test_load_complex_file(self): for complex_filename in DATA['complex_filenames']: tfile = nib.streamlines.load(complex_filename, lazy_load=lazy_load) - assert_true(isinstance(tfile, TractogramFile)) + assert isinstance(tfile, TractogramFile) if lazy_load: - assert_true(type(tfile.tractogram), Tractogram) + assert type(tfile.tractogram), Tractogram else: - assert_true(type(tfile.tractogram), LazyTractogram) + assert type(tfile.tractogram), LazyTractogram tractogram = Tractogram(DATA['streamlines'], affine_to_rasmm=np.eye(4)) @@ -191,19 +189,19 @@ def test_save_tractogram_file(self): trk_file = trk.TrkFile(tractogram) # No need for keyword arguments. - assert_raises(ValueError, nib.streamlines.save, - trk_file, "dummy.trk", header={}) + with self.assertRaises(ValueError): + nib.streamlines.save(trk_file, "dummy.trk", header={}) # Wrong extension. 
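# --- Illustrative aside, not part of this patch ---------------------------
# Sketch of the extension-based helpers these tests cover; the filenames are
# hypothetical.
import nibabel as nib

nib.streamlines.detect_format('bundle.trk')   # -> the TrkFile class
nib.streamlines.detect_format('bundle.xyz')   # -> None (unknown extension)
nib.streamlines.is_supported('bundle.tck')    # -> True
# Saving a TrkFile object through a '.tck' filename, as in the test below,
# emits an ExtensionWarning.
# ---------------------------------------------------------------------------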
with clear_and_catch_warnings(record=True, modules=[nib.streamlines]) as w: trk_file = trk.TrkFile(tractogram) - assert_raises(ValueError, nib.streamlines.save, - trk_file, "dummy.tck", header={}) + with self.assertRaises(ValueError): + nib.streamlines.save(trk_file, "dummy.tck", header={}) - assert_equal(len(w), 1) - assert_true(issubclass(w[0].category, ExtensionWarning)) - assert_true("extension" in str(w[0].message)) + assert len(w) == 1 + assert issubclass(w[0].category, ExtensionWarning) + assert "extension" in str(w[0].message) with InTemporaryDirectory(): nib.streamlines.save(trk_file, "dummy.trk") @@ -250,9 +248,9 @@ def test_save_complex_file(self): ((not cls.SUPPORTS_DATA_PER_POINT) + (not cls.SUPPORTS_DATA_PER_STREAMLINE)) - assert_equal(len(w), nb_expected_warnings) + assert len(w) == nb_expected_warnings for i in range(nb_expected_warnings): - assert_true(issubclass(w[i].category, Warning)) + assert issubclass(w[i].category, Warning) tractogram = Tractogram(DATA['streamlines'], affine_to_rasmm=np.eye(4)) @@ -267,11 +265,26 @@ def test_save_complex_file(self): tfile = nib.streamlines.load(filename, lazy_load=False) assert_tractogram_equal(tfile.tractogram, tractogram) + def test_save_sliced_tractogram(self): + tractogram = Tractogram(DATA['streamlines'], + affine_to_rasmm=np.eye(4)) + original_tractogram = tractogram.copy() + for ext, cls in FORMATS.items(): + with InTemporaryDirectory(): + filename = 'streamlines' + ext + nib.streamlines.save(tractogram[::2], filename) + tfile = nib.streamlines.load(filename, lazy_load=False) + assert_tractogram_equal(tfile.tractogram, tractogram[::2]) + # Make sure original tractogram hasn't changed. + assert_tractogram_equal(tractogram, original_tractogram) + def test_load_unknown_format(self): - assert_raises(ValueError, nib.streamlines.load, "") + with self.assertRaises(ValueError): + nib.streamlines.load("") def test_save_unknown_format(self): - assert_raises(ValueError, nib.streamlines.save, Tractogram(), "") + with self.assertRaises(ValueError): + nib.streamlines.save(Tractogram(), "") def test_save_from_generator(self): tractogram = Tractogram(DATA['streamlines'], diff --git a/nibabel/streamlines/tests/test_tck.py b/nibabel/streamlines/tests/test_tck.py index ad16b52754..0dfb043d83 100644 --- a/nibabel/streamlines/tests/test_tck.py +++ b/nibabel/streamlines/tests/test_tck.py @@ -14,15 +14,15 @@ from .. 
import tck as tck_module from ..tck import TckFile -from nose.tools import assert_equal, assert_raises, assert_true +import pytest from numpy.testing import assert_array_equal -from nibabel.testing import data_path, clear_and_catch_warnings +from ...testing import data_path, clear_and_catch_warnings from .test_tractogram import assert_tractogram_equal DATA = {} -def setup(): +def setup_module(): global DATA DATA['empty_tck_fname'] = pjoin(data_path, "empty.tck") @@ -69,8 +69,8 @@ def test_load_matlab_nan_file(self): for lazy_load in [False, True]: tck = TckFile.load(DATA['matlab_nan_tck_fname'], lazy_load=lazy_load) streamlines = list(tck.tractogram.streamlines) - assert_equal(len(streamlines), 1) - assert_equal(streamlines[0].shape, (108, 3)) + assert len(streamlines) == 1 + assert streamlines[0].shape == (108, 3) def test_writeable_data(self): data = DATA['simple_tractogram'] @@ -80,7 +80,7 @@ def test_writeable_data(self): for actual, expected_tgi in zip(tck.streamlines, data): assert_array_equal(actual, expected_tgi.streamline) # Test we can write to arrays - assert_true(actual.flags.writeable) + assert actual.flags.writeable actual[0, 0] = 99 def test_load_simple_file_in_big_endian(self): @@ -88,7 +88,7 @@ def test_load_simple_file_in_big_endian(self): tck = TckFile.load(DATA['simple_tck_big_endian_fname'], lazy_load=lazy_load) assert_tractogram_equal(tck.tractogram, DATA['simple_tractogram']) - assert_equal(tck.header['datatype'], 'Float32BE') + assert tck.header['datatype'] == 'Float32BE' def test_load_file_with_wrong_information(self): tck_file = open(DATA['simple_tck_fname'], 'rb').read() @@ -96,12 +96,15 @@ def test_load_file_with_wrong_information(self): # Simulate a TCK file where `datatype` has not the right endianness. new_tck_file = tck_file.replace(asbytes("Float32LE"), asbytes("Float32BE")) - assert_raises(DataError, TckFile.load, BytesIO(new_tck_file)) + + with pytest.raises(DataError): + TckFile.load(BytesIO(new_tck_file)) # Simulate a TCK file with unsupported `datatype`. new_tck_file = tck_file.replace(asbytes("Float32LE"), asbytes("int32")) - assert_raises(HeaderError, TckFile.load, BytesIO(new_tck_file)) + with pytest.raises(HeaderError): + TckFile.load(BytesIO(new_tck_file)) # Simulate a TCK file with no `datatype` field. new_tck_file = tck_file.replace(b"datatype: Float32LE\n", b"") @@ -109,24 +112,25 @@ def test_load_file_with_wrong_information(self): new_tck_file = new_tck_file.replace(b"file: . 67\n", b"file: . 47\n") with clear_and_catch_warnings(record=True, modules=[tck_module]) as w: tck = TckFile.load(BytesIO(new_tck_file)) - assert_equal(len(w), 1) - assert_true(issubclass(w[0].category, HeaderWarning)) - assert_true("Missing 'datatype'" in str(w[0].message)) + assert len(w) == 1 + assert issubclass(w[0].category, HeaderWarning) + assert "Missing 'datatype'" in str(w[0].message) assert_array_equal(tck.header['datatype'], "Float32LE") # Simulate a TCK file with no `file` field. new_tck_file = tck_file.replace(b"\nfile: . 67", b"") with clear_and_catch_warnings(record=True, modules=[tck_module]) as w: tck = TckFile.load(BytesIO(new_tck_file)) - assert_equal(len(w), 1) - assert_true(issubclass(w[0].category, HeaderWarning)) - assert_true("Missing 'file'" in str(w[0].message)) + assert len(w) == 1 + assert issubclass(w[0].category, HeaderWarning) + assert "Missing 'file'" in str(w[0].message) assert_array_equal(tck.header['file'], ". 56") # Simulate a TCK file with `file` field pointing to another file. new_tck_file = tck_file.replace(b"file: . 
67\n", b"file: dummy.mat 75\n") - assert_raises(HeaderError, TckFile.load, BytesIO(new_tck_file)) + with pytest.raises(HeaderError): + TckFile.load(BytesIO(new_tck_file)) # Simulate a TCK file which is missing a streamline delimiter. eos = TckFile.FIBER_DELIMITER.tostring() @@ -137,11 +141,13 @@ def test_load_file_with_wrong_information(self): buffer_size = 1. / 1024**2 # 1 bytes hdr = TckFile._read_header(BytesIO(new_tck_file)) tck_reader = TckFile._read(BytesIO(new_tck_file), hdr, buffer_size) - assert_raises(DataError, list, tck_reader) + with pytest.raises(DataError): + list(tck_reader) # Simulate a TCK file which is missing the end-of-file delimiter. new_tck_file = tck_file[:-len(eof)] - assert_raises(DataError, TckFile.load, BytesIO(new_tck_file)) + with pytest.raises(DataError): + TckFile.load(BytesIO(new_tck_file)) def test_write_empty_file(self): tractogram = Tractogram(affine_to_rasmm=np.eye(4)) @@ -158,8 +164,7 @@ def test_write_empty_file(self): assert_tractogram_equal(new_tck.tractogram, new_tck_orig.tractogram) tck_file.seek(0, os.SEEK_SET) - assert_equal(tck_file.read(), - open(DATA['empty_tck_fname'], 'rb').read()) + assert tck_file.read() == open(DATA['empty_tck_fname'], 'rb').read() def test_write_simple_file(self): tractogram = Tractogram(DATA['streamlines'], @@ -177,17 +182,18 @@ def test_write_simple_file(self): assert_tractogram_equal(new_tck.tractogram, new_tck_orig.tractogram) tck_file.seek(0, os.SEEK_SET) - assert_equal(tck_file.read(), - open(DATA['simple_tck_fname'], 'rb').read()) + assert tck_file.read() == open(DATA['simple_tck_fname'], 'rb').read() # TCK file containing not well formatted entries in its header. tck_file = BytesIO() tck = TckFile(tractogram) tck.header['new_entry'] = 'value\n' # \n not allowed - assert_raises(HeaderError, tck.save, tck_file) + with pytest.raises(HeaderError): + tck.save(tck_file) tck.header['new_entry'] = 'val:ue' # : not allowed - assert_raises(HeaderError, tck.save, tck_file) + with pytest.raises(HeaderError): + tck.save(tck_file) def test_load_write_file(self): for fname in [DATA['empty_tck_fname'], @@ -202,7 +208,7 @@ def test_load_write_file(self): # Check that the written file is the same as the one read. tck_file.seek(0, os.SEEK_SET) - assert_equal(tck_file.read(), open(fname, 'rb').read()) + assert tck_file.read() == open(fname, 'rb').read() # Save tractogram that has an affine_to_rasmm. for lazy_load in [False, True]: diff --git a/nibabel/streamlines/tests/test_tractogram.py b/nibabel/streamlines/tests/test_tractogram.py index 0fe83a26d7..f86594d070 100644 --- a/nibabel/streamlines/tests/test_tractogram.py +++ b/nibabel/streamlines/tests/test_tractogram.py @@ -6,11 +6,9 @@ import operator from collections import defaultdict -from nibabel.testing import assert_arrays_equal -from nibabel.testing import clear_and_catch_warnings -from nose.tools import assert_equal, assert_raises, assert_true +import pytest +from ...testing import assert_arrays_equal, clear_and_catch_warnings from numpy.testing import assert_array_equal, assert_array_almost_equal -from six.moves import zip from .. 
import tractogram as module_tractogram from ..tractogram import is_data_dict, is_lazy_dict @@ -94,7 +92,7 @@ def make_dummy_streamline(nb_points): return streamline, data_per_point, data_for_streamline -def setup(): +def setup_module(): global DATA DATA['rng'] = np.random.RandomState(1234) @@ -150,13 +148,12 @@ def check_tractogram_item(tractogram_item, assert_array_equal(tractogram_item.streamline, streamline) - assert_equal(len(tractogram_item.data_for_streamline), - len(data_for_streamline)) + assert len(tractogram_item.data_for_streamline) == len(data_for_streamline) for key in data_for_streamline.keys(): assert_array_equal(tractogram_item.data_for_streamline[key], data_for_streamline[key]) - assert_equal(len(tractogram_item.data_for_points), len(data_for_points)) + assert len(tractogram_item.data_for_points) == len(data_for_points) for key in data_for_points.keys(): assert_arrays_equal(tractogram_item.data_for_points[key], data_for_points[key]) @@ -172,16 +169,16 @@ def check_tractogram(tractogram, data_per_streamline={}, data_per_point={}): streamlines = list(streamlines) - assert_equal(len(tractogram), len(streamlines)) + assert len(tractogram) == len(streamlines) assert_arrays_equal(tractogram.streamlines, streamlines) [t for t in tractogram] # Force iteration through tractogram. - assert_equal(len(tractogram.data_per_streamline), len(data_per_streamline)) + assert len(tractogram.data_per_streamline) == len(data_per_streamline) for key in data_per_streamline.keys(): assert_arrays_equal(tractogram.data_per_streamline[key], data_per_streamline[key]) - assert_equal(len(tractogram.data_per_point), len(data_per_point)) + assert len(tractogram.data_per_point) == len(data_per_point) for key in data_per_point.keys(): assert_arrays_equal(tractogram.data_per_point[key], data_per_point[key]) @@ -205,43 +202,43 @@ def test_per_array_dict_creation(self): nb_streamlines = len(DATA['tractogram']) data_per_streamline = DATA['tractogram'].data_per_streamline data_dict = PerArrayDict(nb_streamlines, data_per_streamline) - assert_equal(data_dict.keys(), data_per_streamline.keys()) + assert data_dict.keys() == data_per_streamline.keys() for k in data_dict.keys(): assert_array_equal(data_dict[k], data_per_streamline[k]) del data_dict['mean_curvature'] - assert_equal(len(data_dict), - len(data_per_streamline)-1) + assert len(data_dict) == len(data_per_streamline) - 1 # Create a PerArrayDict object using an existing dict object. data_per_streamline = DATA['data_per_streamline'] data_dict = PerArrayDict(nb_streamlines, data_per_streamline) - assert_equal(data_dict.keys(), data_per_streamline.keys()) + assert data_dict.keys() == data_per_streamline.keys() for k in data_dict.keys(): assert_array_equal(data_dict[k], data_per_streamline[k]) del data_dict['mean_curvature'] - assert_equal(len(data_dict), len(data_per_streamline)-1) + assert len(data_dict) == len(data_per_streamline) - 1 # Create a PerArrayDict object using keyword arguments. 
data_per_streamline = DATA['data_per_streamline'] data_dict = PerArrayDict(nb_streamlines, **data_per_streamline) - assert_equal(data_dict.keys(), data_per_streamline.keys()) + assert data_dict.keys() == data_per_streamline.keys() for k in data_dict.keys(): assert_array_equal(data_dict[k], data_per_streamline[k]) del data_dict['mean_curvature'] - assert_equal(len(data_dict), len(data_per_streamline)-1) + assert len(data_dict) == len(data_per_streamline) - 1 def test_getitem(self): sdict = PerArrayDict(len(DATA['tractogram']), DATA['data_per_streamline']) - assert_raises(KeyError, sdict.__getitem__, 'invalid') + with pytest.raises(KeyError): + sdict['invalid'] # Test slicing and advanced indexing. for k, v in DATA['tractogram'].data_per_streamline.items(): - assert_true(k in sdict) + assert k in sdict assert_arrays_equal(sdict[k], v) assert_arrays_equal(sdict[::2][k], v[::2]) assert_arrays_equal(sdict[::-1][k], v[::-1]) @@ -259,7 +256,7 @@ def test_extend(self): new_data) sdict.extend(sdict2) - assert_equal(len(sdict), len(sdict2)) + assert len(sdict) == len(sdict2) for k in DATA['tractogram'].data_per_streamline: assert_arrays_equal(sdict[k][:len(DATA['tractogram'])], DATA['tractogram'].data_per_streamline[k]) @@ -279,21 +276,24 @@ def test_extend(self): 'mean_colors': 4 * np.array(DATA['mean_colors']), 'other': 5 * np.array(DATA['mean_colors'])} sdict2 = PerArrayDict(len(DATA['tractogram']), new_data) - assert_raises(ValueError, sdict.extend, sdict2) + with pytest.raises(ValueError): + sdict.extend(sdict2) # Other dict has not the same entries (key mistmached). new_data = {'mean_curvature': 2 * np.array(DATA['mean_curvature']), 'mean_torsion': 3 * np.array(DATA['mean_torsion']), 'other': 4 * np.array(DATA['mean_colors'])} sdict2 = PerArrayDict(len(DATA['tractogram']), new_data) - assert_raises(ValueError, sdict.extend, sdict2) + with pytest.raises(ValueError): + sdict.extend(sdict2) # Other dict has the right number of entries but wrong shape. new_data = {'mean_curvature': 2 * np.array(DATA['mean_curvature']), 'mean_torsion': 3 * np.array(DATA['mean_torsion']), 'mean_colors': 4 * np.array(DATA['mean_torsion'])} sdict2 = PerArrayDict(len(DATA['tractogram']), new_data) - assert_raises(ValueError, sdict.extend, sdict2) + with pytest.raises(ValueError): + sdict.extend(sdict2) class TestPerArraySequenceDict(unittest.TestCase): @@ -304,43 +304,43 @@ def test_per_array_sequence_dict_creation(self): total_nb_rows = DATA['tractogram'].streamlines.total_nb_rows data_per_point = DATA['tractogram'].data_per_point data_dict = PerArraySequenceDict(total_nb_rows, data_per_point) - assert_equal(data_dict.keys(), data_per_point.keys()) + assert data_dict.keys() == data_per_point.keys() for k in data_dict.keys(): assert_arrays_equal(data_dict[k], data_per_point[k]) del data_dict['fa'] - assert_equal(len(data_dict), - len(data_per_point)-1) + assert len(data_dict) == len(data_per_point) - 1 # Create a PerArraySequenceDict object using an existing dict object. data_per_point = DATA['data_per_point'] data_dict = PerArraySequenceDict(total_nb_rows, data_per_point) - assert_equal(data_dict.keys(), data_per_point.keys()) + assert data_dict.keys() == data_per_point.keys() for k in data_dict.keys(): assert_arrays_equal(data_dict[k], data_per_point[k]) del data_dict['fa'] - assert_equal(len(data_dict), len(data_per_point)-1) + assert len(data_dict) == len(data_per_point) - 1 # Create a PerArraySequenceDict object using keyword arguments. 
data_per_point = DATA['data_per_point'] data_dict = PerArraySequenceDict(total_nb_rows, **data_per_point) - assert_equal(data_dict.keys(), data_per_point.keys()) + assert data_dict.keys() == data_per_point.keys() for k in data_dict.keys(): assert_arrays_equal(data_dict[k], data_per_point[k]) del data_dict['fa'] - assert_equal(len(data_dict), len(data_per_point)-1) + assert len(data_dict) == len(data_per_point) - 1 def test_getitem(self): total_nb_rows = DATA['tractogram'].streamlines.total_nb_rows sdict = PerArraySequenceDict(total_nb_rows, DATA['data_per_point']) - assert_raises(KeyError, sdict.__getitem__, 'invalid') + with pytest.raises(KeyError): + sdict['invalid'] # Test slicing and advanced indexing. for k, v in DATA['tractogram'].data_per_point.items(): - assert_true(k in sdict) + assert k in sdict assert_arrays_equal(sdict[k], v) assert_arrays_equal(sdict[::2][k], v[::2]) assert_arrays_equal(sdict[::-1][k], v[::-1]) @@ -361,7 +361,7 @@ def test_extend(self): sdict2 = PerArraySequenceDict(np.sum(list_nb_points), new_data) sdict.extend(sdict2) - assert_equal(len(sdict), len(sdict2)) + assert len(sdict) == len(sdict2) for k in DATA['tractogram'].data_per_point: assert_arrays_equal(sdict[k][:len(DATA['tractogram'])], DATA['tractogram'].data_per_point[k]) @@ -383,7 +383,8 @@ def test_extend(self): data_per_point_shapes, rng=DATA['rng']) sdict2 = PerArraySequenceDict(np.sum(list_nb_points), new_data) - assert_raises(ValueError, sdict.extend, sdict2) + with pytest.raises(ValueError): + sdict.extend(sdict2) # Other dict has not the same entries (key mistmached). data_per_point_shapes = {"colors": DATA['colors'][0].shape[1:], @@ -392,7 +393,8 @@ def test_extend(self): data_per_point_shapes, rng=DATA['rng']) sdict2 = PerArraySequenceDict(np.sum(list_nb_points), new_data) - assert_raises(ValueError, sdict.extend, sdict2) + with pytest.raises(ValueError): + sdict.extend(sdict2) # Other dict has the right number of entries but wrong shape. data_per_point_shapes = {"colors": DATA['colors'][0].shape[1:], @@ -401,7 +403,8 @@ def test_extend(self): data_per_point_shapes, rng=DATA['rng']) sdict2 = PerArraySequenceDict(np.sum(list_nb_points), new_data) - assert_raises(ValueError, sdict.extend, sdict2) + with pytest.raises(ValueError): + sdict.extend(sdict2) class TestLazyDict(unittest.TestCase): @@ -414,14 +417,13 @@ def test_lazydict_creation(self): expected_keys = DATA['data_per_streamline_func'].keys() for data_dict in lazy_dicts: - assert_true(is_lazy_dict(data_dict)) - assert_equal(data_dict.keys(), expected_keys) + assert is_lazy_dict(data_dict) + assert data_dict.keys() == expected_keys for k in data_dict.keys(): assert_array_equal(list(data_dict[k]), list(DATA['data_per_streamline'][k])) - assert_equal(len(data_dict), - len(DATA['data_per_streamline_func'])) + assert len(data_dict) == len(DATA['data_per_streamline_func']) class TestTractogramItem(unittest.TestCase): @@ -440,7 +442,7 @@ def test_creating_tractogram_item(self): # Create a tractogram item with a streamline, data. t = TractogramItem(streamline, data_for_streamline, data_for_points) - assert_equal(len(t), len(streamline)) + assert len(t) == len(streamline) assert_array_equal(t.streamline, streamline) assert_array_equal(list(t), streamline) assert_array_equal(t.data_for_streamline['mean_curvature'], @@ -457,7 +459,7 @@ def test_tractogram_creation(self): # Create an empty tractogram. 
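# --- Illustrative aside, not part of this patch ---------------------------
# Sketch of building a Tractogram with per-streamline and per-point data, the
# structures these tests manipulate; the values are made up.
import numpy as np
from nibabel.streamlines.tractogram import Tractogram

streamlines = [np.zeros((3, 3)), np.ones((5, 3))]
t = Tractogram(streamlines,
               data_per_streamline={'mean_curvature': np.array([[1.1], [2.1]])},
               data_per_point={'colors': [np.zeros((3, 3)), np.ones((5, 3))]},
               affine_to_rasmm=np.eye(4))
# With no arguments (as just below) the tractogram is empty and its
# affine_to_rasmm is None.
# ---------------------------------------------------------------------------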
tractogram = Tractogram() check_tractogram(tractogram) - assert_true(tractogram.affine_to_rasmm is None) + assert tractogram.affine_to_rasmm is None # Create a tractogram with only streamlines tractogram = Tractogram(streamlines=DATA['streamlines']) @@ -478,8 +480,8 @@ def test_tractogram_creation(self): DATA['data_per_streamline'], DATA['data_per_point']) - assert_true(is_data_dict(tractogram.data_per_streamline)) - assert_true(is_data_dict(tractogram.data_per_point)) + assert is_data_dict(tractogram.data_per_streamline) + assert is_data_dict(tractogram.data_per_point) # Create a tractogram from another tractogram attributes. tractogram2 = Tractogram(tractogram.streamlines, @@ -503,8 +505,8 @@ def test_tractogram_creation(self): [(0, 0, 1)]*5] data_per_point = {'wrong_data': wrong_data} - assert_raises(ValueError, Tractogram, DATA['streamlines'], - data_per_point=data_per_point) + with pytest.raises(ValueError): + Tractogram(streamlines=DATA['streamlines'], data_per_point=data_per_point) # Inconsistent number of scalars between streamlines wrong_data = [[(1, 0, 0)]*1, @@ -512,8 +514,8 @@ def test_tractogram_creation(self): [(0, 0, 1)]*5] data_per_point = {'wrong_data': wrong_data} - assert_raises(ValueError, Tractogram, DATA['streamlines'], - data_per_point=data_per_point) + with pytest.raises(ValueError): + Tractogram(streamlines=DATA['streamlines'], data_per_point=data_per_point) def test_setting_affine_to_rasmm(self): tractogram = DATA['tractogram'].copy() @@ -521,28 +523,25 @@ def test_setting_affine_to_rasmm(self): # Test assigning None. tractogram.affine_to_rasmm = None - assert_true(tractogram.affine_to_rasmm is None) + assert tractogram.affine_to_rasmm is None # Test assigning a valid ndarray (should make a copy). tractogram.affine_to_rasmm = affine - assert_true(tractogram.affine_to_rasmm is not affine) + assert tractogram.affine_to_rasmm is not affine # Test assigning a list of lists. tractogram.affine_to_rasmm = affine.tolist() assert_array_equal(tractogram.affine_to_rasmm, affine) # Test assigning a ndarray with wrong shape. - assert_raises(ValueError, setattr, tractogram, - "affine_to_rasmm", affine[::2]) + with pytest.raises(ValueError): + tractogram.affine_to_rasmm = affine[::2] def test_tractogram_getitem(self): # Retrieve TractogramItem by their index. for i, t in enumerate(DATA['tractogram']): assert_tractogram_item_equal(DATA['tractogram'][i], t) - if sys.version_info < (3,): - assert_tractogram_item_equal(DATA['tractogram'][long(i)], t) - # Get one TractogramItem out of two. tractogram_view = DATA['simple_tractogram'][::2] check_tractogram(tractogram_view, DATA['streamlines'][::2]) @@ -597,21 +596,16 @@ def test_tractogram_copy(self): tractogram = DATA['tractogram'].copy() # Check we copied the data and not simply created new references. 
- assert_true(tractogram is not DATA['tractogram']) - assert_true(tractogram.streamlines - is not DATA['tractogram'].streamlines) - assert_true(tractogram.data_per_streamline - is not DATA['tractogram'].data_per_streamline) - assert_true(tractogram.data_per_point - is not DATA['tractogram'].data_per_point) + assert tractogram is not DATA['tractogram'] + assert tractogram.streamlines is not DATA['tractogram'].streamlines + assert tractogram.data_per_streamline is not DATA['tractogram'].data_per_streamline + assert tractogram.data_per_point is not DATA['tractogram'].data_per_point for key in tractogram.data_per_streamline: - assert_true(tractogram.data_per_streamline[key] - is not DATA['tractogram'].data_per_streamline[key]) + assert tractogram.data_per_streamline[key] is not DATA['tractogram'].data_per_streamline[key] for key in tractogram.data_per_point: - assert_true(tractogram.data_per_point[key] - is not DATA['tractogram'].data_per_point[key]) + assert tractogram.data_per_point[key] is not DATA['tractogram'].data_per_point[key] # Check the values of the data are the same. assert_tractogram_equal(tractogram, DATA['tractogram']) @@ -622,39 +616,39 @@ def test_creating_invalid_tractogram(self): [(0, 1, 0)]*2, [(0, 0, 1)]*3] # Last streamlines has 5 points. - assert_raises(ValueError, Tractogram, DATA['streamlines'], - data_per_point={'scalars': scalars}) + with pytest.raises(ValueError): + Tractogram(streamlines=DATA['streamlines'], data_per_point={'scalars': scalars}) # Not enough data_per_streamline for all streamlines. properties = [np.array([1.11, 1.22], dtype="f4"), np.array([3.11, 3.22], dtype="f4")] - assert_raises(ValueError, Tractogram, DATA['streamlines'], - data_per_streamline={'properties': properties}) + with pytest.raises(ValueError): + Tractogram(streamlines=DATA['streamlines'], data_per_streamline={'properties': properties}) # Inconsistent dimension for a data_per_point. - scalars = [[(1, 0, 0)]*1, - [(0, 1)]*2, - [(0, 0, 1)]*5] + scalars = [[(1, 0, 0)] * 1, + [(0, 1)] * 2, + [(0, 0, 1)] * 5] - assert_raises(ValueError, Tractogram, DATA['streamlines'], - data_per_point={'scalars': scalars}) + with pytest.raises(ValueError): + Tractogram(streamlines=DATA['streamlines'], data_per_point={'scalars':scalars}) # Inconsistent dimension for a data_per_streamline. properties = [[1.11, 1.22], [2.11], [3.11, 3.22]] - assert_raises(ValueError, Tractogram, DATA['streamlines'], - data_per_streamline={'properties': properties}) + with pytest.raises(ValueError): + Tractogram(streamlines=DATA['streamlines'], data_per_streamline={'properties': properties}) # Too many dimension for a data_per_streamline. properties = [np.array([[1.11], [1.22]], dtype="f4"), np.array([[2.11], [2.22]], dtype="f4"), np.array([[3.11], [3.22]], dtype="f4")] - assert_raises(ValueError, Tractogram, DATA['streamlines'], - data_per_streamline={'properties': properties}) + with pytest.raises(ValueError): + Tractogram(streamlines=DATA['streamlines'], data_per_streamline={'properties': properties}) def test_tractogram_apply_affine(self): tractogram = DATA['tractogram'].copy() @@ -664,7 +658,7 @@ def test_tractogram_apply_affine(self): # Apply the affine to the streamline in a lazy manner. 
transformed_tractogram = tractogram.apply_affine(affine, lazy=True) - assert_true(type(transformed_tractogram) is LazyTractogram) + assert type(transformed_tractogram) is LazyTractogram check_tractogram(transformed_tractogram, streamlines=[s*scaling for s in DATA['streamlines']], data_per_streamline=DATA['data_per_streamline'], @@ -677,7 +671,7 @@ def test_tractogram_apply_affine(self): # Apply the affine to the streamlines in-place. transformed_tractogram = tractogram.apply_affine(affine) - assert_true(transformed_tractogram is tractogram) + assert transformed_tractogram is tractogram check_tractogram(tractogram, streamlines=[s*scaling for s in DATA['streamlines']], data_per_streamline=DATA['data_per_streamline'], @@ -689,6 +683,22 @@ def test_tractogram_apply_affine(self): np.dot(np.eye(4), np.dot(np.linalg.inv(affine), np.linalg.inv(affine)))) + # Applying the affine to a tractogram that has been indexed or sliced + # shouldn't affect the remaining streamlines. + tractogram = DATA['tractogram'].copy() + transformed_tractogram = tractogram[::2].apply_affine(affine) + assert transformed_tractogram is not tractogram + check_tractogram(tractogram[::2], + streamlines=[s*scaling for s in DATA['streamlines'][::2]], + data_per_streamline=DATA['tractogram'].data_per_streamline[::2], + data_per_point=DATA['tractogram'].data_per_point[::2]) + + # Remaining streamlines should match the original ones. + check_tractogram(tractogram[1::2], + streamlines=DATA['streamlines'][1::2], + data_per_streamline=DATA['tractogram'].data_per_streamline[1::2], + data_per_point=DATA['tractogram'].data_per_point[1::2]) + # Check that applying an affine and its inverse give us back the # original streamlines. tractogram = DATA['tractogram'].copy() @@ -711,7 +721,7 @@ def test_tractogram_apply_affine(self): tractogram = DATA['tractogram'].copy() tractogram.affine_to_rasmm = None tractogram.apply_affine(affine) - assert_true(tractogram.affine_to_rasmm is None) + assert tractogram.affine_to_rasmm is None def test_tractogram_to_world(self): tractogram = DATA['tractogram'].copy() @@ -725,7 +735,7 @@ def test_tractogram_to_world(self): np.linalg.inv(affine)) tractogram_world = transformed_tractogram.to_world(lazy=True) - assert_true(type(tractogram_world) is LazyTractogram) + assert type(tractogram_world) is LazyTractogram assert_array_almost_equal(tractogram_world.affine_to_rasmm, np.eye(4)) for s1, s2 in zip(tractogram_world.streamlines, DATA['streamlines']): @@ -733,14 +743,14 @@ def test_tractogram_to_world(self): # Bring them back streamlines to world space in a in-place manner. tractogram_world = transformed_tractogram.to_world() - assert_true(tractogram_world is tractogram) + assert tractogram_world is tractogram assert_array_almost_equal(tractogram.affine_to_rasmm, np.eye(4)) for s1, s2 in zip(tractogram.streamlines, DATA['streamlines']): assert_array_almost_equal(s1, s2) # Calling to_world twice should do nothing. tractogram_world2 = transformed_tractogram.to_world() - assert_true(tractogram_world2 is tractogram) + assert tractogram_world2 is tractogram assert_array_almost_equal(tractogram.affine_to_rasmm, np.eye(4)) for s1, s2 in zip(tractogram.streamlines, DATA['streamlines']): assert_array_almost_equal(s1, s2) @@ -748,7 +758,8 @@ def test_tractogram_to_world(self): # Calling to_world when affine_to_rasmm is None should fail. 
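# --- Illustrative aside, not part of this patch ---------------------------
# Sketch of the affine round trip checked above: apply_affine() works in
# place on a Tractogram and to_world() maps it back to RAS+ mm, but fails
# once affine_to_rasmm has been cleared.  Values are made up.
import numpy as np
from nibabel.streamlines.tractogram import Tractogram

t = Tractogram([np.arange(9.).reshape(3, 3)], affine_to_rasmm=np.eye(4))
t.apply_affine(np.diag([2., 2., 2., 1.]))   # returns t itself, streamlines scaled
t.to_world()                                # undoes the pending transform
t.affine_to_rasmm = None
# t.to_world()                              # would now raise ValueError
# ---------------------------------------------------------------------------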
tractogram = DATA['tractogram'].copy() tractogram.affine_to_rasmm = None - assert_raises(ValueError, tractogram.to_world) + with pytest.raises(ValueError): + tractogram.to_world() def test_tractogram_extend(self): # Load tractogram that contains some metadata. @@ -758,7 +769,7 @@ def test_tractogram_extend(self): (extender, True)): first_arg = t.copy() new_t = op(first_arg, t) - assert_equal(new_t is first_arg, in_place) + assert (new_t is first_arg) == in_place assert_tractogram_equal(new_t[:len(t)], DATA['tractogram']) assert_tractogram_equal(new_t[len(t):], DATA['tractogram']) @@ -777,7 +788,8 @@ class TestLazyTractogram(unittest.TestCase): def test_lazy_tractogram_creation(self): # To create tractogram from arrays use `Tractogram`. - assert_raises(TypeError, LazyTractogram, DATA['streamlines']) + with pytest.raises(TypeError): + LazyTractogram(streamlines=DATA['streamlines']) # Streamlines and other data as generators streamlines = (x for x in DATA['streamlines']) @@ -788,29 +800,30 @@ def test_lazy_tractogram_creation(self): # Creating LazyTractogram with generators is not allowed as # generators get exhausted and are not reusable unlike generator # function. - assert_raises(TypeError, LazyTractogram, streamlines) - assert_raises(TypeError, LazyTractogram, - data_per_point={"none": None}) - assert_raises(TypeError, LazyTractogram, - data_per_streamline=data_per_streamline) - assert_raises(TypeError, LazyTractogram, DATA['streamlines'], - data_per_point=data_per_point) + with pytest.raises(TypeError): + LazyTractogram(streamlines=streamlines) + with pytest.raises(TypeError): + LazyTractogram(data_per_point={"none": None}) + with pytest.raises(TypeError): + LazyTractogram(data_per_streamline=data_per_streamline) + with pytest.raises(TypeError): + LazyTractogram(streamlines=DATA['streamlines'], data_per_point=data_per_point) # Empty `LazyTractogram` tractogram = LazyTractogram() check_tractogram(tractogram) - assert_true(tractogram.affine_to_rasmm is None) + assert tractogram.affine_to_rasmm is None # Create tractogram with streamlines and other data tractogram = LazyTractogram(DATA['streamlines_func'], DATA['data_per_streamline_func'], DATA['data_per_point_func']) - assert_true(is_lazy_dict(tractogram.data_per_streamline)) - assert_true(is_lazy_dict(tractogram.data_per_point)) + assert is_lazy_dict(tractogram.data_per_streamline) + assert is_lazy_dict(tractogram.data_per_point) [t for t in tractogram] # Force iteration through tractogram. - assert_equal(len(tractogram), len(DATA['streamlines'])) + assert len(tractogram) == len(DATA['streamlines']) # Generator functions get re-called and creates new iterators. for i in range(2): @@ -842,18 +855,20 @@ def _data_gen(): assert_tractogram_equal(tractogram, DATA['tractogram']) # Creating a LazyTractogram from not a corouting should raise an error. 
- assert_raises(TypeError, LazyTractogram.from_data_func, _data_gen()) + with pytest.raises(TypeError): + LazyTractogram.from_data_func(_data_gen()) def test_lazy_tractogram_getitem(self): - assert_raises(NotImplementedError, - DATA['lazy_tractogram'].__getitem__, 0) + with pytest.raises(NotImplementedError): + DATA['lazy_tractogram'][0] def test_lazy_tractogram_extend(self): t = DATA['lazy_tractogram'].copy() new_t = DATA['lazy_tractogram'].copy() for op in (operator.add, operator.iadd, extender): - assert_raises(NotImplementedError, op, new_t, t) + with pytest.raises(NotImplementedError): + op(new_t, t) def test_lazy_tractogram_len(self): modules = [module_tractogram] # Modules for which to catch warnings. @@ -862,35 +877,35 @@ def test_lazy_tractogram_len(self): # Calling `len` will create new generators each time. tractogram = LazyTractogram(DATA['streamlines_func']) - assert_true(tractogram._nb_streamlines is None) + assert tractogram._nb_streamlines is None # This should produce a warning message. - assert_equal(len(tractogram), len(DATA['streamlines'])) - assert_equal(tractogram._nb_streamlines, len(DATA['streamlines'])) - assert_equal(len(w), 1) + assert len(tractogram) == len(DATA['streamlines']) + assert tractogram._nb_streamlines == len(DATA['streamlines']) + assert len(w) == 1 tractogram = LazyTractogram(DATA['streamlines_func']) # New instances should still produce a warning message. - assert_equal(len(tractogram), len(DATA['streamlines'])) - assert_equal(len(w), 2) - assert_true(issubclass(w[-1].category, Warning)) + assert len(tractogram) == len(DATA['streamlines']) + assert len(w) == 2 + assert issubclass(w[-1].category, Warning) is True # Calling again 'len' again should *not* produce a warning. - assert_equal(len(tractogram), len(DATA['streamlines'])) - assert_equal(len(w), 2) + assert len(tractogram) == len(DATA['streamlines']) + assert len(w) == 2 with clear_and_catch_warnings(record=True, modules=modules) as w: # Once we iterated through the tractogram, we know the length. tractogram = LazyTractogram(DATA['streamlines_func']) - assert_true(tractogram._nb_streamlines is None) + assert tractogram._nb_streamlines is None [t for t in tractogram] # Force iteration through tractogram. - assert_equal(tractogram._nb_streamlines, len(DATA['streamlines'])) + assert tractogram._nb_streamlines == len(DATA['streamlines']) # This should *not* produce a warning. - assert_equal(len(tractogram), len(DATA['streamlines'])) - assert_equal(len(w), 0) + assert len(tractogram) == len(DATA['streamlines']) + assert len(w) == 0 def test_lazy_tractogram_apply_affine(self): affine = np.eye(4) @@ -900,7 +915,7 @@ def test_lazy_tractogram_apply_affine(self): tractogram = DATA['lazy_tractogram'].copy() transformed_tractogram = tractogram.apply_affine(affine) - assert_true(transformed_tractogram is not tractogram) + assert transformed_tractogram is not tractogram assert_array_equal(tractogram._affine_to_apply, np.eye(4)) assert_array_equal(tractogram.affine_to_rasmm, np.eye(4)) assert_array_equal(transformed_tractogram._affine_to_apply, affine) @@ -922,14 +937,15 @@ def test_lazy_tractogram_apply_affine(self): # Calling to_world when affine_to_rasmm is None should fail. tractogram = DATA['lazy_tractogram'].copy() tractogram.affine_to_rasmm = None - assert_raises(ValueError, tractogram.to_world) + with pytest.raises(ValueError): + tractogram.to_world() # But calling apply_affine when affine_to_rasmm is None should work. 
tractogram = DATA['lazy_tractogram'].copy() tractogram.affine_to_rasmm = None transformed_tractogram = tractogram.apply_affine(affine) assert_array_equal(transformed_tractogram._affine_to_apply, affine) - assert_true(transformed_tractogram.affine_to_rasmm is None) + assert transformed_tractogram.affine_to_rasmm is None check_tractogram(transformed_tractogram, streamlines=[s*scaling for s in DATA['streamlines']], data_per_streamline=DATA['data_per_streamline'], @@ -937,8 +953,9 @@ def test_lazy_tractogram_apply_affine(self): # Calling apply_affine with lazy=False should fail for LazyTractogram. tractogram = DATA['lazy_tractogram'].copy() - assert_raises(ValueError, tractogram.apply_affine, - affine=np.eye(4), lazy=False) + with pytest.raises(ValueError): + tractogram.apply_affine(affine=np.eye(4), lazy=False) + def test_tractogram_to_world(self): tractogram = DATA['lazy_tractogram'].copy() @@ -952,7 +969,7 @@ def test_tractogram_to_world(self): np.linalg.inv(affine)) tractogram_world = transformed_tractogram.to_world() - assert_true(tractogram_world is not transformed_tractogram) + assert tractogram_world is not transformed_tractogram assert_array_almost_equal(tractogram_world.affine_to_rasmm, np.eye(4)) for s1, s2 in zip(tractogram_world.streamlines, DATA['streamlines']): @@ -967,40 +984,37 @@ def test_tractogram_to_world(self): # Calling to_world when affine_to_rasmm is None should fail. tractogram = DATA['lazy_tractogram'].copy() tractogram.affine_to_rasmm = None - assert_raises(ValueError, tractogram.to_world) + with pytest.raises(ValueError): + tractogram.to_world() def test_lazy_tractogram_copy(self): # Create a copy of the lazy tractogram. tractogram = DATA['lazy_tractogram'].copy() # Check we copied the data and not simply created new references. - assert_true(tractogram is not DATA['lazy_tractogram']) + assert tractogram is not DATA['lazy_tractogram'] # When copying LazyTractogram, the generator function yielding # streamlines should stay the same. - assert_true(tractogram._streamlines - is DATA['lazy_tractogram']._streamlines) + assert tractogram._streamlines is DATA['lazy_tractogram']._streamlines # Copying LazyTractogram, creates new internal LazyDict objects, # but generator functions contained in it should stay the same. - assert_true(tractogram._data_per_streamline - is not DATA['lazy_tractogram']._data_per_streamline) - assert_true(tractogram._data_per_point - is not DATA['lazy_tractogram']._data_per_point) + assert tractogram._data_per_streamline is not DATA['lazy_tractogram']._data_per_streamline + assert tractogram._data_per_point is not DATA['lazy_tractogram']._data_per_point for key in tractogram.data_per_streamline: data = tractogram.data_per_streamline.store[key] expected = DATA['lazy_tractogram'].data_per_streamline.store[key] - assert_true(data is expected) + assert data is expected for key in tractogram.data_per_point: data = tractogram.data_per_point.store[key] expected = DATA['lazy_tractogram'].data_per_point.store[key] - assert_true(data is expected) + assert data is expected # The affine should be a copy. 
- assert_true(tractogram._affine_to_apply - is not DATA['lazy_tractogram']._affine_to_apply) + assert tractogram._affine_to_apply is not DATA['lazy_tractogram']._affine_to_apply assert_array_equal(tractogram._affine_to_apply, DATA['lazy_tractogram']._affine_to_apply) diff --git a/nibabel/streamlines/tests/test_tractogram_file.py b/nibabel/streamlines/tests/test_tractogram_file.py index da5bce4b3f..2550ecf03d 100644 --- a/nibabel/streamlines/tests/test_tractogram_file.py +++ b/nibabel/streamlines/tests/test_tractogram_file.py @@ -4,7 +4,7 @@ from ..tractogram import Tractogram from ..tractogram_file import TractogramFile -from nose.tools import assert_raises, assert_equal +import pytest def test_subclassing_tractogram_file(): @@ -23,7 +23,8 @@ def load(cls, fileobj, lazy_load=True): def create_empty_header(cls): return None - assert_raises(TypeError, DummyTractogramFile, Tractogram()) + with pytest.raises(TypeError): + DummyTractogramFile(Tractogram()) # Missing 'load' method class DummyTractogramFile(TractogramFile): @@ -38,7 +39,8 @@ def save(self, fileobj): def create_empty_header(cls): return None - assert_raises(TypeError, DummyTractogramFile, Tractogram()) + with pytest.raises(TypeError): + DummyTractogramFile(Tractogram()) # Now we have everything required. class DummyTractogramFile(TractogramFile): @@ -57,12 +59,14 @@ def save(self, fileobj): dtf = DummyTractogramFile(Tractogram()) # Default create_empty_header is empty dict - assert_equal(dtf.header, {}) + assert dtf.header == {} def test_tractogram_file(): - assert_raises(NotImplementedError, TractogramFile.is_correct_format, "") - assert_raises(NotImplementedError, TractogramFile.load, "") + with pytest.raises(NotImplementedError): + TractogramFile.is_correct_format("") + with pytest.raises(NotImplementedError): + TractogramFile.load("") # Testing calling the 'save' method of `TractogramFile` object. 
class DummyTractogramFile(TractogramFile): @@ -78,6 +82,5 @@ def load(cls, fileobj, lazy_load=True): def save(self, fileobj): pass - assert_raises(NotImplementedError, - super(DummyTractogramFile, - DummyTractogramFile(Tractogram)).save, "") + with pytest.raises(NotImplementedError): + super(DummyTractogramFile, DummyTractogramFile(Tractogram)).save("") diff --git a/nibabel/streamlines/tests/test_trk.py b/nibabel/streamlines/tests/test_trk.py index a0a3d8a1f3..8fb35fc368 100644 --- a/nibabel/streamlines/tests/test_trk.py +++ b/nibabel/streamlines/tests/test_trk.py @@ -7,9 +7,8 @@ from io import BytesIO -from nibabel.testing import data_path -from nibabel.testing import clear_and_catch_warnings, assert_arr_dict_equal -from nose.tools import assert_equal, assert_raises, assert_true +import pytest +from ...testing import data_path, clear_and_catch_warnings, assert_arr_dict_equal from numpy.testing import assert_array_equal from .test_tractogram import assert_tractogram_equal @@ -23,7 +22,7 @@ DATA = {} -def setup(): +def setup_module(): global DATA DATA['empty_trk_fname'] = pjoin(data_path, "empty.trk") @@ -132,45 +131,51 @@ def test_load_file_with_wrong_information(self): trk_struct[Field.VOXEL_TO_RASMM] = np.zeros((4, 4)) with clear_and_catch_warnings(record=True, modules=[trk_module]) as w: trk = TrkFile.load(BytesIO(trk_bytes)) - assert_equal(len(w), 1) - assert_true(issubclass(w[0].category, HeaderWarning)) - assert_true("identity" in str(w[0].message)) + assert len(w) == 1 + assert issubclass(w[0].category, HeaderWarning) + assert "identity" in str(w[0].message) assert_array_equal(trk.affine, np.eye(4)) # Simulate a TRK where `vox_to_ras` is invalid. trk_struct, trk_bytes = self.trk_with_bytes() trk_struct[Field.VOXEL_TO_RASMM] = np.diag([0, 0, 0, 1]) with clear_and_catch_warnings(record=True, modules=[trk_module]) as w: - assert_raises(HeaderError, TrkFile.load, BytesIO(trk_bytes)) + with pytest.raises(HeaderError): + TrkFile.load(BytesIO(trk_bytes)) # Simulate a TRK file where `voxel_order` was not provided. trk_struct, trk_bytes = self.trk_with_bytes() trk_struct[Field.VOXEL_ORDER] = b'' with clear_and_catch_warnings(record=True, modules=[trk_module]) as w: TrkFile.load(BytesIO(trk_bytes)) - assert_equal(len(w), 1) - assert_true(issubclass(w[0].category, HeaderWarning)) - assert_true("LPS" in str(w[0].message)) + assert len(w) == 1 + assert issubclass(w[0].category, HeaderWarning) + assert "LPS" in str(w[0].message) # Simulate a TRK file with an unsupported version. trk_struct, trk_bytes = self.trk_with_bytes() trk_struct['version'] = 123 - assert_raises(HeaderError, TrkFile.load, BytesIO(trk_bytes)) + with pytest.raises(HeaderError): + TrkFile.load(BytesIO(trk_bytes)) + # Simulate a TRK file with a wrong hdr_size. trk_struct, trk_bytes = self.trk_with_bytes() trk_struct['hdr_size'] = 1234 - assert_raises(HeaderError, TrkFile.load, BytesIO(trk_bytes)) + with pytest.raises(HeaderError): + TrkFile.load(BytesIO(trk_bytes)) # Simulate a TRK file with a wrong scalar_name. trk_struct, trk_bytes = self.trk_with_bytes('complex_trk_fname') trk_struct['scalar_name'][0, 0] = b'colors\x003\x004' - assert_raises(HeaderError, TrkFile.load, BytesIO(trk_bytes)) + with pytest.raises(HeaderError): + TrkFile.load(BytesIO(trk_bytes)) # Simulate a TRK file with a wrong property_name. 
trk_struct, trk_bytes = self.trk_with_bytes('complex_trk_fname') trk_struct['property_name'][0, 0] = b'colors\x003\x004' - assert_raises(HeaderError, TrkFile.load, BytesIO(trk_bytes)) + with pytest.raises(HeaderError): + TrkFile.load(BytesIO(trk_bytes)) def test_load_trk_version_1(self): # Simulate and test a TRK (version 1). @@ -183,9 +188,9 @@ def test_load_trk_version_1(self): trk_struct['version'] = 1 with clear_and_catch_warnings(record=True, modules=[trk_module]) as w: trk = TrkFile.load(BytesIO(trk_bytes)) - assert_equal(len(w), 1) - assert_true(issubclass(w[0].category, HeaderWarning)) - assert_true("identity" in str(w[0].message)) + assert len(w) == 1 + assert issubclass(w[0].category, HeaderWarning) + assert "identity" in str(w[0].message) assert_array_equal(trk.affine, np.eye(4)) assert_array_equal(trk.header['version'], 1) @@ -195,8 +200,8 @@ def test_load_complex_file_in_big_endian(self): # We use hdr_size as an indicator of little vs big endian. good_orders = '>' if sys.byteorder == 'little' else '>=' hdr_size = trk_struct['hdr_size'] - assert_true(hdr_size.dtype.byteorder in good_orders) - assert_equal(hdr_size, 1000) + assert hdr_size.dtype.byteorder in good_orders + assert hdr_size == 1000 for lazy_load in [False, True]: trk = TrkFile.load(DATA['complex_trk_big_endian_fname'], @@ -205,7 +210,7 @@ def test_load_complex_file_in_big_endian(self): def test_tractogram_file_properties(self): trk = TrkFile.load(DATA['simple_trk_fname']) - assert_equal(trk.streamlines, trk.tractogram.streamlines) + assert trk.streamlines == trk.tractogram.streamlines assert_array_equal(trk.affine, trk.header[Field.VOXEL_TO_RASMM]) def test_write_empty_file(self): @@ -223,8 +228,7 @@ def test_write_empty_file(self): assert_tractogram_equal(new_trk.tractogram, new_trk_orig.tractogram) trk_file.seek(0, os.SEEK_SET) - assert_equal(trk_file.read(), - open(DATA['empty_trk_fname'], 'rb').read()) + assert trk_file.read() == open(DATA['empty_trk_fname'], 'rb').read() def test_write_simple_file(self): tractogram = Tractogram(DATA['streamlines'], @@ -242,8 +246,7 @@ def test_write_simple_file(self): assert_tractogram_equal(new_trk.tractogram, new_trk_orig.tractogram) trk_file.seek(0, os.SEEK_SET) - assert_equal(trk_file.read(), - open(DATA['simple_trk_fname'], 'rb').read()) + assert trk_file.read() == open(DATA['simple_trk_fname'], 'rb').read() def test_write_complex_file(self): # With scalars @@ -292,8 +295,7 @@ def test_write_complex_file(self): assert_tractogram_equal(new_trk.tractogram, new_trk_orig.tractogram) trk_file.seek(0, os.SEEK_SET) - assert_equal(trk_file.read(), - open(DATA['complex_trk_fname'], 'rb').read()) + assert trk_file.read() == open(DATA['complex_trk_fname'], 'rb').read() def test_load_write_file(self): for fname in [DATA['empty_trk_fname'], @@ -328,8 +330,7 @@ def test_load_write_LPS_file(self): assert_tractogram_equal(new_trk.tractogram, new_trk_orig.tractogram) trk_file.seek(0, os.SEEK_SET) - assert_equal(trk_file.read(), - open(DATA['standard_LPS_trk_fname'], 'rb').read()) + assert trk_file.read() == open(DATA['standard_LPS_trk_fname'], 'rb').read() # Test writing a file where the header is missing the # Field.VOXEL_ORDER. 
@@ -352,8 +353,7 @@ def test_load_write_LPS_file(self): assert_tractogram_equal(new_trk.tractogram, new_trk_orig.tractogram) trk_file.seek(0, os.SEEK_SET) - assert_equal(trk_file.read(), - open(DATA['standard_LPS_trk_fname'], 'rb').read()) + assert trk_file.read() == open(DATA['standard_LPS_trk_fname'], 'rb').read() def test_write_optional_header_fields(self): # The TRK file format doesn't support additional header fields. @@ -367,7 +367,7 @@ def test_write_optional_header_fields(self): trk_file.seek(0, os.SEEK_SET) new_trk = TrkFile.load(trk_file) - assert_true("extra" not in new_trk.header) + assert "extra" not in new_trk.header def test_write_too_many_scalars_and_properties(self): # TRK supports up to 10 data_per_point. @@ -395,7 +395,8 @@ def test_write_too_many_scalars_and_properties(self): affine_to_rasmm=np.eye(4)) trk = TrkFile(tractogram) - assert_raises(ValueError, trk.save, BytesIO()) + with pytest.raises(ValueError): + trk.save(BytesIO()) # TRK supports up to 10 data_per_streamline. data_per_streamline = {} @@ -421,7 +422,8 @@ def test_write_too_many_scalars_and_properties(self): data_per_streamline=data_per_streamline) trk = TrkFile(tractogram) - assert_raises(ValueError, trk.save, BytesIO()) + with pytest.raises(ValueError): + trk.save(BytesIO()) def test_write_scalars_and_properties_name_too_long(self): # TRK supports data_per_point name up to 20 characters. @@ -437,7 +439,8 @@ def test_write_scalars_and_properties_name_too_long(self): trk = TrkFile(tractogram) if nb_chars > 18: - assert_raises(ValueError, trk.save, BytesIO()) + with pytest.raises(ValueError): + trk.save(BytesIO()) else: trk.save(BytesIO()) @@ -448,7 +451,8 @@ def test_write_scalars_and_properties_name_too_long(self): trk = TrkFile(tractogram) if nb_chars > 20: - assert_raises(ValueError, trk.save, BytesIO()) + with pytest.raises(ValueError): + trk.save(BytesIO()) else: trk.save(BytesIO()) @@ -465,7 +469,8 @@ def test_write_scalars_and_properties_name_too_long(self): trk = TrkFile(tractogram) if nb_chars > 18: - assert_raises(ValueError, trk.save, BytesIO()) + with pytest.raises(ValueError): + trk.save(BytesIO()) else: trk.save(BytesIO()) @@ -476,7 +481,8 @@ def test_write_scalars_and_properties_name_too_long(self): trk = TrkFile(tractogram) if nb_chars > 20: - assert_raises(ValueError, trk.save, BytesIO()) + with pytest.raises(ValueError): + trk.save(BytesIO()) else: trk.save(BytesIO()) @@ -498,32 +504,37 @@ def test_header_read_restore(self): hdr_from_fname['_offset_data'] += hdr_pos # Correct for start position assert_arr_dict_equal(TrkFile._read_header(bio), hdr_from_fname) # Check fileobject file position has not changed - assert_equal(bio.tell(), hdr_pos) + assert bio.tell() == hdr_pos def test_encode_names(): # Test function for encoding numbers into property names b0 = b'\x00' - assert_equal(encode_value_in_name(0, 'foo', 10), - b'foo' + b0 * 7) - assert_equal(encode_value_in_name(1, 'foo', 10), - b'foo' + b0 * 7) - assert_equal(encode_value_in_name(8, 'foo', 10), - b'foo' + b0 + b'8' + b0 * 5) - assert_equal(encode_value_in_name(40, 'foobar', 10), - b'foobar' + b0 + b'40' + b0) - assert_equal(encode_value_in_name(1, 'foobarbazz', 10), b'foobarbazz') - assert_raises(ValueError, encode_value_in_name, 1, 'foobarbazzz', 10) - assert_raises(ValueError, encode_value_in_name, 2, 'foobarbaz', 10) - assert_equal(encode_value_in_name(2, 'foobarba', 10), b'foobarba\x002') + assert encode_value_in_name(0, 'foo', 10) == b'foo' + b0 * 7 + assert encode_value_in_name(1, 'foo', 10) == b'foo' + b0 * 7 + assert 
encode_value_in_name(8, 'foo', 10) == b'foo' + b0 + b'8' + b0 * 5
+    assert encode_value_in_name(40, 'foobar', 10) == b'foobar' + b0 + b'40' + b0
+    assert encode_value_in_name(1, 'foobarbazz', 10) == b'foobarbazz'
+
+    with pytest.raises(ValueError):
+        encode_value_in_name(1, 'foobarbazzz', 10)
+
+    with pytest.raises(ValueError):
+        encode_value_in_name(2, 'foobarbaz', 10)
+
+    assert encode_value_in_name(2, 'foobarba', 10) == b'foobarba\x002'
 
 
 def test_decode_names():
     # Test function for decoding name string into name, number
     b0 = b'\x00'
-    assert_equal(decode_value_from_name(b''), ('', 0))
-    assert_equal(decode_value_from_name(b'foo' + b0 * 7), ('foo', 1))
-    assert_equal(decode_value_from_name(b'foo\x008' + b0 * 5), ('foo', 8))
-    assert_equal(decode_value_from_name(b'foobar\x0010\x00'), ('foobar', 10))
-    assert_raises(ValueError, decode_value_from_name, b'foobar\x0010\x01')
-    assert_raises(HeaderError, decode_value_from_name, b'foo\x0010\x00111')
+    assert decode_value_from_name(b'') == ('', 0)
+    assert decode_value_from_name(b'foo' + b0 * 7) == ('foo', 1)
+    assert decode_value_from_name(b'foo\x008' + b0 * 5) == ('foo', 8)
+    assert decode_value_from_name(b'foobar\x0010\x00') == ('foobar', 10)
+
+    with pytest.raises(ValueError):
+        decode_value_from_name(b'foobar\x0010\x01')
+
+    with pytest.raises(HeaderError):
+        decode_value_from_name(b'foo\x0010\x00111')
diff --git a/nibabel/streamlines/tests/test_utils.py b/nibabel/streamlines/tests/test_utils.py
index 939ee9bb9e..bcdde6d013 100644
--- a/nibabel/streamlines/tests/test_utils.py
+++ b/nibabel/streamlines/tests/test_utils.py
@@ -4,7 +4,8 @@
 from nibabel.testing import data_path
 from numpy.testing import assert_array_equal
-from nose.tools import assert_raises
+
+import pytest
 
 from ..utils import get_affine_from_reference
@@ -17,7 +18,8 @@ def test_get_affine_from_reference():
     # Get affine from an numpy array.
     assert_array_equal(get_affine_from_reference(affine), affine)
     wrong_ref = np.array([[1, 2, 3], [4, 5, 6]])
-    assert_raises(ValueError, get_affine_from_reference, wrong_ref)
+    with pytest.raises(ValueError):
+        get_affine_from_reference(wrong_ref)
 
     # Get affine from a `SpatialImage`.
     assert_array_equal(get_affine_from_reference(img), affine)
diff --git a/nibabel/streamlines/tractogram.py b/nibabel/streamlines/tractogram.py
index 67afb4b211..e8ecbac4ff 100644
--- a/nibabel/streamlines/tractogram.py
+++ b/nibabel/streamlines/tractogram.py
@@ -1,8 +1,8 @@
 import copy
 import numbers
 import numpy as np
-import collections
 from warnings import warn
+from collections.abc import MutableMapping
 
 from nibabel.affines import apply_affine
 
@@ -19,8 +19,8 @@ def is_lazy_dict(obj):
     return is_data_dict(obj) and callable(list(obj.store.values())[0])
 
 
-class SliceableDataDict(collections.MutableMapping):
-    """ Dictionary for which key access can do slicing on the values.
+class SliceableDataDict(MutableMapping):
+    r""" Dictionary for which key access can do slicing on the values.
 
     This container behaves like a standard dictionary but extends key access to
     allow keys for key access to be indices slicing into the contained ndarray
@@ -73,7 +73,7 @@ def __len__(self):
 
 
 class PerArrayDict(SliceableDataDict):
-    """ Dictionary for which key access can do slicing on the values.
+    r""" Dictionary for which key access can do slicing on the values.
This container behaves like a standard dictionary but extends key access to allow keys for key access to be indices slicing into the contained ndarray @@ -181,7 +181,7 @@ def _extend_entry(self, key, value): self[key].extend(value) -class LazyDict(collections.MutableMapping): +class LazyDict(MutableMapping): """ Dictionary of generator functions. This container behaves like a dictionary but it makes sure its elements are @@ -263,9 +263,9 @@ class Tractogram(object): choice as long as you provide the correct `affine_to_rasmm` matrix, at construction time. When applied to streamlines coordinates, that transformation matrix should bring the streamlines back to world space - (RAS+ and mm space) [1]_. + (RAS+ and mm space) [#]_. - Moreover, when streamlines are mapped back to voxel space [2]_, a + Moreover, when streamlines are mapped back to voxel space [#]_, a streamline point located at an integer coordinate (i,j,k) is considered to be at the center of the corresponding voxel. This is in contrast with other conventions where it might have referred to a corner. @@ -292,8 +292,8 @@ class Tractogram(object): References ---------- - [1] http://nipy.org/nibabel/coordinate_systems.html#naming-reference-spaces - [2] http://nipy.org/nibabel/coordinate_systems.html#voxel-coordinates-are-in-voxel-space + .. [#] http://nipy.org/nibabel/coordinate_systems.html#naming-reference-spaces + .. [#] http://nipy.org/nibabel/coordinate_systems.html#voxel-coordinates-are-in-voxel-space """ def __init__(self, streamlines=None, data_per_streamline=None, @@ -432,11 +432,8 @@ def apply_affine(self, affine, lazy=False): if np.all(affine == np.eye(4)): return self # No transformation. - BUFFER_SIZE = 10000000 # About 128 Mb since pts shape is 3. - for start in range(0, len(self.streamlines.data), BUFFER_SIZE): - end = start + BUFFER_SIZE - pts = self.streamlines._data[start:end] - self.streamlines.data[start:end] = apply_affine(affine, pts) + for i in range(len(self.streamlines)): + self.streamlines[i] = apply_affine(affine, self.streamlines[i]) if self.affine_to_rasmm is not None: # Update the affine that brings back the streamlines to RASmm. @@ -518,9 +515,9 @@ class LazyTractogram(Tractogram): choice as long as you provide the correct `affine_to_rasmm` matrix, at construction time. When applied to streamlines coordinates, that transformation matrix should bring the streamlines back to world space - (RAS+ and mm space) [1]_. + (RAS+ and mm space) [#]_. - Moreover, when streamlines are mapped back to voxel space [2]_, a + Moreover, when streamlines are mapped back to voxel space [#]_, a streamline point located at an integer coordinate (i,j,k) is considered to be at the center of the corresponding voxel. This is in contrast with other conventions where it might have referred to a corner. @@ -556,8 +553,8 @@ class LazyTractogram(Tractogram): References ---------- - [1] http://nipy.org/nibabel/coordinate_systems.html#naming-reference-spaces - [2] http://nipy.org/nibabel/coordinate_systems.html#voxel-coordinates-are-in-voxel-space + .. [#] http://nipy.org/nibabel/coordinate_systems.html#naming-reference-spaces + .. 
[#] http://nipy.org/nibabel/coordinate_systems.html#voxel-coordinates-are-in-voxel-space """ def __init__(self, streamlines=None, data_per_streamline=None, diff --git a/nibabel/streamlines/tractogram_file.py b/nibabel/streamlines/tractogram_file.py index d422560280..f8184c8ba9 100644 --- a/nibabel/streamlines/tractogram_file.py +++ b/nibabel/streamlines/tractogram_file.py @@ -1,7 +1,6 @@ """ Define abstract interface for Tractogram file classes """ -from abc import ABCMeta, abstractmethod -from six import with_metaclass +from abc import ABC, abstractmethod from .header import Field @@ -34,7 +33,7 @@ def __init__(self, callable): super(abstractclassmethod, self).__init__(callable) -class TractogramFile(with_metaclass(ABCMeta)): +class TractogramFile(ABC): """ Convenience class to encapsulate tractogram file format. """ def __init__(self, tractogram, header=None): diff --git a/nibabel/streamlines/trk.py b/nibabel/streamlines/trk.py index 7ff80dc59f..2397a3ff24 100644 --- a/nibabel/streamlines/trk.py +++ b/nibabel/streamlines/trk.py @@ -1,4 +1,3 @@ -from __future__ import division # Definition of trackvis header structure: # http://www.trackvis.org/docs/?subsect=fileformat @@ -9,11 +8,12 @@ import warnings import numpy as np +from numpy.compat.py3k import asstr + import nibabel as nib from nibabel.openers import Opener -from nibabel.py3k import asstr -from nibabel.volumeutils import (native_code, swapped_code) +from nibabel.volumeutils import (native_code, swapped_code, endian_codes) from nibabel.orientations import (aff2axcodes, axcodes2ornt) from .array_sequence import create_arraysequences_from_generator @@ -266,10 +266,14 @@ def is_correct_format(cls, fileobj): return magic_number == cls.MAGIC_NUMBER @classmethod - def _default_structarr(cls): + def _default_structarr(cls, endianness=None): """ Return an empty compliant TRK header as numpy structured array """ - st_arr = np.zeros((), dtype=header_2_dtype) + dt = header_2_dtype + if endianness is not None: + endianness = endian_codes[endianness] + dt = dt.newbyteorder(endianness) + st_arr = np.zeros((), dtype=dt) # Default values st_arr[Field.MAGIC_NUMBER] = cls.MAGIC_NUMBER @@ -283,10 +287,10 @@ def _default_structarr(cls): return st_arr @classmethod - def create_empty_header(cls): + def create_empty_header(cls, endianness=None): """ Return an empty compliant TRK header as dict """ - st_arr = cls._default_structarr() + st_arr = cls._default_structarr(endianness) return dict(zip(st_arr.dtype.names, st_arr.tolist())) @classmethod @@ -368,8 +372,23 @@ def _read(): tractogram = LazyTractogram.from_data_func(_read) else: + + # Speed up loading by guessing a suitable buffer size. + with Opener(fileobj) as f: + old_file_position = f.tell() + f.seek(0, os.SEEK_END) + size = f.tell() + f.seek(old_file_position, os.SEEK_SET) + + # Buffer size is in mega bytes. + mbytes = size // (1024 * 1024) + sizes = [mbytes, 4, 4] + if hdr["nb_scalars_per_point"] > 0: + sizes = [mbytes // 2, mbytes // 2, 4] + trk_reader = cls._read(fileobj, hdr) - arr_seqs = create_arraysequences_from_generator(trk_reader, n=3) + arr_seqs = create_arraysequences_from_generator(trk_reader, n=3, + buffer_sizes=sizes) streamlines, scalars, properties = arr_seqs properties = np.asarray(properties) # Actually a 2d array. tractogram = Tractogram(streamlines) @@ -396,7 +415,7 @@ def save(self, fileobj): of the TRK header data). 
""" # Enforce little-endian byte order for header - header = self._default_structarr().newbyteorder('<') + header = self._default_structarr(endianness='little') # Override hdr's fields by those contained in `header`. for k, v in self.header.items(): diff --git a/nibabel/testing/__init__.py b/nibabel/testing/__init__.py index 2c0a93fe32..52055ebcc3 100644 --- a/nibabel/testing/__init__.py +++ b/nibabel/testing/__init__.py @@ -7,41 +7,48 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## ''' Utilities for testing ''' -from __future__ import division, print_function import re import os import sys import warnings -from os.path import dirname, abspath, join as pjoin +from pkg_resources import resource_filename + +import unittest import numpy as np from numpy.testing import assert_array_equal -from numpy.testing import dec -skipif = dec.skipif -slow = dec.slow -# Allow failed import of nose if not now running tests -try: - from nose.tools import (assert_equal, assert_not_equal, - assert_true, assert_false, assert_raises) -except ImportError: - pass +from .np_features import memmap_after_ufunc +from .helpers import bytesio_filemap, bytesio_round_trip, assert_data_similar -from six.moves import zip_longest +from itertools import zip_longest -# set path to example data -data_path = abspath(pjoin(dirname(__file__), '..', 'tests', 'data')) +def test_data(subdir=None, fname=None): + if subdir is None: + resource = os.path.join('tests', 'data') + elif subdir in ('gifti', 'nicom', 'externals'): + resource = os.path.join(subdir, 'tests', 'data') + else: + raise ValueError("Unknown test data directory: %s" % subdir) + + if fname is not None: + resource = os.path.join(resource, fname) + + return resource_filename('nibabel', resource) + + +# set path to example data +data_path = test_data() -from .np_features import memmap_after_ufunc def assert_dt_equal(a, b): """ Assert two numpy dtype specifiers are equal Avoids failed comparison between int32 / int64 and intp """ - assert_equal(np.dtype(a).str, np.dtype(b).str) + assert np.dtype(a).str == np.dtype(b).str def assert_allclose_safely(a, b, match_nans=True, rtol=1e-5, atol=1e-8): @@ -51,7 +58,7 @@ def assert_allclose_safely(a, b, match_nans=True, rtol=1e-5, atol=1e-8): a, b = np.broadcast_arrays(a, b) if match_nans: nans = np.isnan(a) - np.testing.assert_array_equal(nans, np.isnan(b)) + assert_array_equal(nans, np.isnan(b)) to_test = ~nans else: to_test = np.ones(a.shape, dtype=bool) @@ -64,13 +71,13 @@ def assert_allclose_safely(a, b, match_nans=True, rtol=1e-5, atol=1e-8): a = a.astype(float) if b.dtype.kind in 'ui': b = b.astype(float) - assert_true(np.allclose(a, b, rtol=rtol, atol=atol)) + assert np.allclose(a, b, rtol=rtol, atol=atol) def assert_arrays_equal(arrays1, arrays2): """ Check two iterables yield the same sequence of arrays. 
""" for arr1, arr2 in zip_longest(arrays1, arrays2, fillvalue=None): - assert_false(arr1 is None or arr2 is None) + assert (arr1 is not None and arr2 is not None) assert_array_equal(arr1, arr2) @@ -187,27 +194,30 @@ class suppress_warnings(error_warnings): filter = 'ignore' -class catch_warn_reset(clear_and_catch_warnings): - - def __init__(self, *args, **kwargs): - warnings.warn('catch_warn_reset is deprecated and will be removed in ' - 'nibabel v3.0; use nibabel.testing.clear_and_catch_warnings.', - FutureWarning) - - EXTRA_SET = os.environ.get('NIPY_EXTRA_TESTS', '').split(',') def runif_extra_has(test_str): """Decorator checks to see if NIPY_EXTRA_TESTS env var contains test_str""" - return skipif(test_str not in EXTRA_SET, - "Skip {0} tests.".format(test_str)) + return unittest.skipUnless(test_str in EXTRA_SET, "Skip {0} tests.".format(test_str)) def assert_arr_dict_equal(dict1, dict2): """ Assert that two dicts are equal, where dicts contain arrays """ - assert_equal(set(dict1), set(dict2)) + assert set(dict1) == set(dict2) for key, value1 in dict1.items(): value2 = dict2[key] assert_array_equal(value1, value2) + + +class BaseTestCase(unittest.TestCase): + """ TestCase that does not attempt to run if prefixed with a ``_`` + + This restores the nose-like behavior of skipping so-named test cases + in test runners like pytest. + """ + def setUp(self): + if self.__class__.__name__.startswith('_'): + raise unittest.SkipTest("Base test case - subclass to run") + super().setUp() diff --git a/nibabel/tests/test_helpers.py b/nibabel/testing/helpers.py similarity index 61% rename from nibabel/tests/test_helpers.py rename to nibabel/testing/helpers.py index 7b05a4d666..49112fddfb 100644 --- a/nibabel/tests/test_helpers.py +++ b/nibabel/testing/helpers.py @@ -4,12 +4,9 @@ import numpy as np -from ..openers import ImageOpener -from ..tmpdirs import InTemporaryDirectory from ..optpkg import optional_package _, have_scipy, _ = optional_package('scipy.io') -from nose.tools import assert_true from numpy.testing import assert_array_equal @@ -31,33 +28,6 @@ def bytesio_round_trip(img): return klass.from_file_map(bytes_map) -def bz2_mio_error(): - """ Return True if writing mat 4 file fails - - Writing an empty string can fail for bz2 objects in python 3.3: - - https://bugs.python.org/issue16828 - - This in turn causes scipy to give this error when trying to write bz2 mat - files. 
- - This won't cause a problem for scipy releases after Jan 24 2014 because of - commit 98ef522d99 (in scipy) - """ - if not have_scipy: - return True - import scipy.io - - with InTemporaryDirectory(): - with ImageOpener('test.mat.bz2', 'wb') as fobj: - try: - scipy.io.savemat(fobj, {'a': 1}, format='4') - except ValueError: - return True - else: - return False - - def assert_data_similar(arr, params): """ Check data is the same if recorded, otherwise check summaries @@ -78,6 +48,6 @@ def assert_data_similar(arr, params): return summary = params['data_summary'] real_arr = np.asarray(arr) - assert_true(np.allclose( + assert np.allclose( (real_arr.min(), real_arr.max(), real_arr.mean()), - (summary['min'], summary['max'], summary['mean']))) + (summary['min'], summary['max'], summary['mean'])) diff --git a/nibabel/tests/data/check_parrec_reslice.py b/nibabel/tests/data/check_parrec_reslice.py index cc2a5942b5..c7352c3f89 100644 --- a/nibabel/tests/data/check_parrec_reslice.py +++ b/nibabel/tests/data/check_parrec_reslice.py @@ -39,7 +39,7 @@ def resample_img2img(img_to, img_from, order=1, out_class=nib.Nifti1Image): from scipy import ndimage as spnd vox2vox = npl.inv(img_from.affine).dot(img_to.affine) rzs, trans = to_matvec(vox2vox) - data = spnd.affine_transform(img_from.get_data(), + data = spnd.affine_transform(img_from.get_fdata(), rzs, trans, img_to.shape, @@ -57,7 +57,7 @@ def gmean_norm(data): np.set_printoptions(suppress=True, precision=4) normal_fname = "Phantom_EPI_3mm_tra_SENSE_6_1.PAR" normal_img = parrec.load(normal_fname) - normal_data = normal_img.get_data() + normal_data = normal_img.get_fdata() normal_normed = gmean_norm(normal_data) print("RMS of standard image {:<44}: {}".format( @@ -69,7 +69,7 @@ def gmean_norm(data): continue funny_img = parrec.load(parfile) fixed_img = resample_img2img(normal_img, funny_img) - fixed_data = fixed_img.get_data() + fixed_data = fixed_img.get_fdata() difference_data = normal_normed - gmean_norm(fixed_data) print('RMS resliced {:<52} : {}'.format( parfile, diff --git a/nibabel/tests/data/gen_standard.py b/nibabel/tests/data/gen_standard.py index b97da8ff2f..f966b5599d 100644 --- a/nibabel/tests/data/gen_standard.py +++ b/nibabel/tests/data/gen_standard.py @@ -52,35 +52,36 @@ def _gen_straight_streamline(start, end, steps=3): return streamlines -rng = np.random.RandomState(42) - -width = 4 # Coronal -height = 5 # Sagittal -depth = 7 # Axial - -voxel_size = np.array((1., 3., 2.)) - -# Generate a random mask with voxel order RAS+. -mask = rng.rand(width, height, depth) > 0.8 -mask = (255*mask).astype(np.uint8) - -# Build tractogram -streamlines = mark_the_spot(mask) -tractogram = nib.streamlines.Tractogram(streamlines) - -# Build header -affine = np.eye(4) -affine[range(3), range(3)] = voxel_size -header = {Field.DIMENSIONS: (width, height, depth), - Field.VOXEL_SIZES: voxel_size, - Field.VOXEL_TO_RASMM: affine, - Field.VOXEL_ORDER: 'RAS'} - -# Save the standard mask. -nii = nib.Nifti1Image(mask, affine=affine) -nib.save(nii, "standard.nii.gz") - -# Save the standard tractogram in every available file format. -for ext, cls in FORMATS.items(): - tfile = cls(tractogram, header) - nib.streamlines.save(tfile, "standard" + ext) +if __name__ == '__main__': + rng = np.random.RandomState(42) + + width = 4 # Coronal + height = 5 # Sagittal + depth = 7 # Axial + + voxel_size = np.array((1., 3., 2.)) + + # Generate a random mask with voxel order RAS+. 
+ mask = rng.rand(width, height, depth) > 0.8 + mask = (255*mask).astype(np.uint8) + + # Build tractogram + streamlines = mark_the_spot(mask) + tractogram = nib.streamlines.Tractogram(streamlines) + + # Build header + affine = np.eye(4) + affine[range(3), range(3)] = voxel_size + header = {Field.DIMENSIONS: (width, height, depth), + Field.VOXEL_SIZES: voxel_size, + Field.VOXEL_TO_RASMM: affine, + Field.VOXEL_ORDER: 'RAS'} + + # Save the standard mask. + nii = nib.Nifti1Image(mask, affine=affine) + nib.save(nii, "standard.nii.gz") + + # Save the standard tractogram in every available file format. + for ext, cls in FORMATS.items(): + tfile = cls(tractogram, header) + nib.streamlines.save(tfile, "standard" + ext) diff --git a/nibabel/tests/data/make_moved_anat.py b/nibabel/tests/data/make_moved_anat.py index 6fba2d0902..ec0817885c 100644 --- a/nibabel/tests/data/make_moved_anat.py +++ b/nibabel/tests/data/make_moved_anat.py @@ -12,11 +12,12 @@ from nibabel.eulerangles import euler2mat from nibabel.affines import from_matvec -img = nib.load('anatomical.nii') -some_rotations = euler2mat(0.1, 0.2, 0.3) -extra_affine = from_matvec(some_rotations, [3, 4, 5]) -moved_anat = nib.Nifti1Image(img.dataobj, - extra_affine.dot(img.affine), - img.header) -moved_anat.set_data_dtype(np.float32) -nib.save(moved_anat, 'anat_moved.nii') +if __name__ == '__main__': + img = nib.load('anatomical.nii') + some_rotations = euler2mat(0.1, 0.2, 0.3) + extra_affine = from_matvec(some_rotations, [3, 4, 5]) + moved_anat = nib.Nifti1Image(img.dataobj, + extra_affine.dot(img.affine), + img.header) + moved_anat.set_data_dtype(np.float32) + nib.save(moved_anat, 'anat_moved.nii') diff --git a/nibabel/tests/data/minc1-no-att.mnc b/nibabel/tests/data/minc1-no-att.mnc index 1fcd595f7e..b1ce938403 100644 Binary files a/nibabel/tests/data/minc1-no-att.mnc and b/nibabel/tests/data/minc1-no-att.mnc differ diff --git a/nibabel/tests/nibabel_data.py b/nibabel/tests/nibabel_data.py index 529e103f46..3c1b58502d 100644 --- a/nibabel/tests/nibabel_data.py +++ b/nibabel/tests/nibabel_data.py @@ -4,7 +4,7 @@ from os import environ, listdir from os.path import dirname, realpath, join as pjoin, isdir, exists -from ..testing import skipif +import unittest def get_nibabel_data(): @@ -39,11 +39,11 @@ def needs_nibabel_data(subdir=None): """ nibabel_data = get_nibabel_data() if nibabel_data == '': - return skipif(True, "Need nibabel-data directory for this test") + return unittest.skip("Need nibabel-data directory for this test") if subdir is None: - return skipif(False) + return lambda x: x required_path = pjoin(nibabel_data, subdir) # Path should not be empty (as is the case for not-updated submodules) have_files = exists(required_path) and len(listdir(required_path)) > 0 - return skipif(not have_files, - "Need files in {0} for these tests".format(required_path)) + return unittest.skipUnless(have_files, + "Need files in {0} for these tests".format(required_path)) diff --git a/nibabel/tests/scriptrunner.py b/nibabel/tests/scriptrunner.py index a82fdaa1e8..0027cc36b2 100644 --- a/nibabel/tests/scriptrunner.py +++ b/nibabel/tests/scriptrunner.py @@ -18,22 +18,8 @@ from subprocess import Popen, PIPE -try: # Python 2 - string_types = basestring, -except NameError: # Python 3 - string_types = str, - -def _get_package(): - """ Workaround for missing ``__package__`` in Python 3.2 - """ - if '__package__' in globals() and not __package__ is None: - return __package__ - return __name__.split('.', 1)[0] - - -# Same as __package__ for Python 2.6, 2.7 and >= 
3.3 -MY_PACKAGE = _get_package() +MY_PACKAGE = __package__ def local_script_dir(script_sdir): @@ -112,12 +98,12 @@ def run_command(self, cmd, check_code=True): ------- returncode : int return code from execution of `cmd` - stdout : bytes (python 3) or str (python 2) + stdout : bytes stdout from `cmd` - stderr : bytes (python 3) or str (python 2) + stderr : bytes stderr from `cmd` """ - if isinstance(cmd, string_types): + if isinstance(cmd, str): cmd = [cmd] else: cmd = list(cmd) @@ -129,9 +115,6 @@ def run_command(self, cmd, check_code=True): # the script through the Python interpreter cmd = [sys.executable, pjoin(self.local_script_dir, cmd[0])] + cmd[1:] - elif os.name == 'nt': - # Need .bat file extension for windows - cmd[0] += '.bat' if os.name == 'nt': # Quote any arguments with spaces. The quotes delimit the arguments # on Windows, and the arguments might be file paths with spaces. @@ -152,7 +135,7 @@ def run_command(self, cmd, check_code=True): env['PYTHONPATH'] = self.local_module_dir + pathsep + pypath proc = Popen(cmd, stdout=PIPE, stderr=PIPE, env=env) stdout, stderr = proc.communicate() - if proc.poll() == None: + if proc.poll() is None: proc.terminate() if check_code and proc.returncode != 0: raise RuntimeError( diff --git a/nibabel/tests/test_affines.py b/nibabel/tests/test_affines.py index e66ed46190..6fd2f59fab 100644 --- a/nibabel/tests/test_affines.py +++ b/nibabel/tests/test_affines.py @@ -7,10 +7,10 @@ from ..eulerangles import euler2mat from ..affines import (AffineError, apply_affine, append_diag, to_matvec, - from_matvec, dot_reduce, voxel_sizes) + from_matvec, dot_reduce, voxel_sizes, obliquity) -from nose.tools import assert_equal, assert_raises +import pytest from numpy.testing import assert_array_equal, assert_almost_equal, \ assert_array_almost_equal @@ -80,7 +80,7 @@ def test_matrix_vector(): vec = xform[:-1, -1] assert_array_equal(newmat, mat) assert_array_equal(newvec, vec) - assert_equal(newvec.shape, (M - 1,)) + assert newvec.shape == (M - 1,) assert_array_equal(from_matvec(mat, vec), xform) # Check default translation works xform_not = xform[:] @@ -126,17 +126,19 @@ def test_append_diag(): [0, 0, 0, 5, 9], [0, 0, 0, 0, 1]]) # Length of starts has to match length of steps - assert_raises(AffineError, append_diag, aff, [5, 6], [9]) + with pytest.raises(AffineError): + append_diag(aff, [5, 6], [9]) def test_dot_reduce(): # Chaining numpy dot # Error for no arguments - assert_raises(TypeError, dot_reduce) + with pytest.raises(TypeError): + dot_reduce() # Anything at all on its own, passes through - assert_equal(dot_reduce(1), 1) - assert_equal(dot_reduce(None), None) - assert_equal(dot_reduce([1, 2, 3]), [1, 2, 3]) + assert dot_reduce(1) == 1 + assert dot_reduce(None) is None + assert dot_reduce([1, 2, 3]) == [1, 2, 3] # Two or more -> dot product vec = [1, 2, 3] mat = np.arange(4, 13).reshape((3, 3)) @@ -178,3 +180,15 @@ def test_voxel_sizes(): rot_affine[:3, :3] = rotation full_aff = rot_affine.dot(aff) assert_almost_equal(voxel_sizes(full_aff), vox_sizes) + + +def test_obliquity(): + """Check the calculation of inclination of an affine axes.""" + from math import pi + aligned = np.diag([2.0, 2.0, 2.3, 1.0]) + aligned[:-1, -1] = [-10, -10, -7] + R = from_matvec(euler2mat(x=0.09, y=0.001, z=0.001), [0.0, 0.0, 0.0]) + oblique = R.dot(aligned) + assert_almost_equal(obliquity(aligned), [0.0, 0.0, 0.0]) + assert_almost_equal(obliquity(oblique) * 180 / pi, + [0.0810285, 5.1569949, 5.1569376]) diff --git a/nibabel/tests/test_analyze.py 
b/nibabel/tests/test_analyze.py index 25ee778db9..b092a2334c 100644 --- a/nibabel/tests/test_analyze.py +++ b/nibabel/tests/test_analyze.py @@ -20,7 +20,7 @@ import numpy as np -from six import BytesIO, StringIO +from io import BytesIO, StringIO from ..spatialimages import (HeaderDataError, HeaderTypeError, supported_np_types) from ..analyze import AnalyzeHeader, AnalyzeImage @@ -31,18 +31,14 @@ from ..tmpdirs import InTemporaryDirectory from ..arraywriters import WriterError -from nose.tools import (assert_equal, assert_not_equal, assert_true, - assert_false, assert_raises) - +import pytest from numpy.testing import (assert_array_equal, assert_array_almost_equal) -from ..testing import (assert_equal, assert_not_equal, assert_true, - assert_false, assert_raises, data_path, - suppress_warnings, assert_dt_equal) +from ..testing import (data_path, suppress_warnings, assert_dt_equal, + bytesio_filemap, bytesio_round_trip) from .test_wrapstruct import _TestLabeledWrapStruct from . import test_spatialimages as tsi -from .test_helpers import bytesio_filemap, bytesio_round_trip header_file = os.path.join(data_path, 'analyze.hdr') @@ -71,8 +67,7 @@ class TestAnalyzeHeader(_TestLabeledWrapStruct): def test_supported_types(self): hdr = self.header_class() - assert_equal(self.supported_np_types, - supported_np_types(hdr)) + assert self.supported_np_types == supported_np_types(hdr) def get_bad_bb(self): # A value for the binary block that should raise an error @@ -84,7 +79,7 @@ def test_general_init(self): hdr = self.header_class() # an empty header has shape (0,) - like an empty array # (np.array([])) - assert_equal(hdr.get_data_shape(), (0,)) + assert hdr.get_data_shape() == (0,) # The affine is always homogenous 3D regardless of shape. The # default affine will have -1 as the X zoom iff default_x_flip # is True (which it is by default). We have to be careful of the @@ -93,20 +88,20 @@ def test_general_init(self): assert_array_equal(np.diag(hdr.get_base_affine()), [-1, 1, 1, 1]) # But zooms only go with number of dimensions - assert_equal(hdr.get_zooms(), (1.0,)) + assert hdr.get_zooms() == (1.0,) def test_header_size(self): - assert_equal(self.header_class.template_dtype.itemsize, self.sizeof_hdr) + assert self.header_class.template_dtype.itemsize == self.sizeof_hdr def test_empty(self): hdr = self.header_class() - assert_true(len(hdr.binaryblock) == self.sizeof_hdr) - assert_true(hdr['sizeof_hdr'] == self.sizeof_hdr) - assert_true(np.all(hdr['dim'][1:] == 1)) - assert_true(hdr['dim'][0] == 0) - assert_true(np.all(hdr['pixdim'] == 1)) - assert_true(hdr['datatype'] == 16) # float32 - assert_true(hdr['bitpix'] == 32) + assert len(hdr.binaryblock) == self.sizeof_hdr + assert hdr['sizeof_hdr'] == self.sizeof_hdr + assert np.all(hdr['dim'][1:] == 1) + assert hdr['dim'][0] == 0 + assert np.all(hdr['pixdim'] == 1) + assert hdr['datatype'] == 16 # float32 + assert hdr['bitpix'] == 32 def _set_something_into_hdr(self, hdr): # Called from test_bytes test method. 
Specific to the header data type @@ -117,26 +112,24 @@ def test_checks(self): # Test header checks hdr_t = self.header_class() # _dxer just returns the diagnostics as a string - assert_equal(self._dxer(hdr_t), '') + assert self._dxer(hdr_t) == '' hdr = hdr_t.copy() hdr['sizeof_hdr'] = 1 with suppress_warnings(): - assert_equal(self._dxer(hdr), 'sizeof_hdr should be ' + - str(self.sizeof_hdr)) + assert self._dxer(hdr) == 'sizeof_hdr should be ' + str(self.sizeof_hdr) hdr = hdr_t.copy() hdr['datatype'] = 0 - assert_equal(self._dxer(hdr), 'data code 0 not supported\n' - 'bitpix does not match datatype') + assert self._dxer(hdr) == 'data code 0 not supported\nbitpix does not match datatype' hdr = hdr_t.copy() hdr['bitpix'] = 0 - assert_equal(self._dxer(hdr), 'bitpix does not match datatype') + assert self._dxer(hdr) == 'bitpix does not match datatype' def test_pixdim_checks(self): hdr_t = self.header_class() for i in (1, 2, 3): hdr = hdr_t.copy() hdr['pixdim'][i] = -1 - assert_equal(self._dxer(hdr), 'pixdim[1,2,3] should be positive') + assert self._dxer(hdr) == 'pixdim[1,2,3] should be positive' def test_log_checks(self): # Test logging, fixing, errors for header checking @@ -146,11 +139,10 @@ def test_log_checks(self): with suppress_warnings(): hdr['sizeof_hdr'] = 350 # severity 30 fhdr, message, raiser = self.log_chk(hdr, 30) - assert_equal(fhdr['sizeof_hdr'], self.sizeof_hdr) - assert_equal(message, - 'sizeof_hdr should be {0}; set sizeof_hdr to {0}'.format( - self.sizeof_hdr)) - assert_raises(*raiser) + + assert fhdr['sizeof_hdr'] == self.sizeof_hdr + assert message == 'sizeof_hdr should be {0}; set sizeof_hdr to {0}'.format(self.sizeof_hdr) + pytest.raises(*raiser) # RGB datatype does not raise error hdr = HC() hdr.set_data_dtype('RGB') @@ -160,25 +152,22 @@ def test_log_checks(self): hdr['datatype'] = -1 # severity 40 with suppress_warnings(): fhdr, message, raiser = self.log_chk(hdr, 40) - assert_equal(message, 'data code -1 not recognized; ' - 'not attempting fix') + assert message == 'data code -1 not recognized; not attempting fix' - assert_raises(*raiser) + pytest.raises(*raiser) # datatype not supported hdr['datatype'] = 255 # severity 40 fhdr, message, raiser = self.log_chk(hdr, 40) - assert_equal(message, 'data code 255 not supported; ' - 'not attempting fix') - assert_raises(*raiser) + assert message == 'data code 255 not supported; not attempting fix' + pytest.raises(*raiser) # bitpix hdr = HC() hdr['datatype'] = 16 # float32 hdr['bitpix'] = 16 # severity 10 fhdr, message, raiser = self.log_chk(hdr, 10) - assert_equal(fhdr['bitpix'], 32) - assert_equal(message, 'bitpix does not match datatype; ' - 'setting bitpix to match datatype') - assert_raises(*raiser) + assert fhdr['bitpix'] == 32 + assert message == 'bitpix does not match datatype; setting bitpix to match datatype' + pytest.raises(*raiser) def test_pixdim_log_checks(self): # pixdim positive @@ -186,28 +175,25 @@ def test_pixdim_log_checks(self): hdr = HC() hdr['pixdim'][1] = -2 # severity 35 fhdr, message, raiser = self.log_chk(hdr, 35) - assert_equal(fhdr['pixdim'][1], 2) - assert_equal(message, 'pixdim[1,2,3] should be positive; ' - 'setting to abs of pixdim values') - assert_raises(*raiser) + assert fhdr['pixdim'][1] == 2 + assert message == 'pixdim[1,2,3] should be positive; setting to abs of pixdim values' + pytest.raises(*raiser) hdr = HC() hdr['pixdim'][1] = 0 # severity 30 fhdr, message, raiser = self.log_chk(hdr, 30) - assert_equal(fhdr['pixdim'][1], 1) - assert_equal(message, PIXDIM0_MSG) - 
assert_raises(*raiser) + assert fhdr['pixdim'][1] == 1 + assert message == PIXDIM0_MSG + pytest.raises(*raiser) # both hdr = HC() hdr['pixdim'][1] = 0 # severity 30 hdr['pixdim'][2] = -2 # severity 35 fhdr, message, raiser = self.log_chk(hdr, 35) - assert_equal(fhdr['pixdim'][1], 1) - assert_equal(fhdr['pixdim'][2], 2) - assert_equal(message, 'pixdim[1,2,3] should be ' - 'non-zero and pixdim[1,2,3] should ' - 'be positive; setting 0 dims to 1 ' - 'and setting to abs of pixdim values') - assert_raises(*raiser) + assert fhdr['pixdim'][1] == 1 + assert fhdr['pixdim'][2] == 2 + assert message == ('pixdim[1,2,3] should be non-zero and pixdim[1,2,3] should be ' + 'positive; setting 0 dims to 1 and setting to abs of pixdim values') + pytest.raises(*raiser) def test_no_scaling_fixes(self): # Check we do not fix slope or intercept @@ -248,12 +234,12 @@ def test_logger_error(self): # Check log message appears in new logger imageglobals.logger = logger hdr.copy().check_fix() - assert_equal(str_io.getvalue(), - 'bitpix does not match datatype; ' - 'setting bitpix to match datatype\n') + assert str_io.getvalue() == ('bitpix does not match datatype; ' + 'setting bitpix to match datatype\n') # Check that error_level in fact causes error to be raised imageglobals.error_level = 10 - assert_raises(HeaderDataError, hdr.copy().check_fix) + with pytest.raises(HeaderDataError): + hdr.copy().check_fix() finally: imageglobals.logger, imageglobals.error_level = log_cache @@ -304,55 +290,58 @@ def assert_set_dtype(dt_spec, np_dtype): assert_set_dtype(int, np_sys_int) hdr = self.header_class() for inp in all_unsupported_types: - assert_raises(HeaderDataError, hdr.set_data_dtype, inp) + with pytest.raises(HeaderDataError): + hdr.set_data_dtype(inp) def test_shapes(self): # Test that shape checks work hdr = self.header_class() for shape in ((2, 3, 4), (2, 3, 4, 5), (2, 3), (2,)): hdr.set_data_shape(shape) - assert_equal(hdr.get_data_shape(), shape) + assert hdr.get_data_shape() == shape # Check max works, but max+1 raises error dim_dtype = hdr.structarr['dim'].dtype # as_int for safety to deal with numpy 1.4.1 int conversion errors mx = as_int(np.iinfo(dim_dtype).max) shape = (mx,) hdr.set_data_shape(shape) - assert_equal(hdr.get_data_shape(), shape) + assert hdr.get_data_shape() == shape shape = (mx + 1,) - assert_raises(HeaderDataError, hdr.set_data_shape, shape) + with pytest.raises(HeaderDataError): + hdr.set_data_shape(shape) # Lists or tuples or arrays will work for setting shape shape = (2, 3, 4) for constructor in (list, tuple, np.array): hdr.set_data_shape(constructor(shape)) - assert_equal(hdr.get_data_shape(), shape) + assert hdr.get_data_shape() == shape def test_read_write_data(self): # Check reading and writing of data hdr = self.header_class() # Trying to read data from an empty header gives no data bytes = hdr.data_from_fileobj(BytesIO()) - assert_equal(len(bytes), 0) + assert len(bytes) == 0 # Setting no data into an empty header results in - no data str_io = BytesIO() hdr.data_to_fileobj([], str_io) - assert_equal(str_io.getvalue(), b'') + assert str_io.getvalue() == b'' # Setting more data then there should be gives an error - assert_raises(HeaderDataError, - hdr.data_to_fileobj, - np.zeros(3), - str_io) + with pytest.raises(HeaderDataError): + hdr.data_to_fileobj(np.zeros(3), str_io) # Test valid write hdr.set_data_shape((1, 2, 3)) hdr.set_data_dtype(np.float32) S = BytesIO() data = np.arange(6, dtype=np.float64) # data have to be the right shape - assert_raises(HeaderDataError, 
hdr.data_to_fileobj, data, S) + with pytest.raises(HeaderDataError): + hdr.data_to_fileobj(data, S) data = data.reshape((1, 2, 3)) # and size - assert_raises(HeaderDataError, hdr.data_to_fileobj, data[:, :, :-1], S) - assert_raises(HeaderDataError, hdr.data_to_fileobj, data[:, :-1, :], S) + with pytest.raises(HeaderDataError): + hdr.data_to_fileobj(data[:, :, :-1], S) + with pytest.raises(HeaderDataError): + hdr.data_to_fileobj(data[:, :-1, :], S) # OK if so hdr.data_to_fileobj(data, S) # Read it back @@ -360,7 +349,7 @@ def test_read_write_data(self): # Should be about the same assert_array_almost_equal(data, data_back) # but with the header dtype, not the data dtype - assert_equal(hdr.get_data_dtype(), data_back.dtype) + assert hdr.get_data_dtype() == data_back.dtype # this is with native endian, not so for swapped S2 = BytesIO() hdr2 = hdr.as_byteswapped() @@ -371,9 +360,9 @@ def test_read_write_data(self): # Compares the same assert_array_almost_equal(data_back, data_back2) # Same dtype names - assert_equal(data_back.dtype.name, data_back2.dtype.name) + assert data_back.dtype.name == data_back2.dtype.name # But not the same endianness - assert_not_equal(data.dtype.byteorder, data_back2.dtype.byteorder) + assert data.dtype.byteorder != data_back2.dtype.byteorder # Try scaling down to integer hdr.set_data_dtype(np.uint8) S3 = BytesIO() @@ -385,15 +374,16 @@ def test_read_write_data(self): assert_array_almost_equal(data, data_back) # If the header can't do scaling, rescale raises an error if not hdr.has_data_slope: - assert_raises(HeaderTypeError, hdr.data_to_fileobj, data, S3) - assert_raises(HeaderTypeError, hdr.data_to_fileobj, data, S3, - rescale=True) + with pytest.raises(HeaderTypeError): + hdr.data_to_fileobj(data, S3) + with pytest.raises(HeaderTypeError): + hdr.data_to_fileobj(data, S3, rescale=True) # If not scaling we lose precision from rounding data = np.arange(6, dtype=np.float64).reshape((1, 2, 3)) + 0.5 with np.errstate(invalid='ignore'): hdr.data_to_fileobj(data, S3, rescale=False) data_back = hdr.data_from_fileobj(S3) - assert_false(np.allclose(data, data_back)) + assert not np.allclose(data, data_back) # Test RGB image dtype = np.dtype([('R', 'uint8'), ('G', 'uint8'), ('B', 'uint8')]) data = np.ones((1, 2, 3), dtype) @@ -409,26 +399,24 @@ def test_datatype(self): for code in codes.value_set(): npt = codes.type[code] if npt is np.void: - assert_raises( - HeaderDataError, - ehdr.set_data_dtype, - code) + with pytest.raises(HeaderDataError): + ehdr.set_data_dtype(code) continue dt = codes.dtype[code] ehdr.set_data_dtype(npt) - assert_true(ehdr['datatype'] == code) - assert_true(ehdr['bitpix'] == dt.itemsize * 8) + assert ehdr['datatype'] == code + assert ehdr['bitpix'] == dt.itemsize * 8 ehdr.set_data_dtype(code) - assert_true(ehdr['datatype'] == code) + assert ehdr['datatype'] == code ehdr.set_data_dtype(dt) - assert_true(ehdr['datatype'] == code) + assert ehdr['datatype'] == code def test_offset(self): # Test get / set offset hdr = self.header_class() offset = hdr.get_data_offset() hdr.set_data_offset(offset + 16) - assert_equal(hdr.get_data_offset(), offset + 16) + assert hdr.get_data_offset() == offset + 16 def test_data_shape_zooms_affine(self): hdr = self.header_class() @@ -436,27 +424,23 @@ def test_data_shape_zooms_affine(self): L = len(shape) hdr.set_data_shape(shape) if L: - assert_equal(hdr.get_data_shape(), shape) + assert hdr.get_data_shape() == shape else: - assert_equal(hdr.get_data_shape(), (0,)) + assert hdr.get_data_shape() == (0,) # Default zoom - for 3D 
- is 1(()) - assert_equal(hdr.get_zooms(), (1,) * L) + assert hdr.get_zooms() == (1,) * L # errors if zooms do not match shape if len(shape): - assert_raises(HeaderDataError, - hdr.set_zooms, - (1,) * (L - 1)) + with pytest.raises(HeaderDataError): + hdr.set_zooms((1,) * (L - 1)) # Errors for negative zooms - assert_raises(HeaderDataError, - hdr.set_zooms, - (-1,) + (1,) * (L - 1)) - assert_raises(HeaderDataError, - hdr.set_zooms, - (1,) * (L + 1)) + with pytest.raises(HeaderDataError): + hdr.set_zooms((-1,) + (1,) * (L - 1)) + with pytest.raises(HeaderDataError): + hdr.set_zooms((1,) * (L + 1)) # Errors for negative zooms - assert_raises(HeaderDataError, - hdr.set_zooms, - (-1,) * L) + with pytest.raises(HeaderDataError): + hdr.set_zooms((-1,) * L) # reducing the dimensionality of the array and then increasing # it again reverts the previously set zoom values to 1.0 hdr = self.header_class() @@ -489,20 +473,20 @@ def test_default_x_flip(self): def test_from_eg_file(self): fileobj = open(self.example_file, 'rb') hdr = self.header_class.from_fileobj(fileobj, check=False) - assert_equal(hdr.endianness, '>') - assert_equal(hdr['sizeof_hdr'], self.sizeof_hdr) + assert hdr.endianness == '>' + assert hdr['sizeof_hdr'] == self.sizeof_hdr def test_orientation(self): # Test flips hdr = self.header_class() - assert_true(hdr.default_x_flip) + assert hdr.default_x_flip hdr.set_data_shape((3, 5, 7)) hdr.set_zooms((4, 5, 6)) aff = np.diag((-4, 5, 6, 1)) aff[:3, 3] = np.array([1, 2, 3]) * np.array([-4, 5, 6]) * -1 assert_array_equal(hdr.get_base_affine(), aff) hdr.default_x_flip = False - assert_false(hdr.default_x_flip) + assert not hdr.default_x_flip aff[0] *= -1 assert_array_equal(hdr.get_base_affine(), aff) @@ -512,23 +496,23 @@ def test_str(self): s1 = str(hdr) # check the datacode recoding rexp = re.compile('^datatype +: float32', re.MULTILINE) - assert_true(rexp.search(s1) is not None) + assert rexp.search(s1) is not None def test_from_header(self): # check from header class method. 
klass = self.header_class empty = klass.from_header() - assert_equal(klass(), empty) + assert klass() == empty empty = klass.from_header(None) - assert_equal(klass(), empty) + assert klass() == empty hdr = klass() hdr.set_data_dtype(np.float64) hdr.set_data_shape((1, 2, 3)) hdr.set_zooms((3.0, 2.0, 1.0)) for check in (True, False): copy = klass.from_header(hdr, check=check) - assert_equal(hdr, copy) - assert_false(hdr is copy) + assert hdr == copy + assert hdr is not copy class C(object): @@ -538,17 +522,17 @@ def get_data_shape(self): return (5, 4, 3) def get_zooms(self): return (10.0, 9.0, 8.0) converted = klass.from_header(C()) - assert_true(isinstance(converted, klass)) - assert_equal(converted.get_data_dtype(), np.dtype('i2')) - assert_equal(converted.get_data_shape(), (5, 4, 3)) - assert_equal(converted.get_zooms(), (10.0, 9.0, 8.0)) + assert isinstance(converted, klass) + assert converted.get_data_dtype() == np.dtype('i2') + assert converted.get_data_shape() == (5, 4, 3) + assert converted.get_zooms() == (10.0, 9.0, 8.0) def test_base_affine(self): klass = self.header_class hdr = klass() hdr.set_data_shape((3, 5, 7)) hdr.set_zooms((3, 2, 1)) - assert_true(hdr.default_x_flip) + assert hdr.default_x_flip assert_array_almost_equal( hdr.get_base_affine(), [[-3., 0., 0., 3.], @@ -574,7 +558,7 @@ def test_scaling(self): # Test integer scaling from float # Analyze headers cannot do float-integer scaling hdr = self.header_class() - assert_true(hdr.default_x_flip) + assert hdr.default_x_flip shape = (1, 2, 3) hdr.set_data_shape(shape) hdr.set_data_dtype(np.float32) @@ -588,22 +572,23 @@ def test_scaling(self): hdr.set_data_dtype(np.int32) # Writing to int needs scaling, and raises an error if we can't scale if not hdr.has_data_slope: - assert_raises(HeaderTypeError, hdr.data_to_fileobj, data, BytesIO()) + with pytest.raises(HeaderTypeError): + hdr.data_to_fileobj(data, BytesIO()) # But if we aren't scaling, convert the floats to integers and write with np.errstate(invalid='ignore'): hdr.data_to_fileobj(data, S, rescale=False) rdata = hdr.data_from_fileobj(S) - assert_true(np.allclose(data, rdata)) + assert np.allclose(data, rdata) # This won't work for floats that aren't close to integers data_p5 = data + 0.5 with np.errstate(invalid='ignore'): hdr.data_to_fileobj(data_p5, S, rescale=False) rdata = hdr.data_from_fileobj(S) - assert_false(np.allclose(data_p5, rdata)) + assert not np.allclose(data_p5, rdata) def test_slope_inter(self): hdr = self.header_class() - assert_equal(hdr.get_slope_inter(), (None, None)) + assert hdr.get_slope_inter() == (None, None) for slinter in ((None,), (None, None), (np.nan, np.nan), @@ -614,9 +599,11 @@ def test_slope_inter(self): (None, 0), (1.0, 0)): hdr.set_slope_inter(*slinter) - assert_equal(hdr.get_slope_inter(), (None, None)) - assert_raises(HeaderTypeError, hdr.set_slope_inter, 1.1) - assert_raises(HeaderTypeError, hdr.set_slope_inter, 1.0, 0.1) + assert hdr.get_slope_inter() == (None, None) + with pytest.raises(HeaderTypeError): + hdr.set_slope_inter(1.1) + with pytest.raises(HeaderTypeError): + hdr.set_slope_inter(1.0, 0.1) def test_from_analyze_map(self): # Test that any header can pass values from a mapping @@ -625,19 +612,22 @@ def test_from_analyze_map(self): class H1(object): pass - assert_raises(AttributeError, klass.from_header, H1()) + with pytest.raises(AttributeError): + klass.from_header(H1()) class H2(object): def get_data_dtype(self): return np.dtype('u1') - assert_raises(AttributeError, klass.from_header, H2()) + with 
pytest.raises(AttributeError): + klass.from_header(H2()) class H3(H2): def get_data_shape(self): return (2, 3, 4) - assert_raises(AttributeError, klass.from_header, H3()) + with pytest.raises(AttributeError): + klass.from_header(H3()) class H4(H3): @@ -647,7 +637,7 @@ def get_zooms(self): exp_hdr.set_data_dtype(np.dtype('u1')) exp_hdr.set_data_shape((2, 3, 4)) exp_hdr.set_zooms((4, 5, 6)) - assert_equal(klass.from_header(H4()), exp_hdr) + assert klass.from_header(H4()) == exp_hdr # cal_max, cal_min get properly set from ``as_analyze_map`` class H5(H4): @@ -656,7 +646,7 @@ def as_analyze_map(self): return dict(cal_min=-100, cal_max=100) exp_hdr['cal_min'] = -100 exp_hdr['cal_max'] = 100 - assert_equal(klass.from_header(H5()), exp_hdr) + assert klass.from_header(H5()) == exp_hdr # set_* methods override fields fron header class H6(H5): @@ -664,7 +654,7 @@ class H6(H5): def as_analyze_map(self): return dict(datatype=4, bitpix=32, cal_min=-100, cal_max=100) - assert_equal(klass.from_header(H6()), exp_hdr) + assert klass.from_header(H6()) == exp_hdr # Any mapping will do, including a Nifti header class H7(H5): @@ -677,7 +667,7 @@ def as_analyze_map(self): return n_hdr # Values from methods still override values from header (shape, dtype, # zooms still at defaults from n_hdr header fields above) - assert_equal(klass.from_header(H7()), exp_hdr) + assert klass.from_header(H7()) == exp_hdr def test_best_affine(): @@ -691,20 +681,18 @@ def test_data_code_error(): # test analyze raising error for unsupported codes hdr = Nifti1Header() hdr['datatype'] = 256 - assert_raises(HeaderDataError, AnalyzeHeader.from_header, hdr) + with pytest.raises(HeaderDataError): + AnalyzeHeader.from_header(hdr) class TestAnalyzeImage(tsi.TestSpatialImage, tsi.MmapImageMixin): image_class = AnalyzeImage can_save = True supported_np_types = TestAnalyzeHeader.supported_np_types - # Flag to skip bz2 save tests if they are going to break - bad_bz2 = False def test_supported_types(self): img = self.image_class(np.zeros((2, 3, 4)), np.eye(4)) - assert_equal(self.supported_np_types, - supported_np_types(img)) + assert self.supported_np_types == supported_np_types(img) def test_default_header(self): # Check default header is as expected @@ -715,12 +703,12 @@ def test_default_header(self): hdr.set_data_dtype(arr.dtype) hdr.set_data_offset(0) hdr.set_slope_inter(np.nan, np.nan) - assert_equal(img.header, hdr) + assert img.header == hdr def test_data_hdr_cache(self): # test the API for loaded images, such that the data returned - # from img.get_data() is not affected by subsequent changes to - # the header. + # from np.asanyarray(img.dataobj) and img,get_fdata() are not + # affected by subsequent changes to the header. 
IC = self.image_class # save an image to a file map fm = IC.make_file_map() @@ -734,20 +722,21 @@ def test_data_hdr_cache(self): img = IC(data, affine, hdr) img.to_file_map(fm) img2 = IC.from_file_map(fm) - assert_equal(img2.shape, shape) - assert_equal(img2.get_data_dtype().type, np.int16) + assert img2.shape == shape + assert img2.get_data_dtype().type == np.int16 hdr = img2.header hdr.set_data_shape((3, 2, 2)) - assert_equal(hdr.get_data_shape(), (3, 2, 2)) + assert hdr.get_data_shape() == (3, 2, 2) hdr.set_data_dtype(np.uint8) - assert_equal(hdr.get_data_dtype(), np.dtype(np.uint8)) - assert_array_equal(img2.get_data(), data) + assert hdr.get_data_dtype() == np.dtype(np.uint8) + assert_array_equal(img2.get_fdata(), data) + assert_array_equal(np.asanyarray(img2.dataobj), data) # now check read_img_data function - here we do see the changed # header sc_data = read_img_data(img2) - assert_equal(sc_data.shape, (3, 2, 2)) + assert sc_data.shape == (3, 2, 2) us_data = read_img_data(img2, prefer='unscaled') - assert_equal(us_data.shape, (3, 2, 2)) + assert us_data.shape == (3, 2, 2) def test_affine_44(self): IC = self.image_class @@ -761,7 +750,8 @@ def test_affine_44(self): img = IC(data, affine.tolist()) assert_array_equal(affine, img.affine) # Not OK - affine wrong shape - assert_raises(ValueError, IC, data, np.diag([2, 3, 4])) + with pytest.raises(ValueError): + IC(data, np.diag([2, 3, 4])) def test_offset_to_zero(self): # Check offset is always set to zero when creating images @@ -769,24 +759,24 @@ def test_offset_to_zero(self): arr = np.arange(24, dtype=np.int16).reshape((2, 3, 4)) aff = np.eye(4) img = img_klass(arr, aff) - assert_equal(img.header.get_data_offset(), 0) + assert img.header.get_data_offset() == 0 # Save to BytesIO object(s), make sure offset still zero bytes_map = bytesio_filemap(img_klass) img.to_file_map(bytes_map) - assert_equal(img.header.get_data_offset(), 0) + assert img.header.get_data_offset() == 0 # Set offset in in-memory image big_off = 1024 img.header.set_data_offset(big_off) - assert_equal(img.header.get_data_offset(), big_off) + assert img.header.get_data_offset() == big_off # Offset is in proxy but not in image after saving to fileobj img_rt = bytesio_round_trip(img) - assert_equal(img_rt.dataobj.offset, big_off) - assert_equal(img_rt.header.get_data_offset(), 0) + assert img_rt.dataobj.offset == big_off + assert img_rt.header.get_data_offset() == 0 # The original header still has the big_off value img.header.set_data_offset(big_off) # Making a new image with this header resets to zero img_again = img_klass(arr, aff, img.header) - assert_equal(img_again.header.get_data_offset(), 0) + assert img_again.header.get_data_offset() == 0 def test_big_offset_exts(self): # Check writing offset beyond data works for different file extensions @@ -794,9 +784,7 @@ def test_big_offset_exts(self): arr = np.arange(24, dtype=np.int16).reshape((2, 3, 4)) aff = np.eye(4) img_ext = img_klass.files_types[0][1] - compressed_exts = ['', '.gz'] - if not self.bad_bz2: - compressed_exts.append('.bz2') + compressed_exts = ['', '.gz', '.bz2'] with InTemporaryDirectory(): for offset in (0, 2048): # Set offset in in-memory image @@ -834,7 +822,7 @@ def test_header_updating(self): hdr_back = img.from_file_map(img.file_map).header assert_array_equal(hdr.get_zooms(), (9, 3, 4)) # Modify data in-place? 
Update on save - data = img.get_data() + data = img.get_fdata() data.shape = (3, 2, 4) img.to_file_map() img_back = img.from_file_map(img.file_map) @@ -847,8 +835,8 @@ def test_pickle(self): img = img_klass(np.zeros((2, 3, 4)), None) img_str = pickle.dumps(img) img2 = pickle.loads(img_str) - assert_array_equal(img.get_data(), img2.get_data()) - assert_equal(img.header, img2.header) + assert_array_equal(img.get_fdata(), img2.get_fdata()) + assert img.header == img2.header # Save / reload using bytes IO objects for key, value in img.file_map.items(): value.fileobj = BytesIO() @@ -856,7 +844,7 @@ def test_pickle(self): img_prox = img.from_file_map(img.file_map) img_str = pickle.dumps(img_prox) img2_prox = pickle.loads(img_str) - assert_array_equal(img.get_data(), img2_prox.get_data()) + assert_array_equal(img.get_fdata(), img2_prox.get_fdata()) def test_no_finite_values(self): # save of data with no finite values to int type raises error if we have @@ -867,10 +855,11 @@ def test_no_finite_values(self): data[:, 2] = -np.inf img = self.image_class(data, None) img.set_data_dtype(np.int16) - assert_equal(img.get_data_dtype(), np.dtype(np.int16)) + assert img.get_data_dtype() == np.dtype(np.int16) fm = bytesio_filemap(img) if not img.header.has_data_slope: - assert_raises(WriterError, img.to_file_map, fm) + with pytest.raises(WriterError): + img.to_file_map(fm) return img.to_file_map(fm) img_back = self.image_class.from_file_map(fm) @@ -882,4 +871,5 @@ def test_unsupported(): data = np.arange(24, dtype=np.int32).reshape((2, 3, 4)) affine = np.eye(4) data = np.arange(24, dtype=np.uint32).reshape((2, 3, 4)) - assert_raises(HeaderDataError, AnalyzeImage, data, affine) + with pytest.raises(HeaderDataError): + AnalyzeImage(data, affine) diff --git a/nibabel/tests/test_api_validators.py b/nibabel/tests/test_api_validators.py index affa89d3e3..a4d23aaefd 100644 --- a/nibabel/tests/test_api_validators.py +++ b/nibabel/tests/test_api_validators.py @@ -1,10 +1,6 @@ """ Metaclass and class for validating instance APIs """ -from __future__ import division, print_function, absolute_import -from six import with_metaclass - -from nose.tools import assert_equal class validator2test(type): @@ -22,7 +18,7 @@ def meth(self): for imaker, params in self.obj_params(): validator(self, imaker, params) meth.__name__ = 'test_' + name[len('validate_'):] - meth.__doc__ = 'autogenerated test from ' + name + meth.__doc__ = 'autogenerated test from {}.{}'.format(klass.__name__, name) return meth for name in dir(klass): if not name.startswith('validate_'): @@ -33,7 +29,7 @@ def meth(self): return klass -class ValidateAPI(with_metaclass(validator2test)): +class ValidateAPI(metaclass=validator2test): """ A class to validate APIs Your job is twofold: @@ -82,8 +78,8 @@ def validate_something(self, obj, params): The metaclass sets up a ``test_something`` function that runs these checks on each ( """ - assert_equal(obj.var, params['var']) - assert_equal(obj.get_var(), params['var']) + assert obj.var == params['var'] + assert obj.get_var() == params['var'] class TestRunAllTests(ValidateAPI): @@ -105,4 +101,4 @@ def validate_second(self, obj, param): def teardown(): # Check that both validate_xxx tests got run - assert_equal(TestRunAllTests.run_tests, ['first', 'second']) + assert TestRunAllTests.run_tests == ['first', 'second'] diff --git a/nibabel/tests/test_arrayproxy.py b/nibabel/tests/test_arrayproxy.py index 187d5940df..2a509acb88 100644 --- a/nibabel/tests/test_arrayproxy.py +++ b/nibabel/tests/test_arrayproxy.py @@ -8,7 +8,6 @@ 
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """ Tests for arrayproxy module """ -from __future__ import division, print_function, absolute_import import warnings import gzip @@ -24,12 +23,11 @@ from ..openers import ImageOpener from ..nifti1 import Nifti1Header -import mock +from unittest import mock from numpy.testing import assert_array_equal, assert_array_almost_equal -from nose.tools import (assert_true, assert_false, assert_equal, - assert_not_equal, assert_raises) -from nibabel.testing import memmap_after_ufunc +import pytest +from ..testing import memmap_after_ufunc from .test_fileslice import slicer_samples from .test_openers import patch_indexed_gzip @@ -71,15 +69,16 @@ def test_init(): bio.write(arr.tostring(order='F')) hdr = FunkyHeader(shape) ap = ArrayProxy(bio, hdr) - assert_true(ap.file_like is bio) - assert_equal(ap.shape, shape) + assert ap.file_like is bio + assert ap.shape == shape # shape should be read only - assert_raises(AttributeError, setattr, ap, 'shape', shape) + with pytest.raises(AttributeError): + ap.shape = shape # Get the data assert_array_equal(np.asarray(ap), arr) # Check we can modify the original header without changing the ap version hdr.shape[0] = 6 - assert_not_equal(ap.shape, shape) + assert ap.shape != shape # Data stays the same, also assert_array_equal(np.asarray(ap), arr) # C order also possible @@ -89,7 +88,8 @@ def test_init(): ap = CArrayProxy(bio, FunkyHeader((2, 3, 4))) assert_array_equal(np.asarray(ap), arr) # Illegal init - assert_raises(TypeError, ArrayProxy, bio, object()) + with pytest.raises(TypeError): + ArrayProxy(bio, object()) def test_tuplespec(): @@ -107,22 +107,22 @@ def test_tuplespec(): ap_tuple = ArrayProxy(bio, tuple_spec) # Header and tuple specs produce identical behavior for prop in ('shape', 'dtype', 'offset', 'slope', 'inter', 'is_proxy'): - assert_equal(getattr(ap_header, prop), getattr(ap_tuple, prop)) + assert getattr(ap_header, prop) == getattr(ap_tuple, prop) for method, args in (('get_unscaled', ()), ('__array__', ()), ('__getitem__', ((0, 2, 1), )) ): assert_array_equal(getattr(ap_header, method)(*args), getattr(ap_tuple, method)(*args)) - # Tuple-defined ArrayProxies have no header to store - with warnings.catch_warnings(): - assert_true(ap_tuple.header is None) # Partial tuples of length 2-4 are also valid for n in range(2, 5): ArrayProxy(bio, tuple_spec[:n]) # Bad tuple lengths - assert_raises(TypeError, ArrayProxy, bio, ()) - assert_raises(TypeError, ArrayProxy, bio, tuple_spec[:1]) - assert_raises(TypeError, ArrayProxy, bio, tuple_spec + ('error',)) + with pytest.raises(TypeError): + ArrayProxy(bio, ()) + with pytest.raises(TypeError): + ArrayProxy(bio, tuple_spec[:1]) + with pytest.raises(TypeError): + ArrayProxy(bio, tuple_spec + ('error',)) def write_raw_data(arr, hdr, fileobj): @@ -140,12 +140,8 @@ def test_nifti1_init(): write_raw_data(arr, hdr, bio) hdr.set_slope_inter(2, 10) ap = ArrayProxy(bio, hdr) - assert_true(ap.file_like == bio) - assert_equal(ap.shape, shape) - # Check there has been a copy of the header - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - assert_false(ap.header is hdr) + assert ap.file_like == bio + assert ap.shape == shape # Get the data assert_array_equal(np.asarray(ap), arr * 2.0 + 10) with InTemporaryDirectory(): @@ -153,8 +149,8 @@ def test_nifti1_init(): write_raw_data(arr, hdr, f) f.close() ap = ArrayProxy('test.nii', hdr) - assert_true(ap.file_like == 'test.nii') - assert_equal(ap.shape, shape) + assert ap.file_like 
== 'test.nii' + assert ap.shape == shape assert_array_equal(np.asarray(ap), arr * 2.0 + 10) @@ -190,14 +186,14 @@ def test_is_proxy(): hdr = FunkyHeader((2, 3, 4)) bio = BytesIO() prox = ArrayProxy(bio, hdr) - assert_true(is_proxy(prox)) - assert_false(is_proxy(bio)) - assert_false(is_proxy(hdr)) - assert_false(is_proxy(np.zeros((2, 3, 4)))) + assert is_proxy(prox) + assert not is_proxy(bio) + assert not is_proxy(hdr) + assert not is_proxy(np.zeros((2, 3, 4))) class NP(object): is_proxy = False - assert_false(is_proxy(NP())) + assert not is_proxy(NP()) def test_reshape_dataobj(): @@ -211,11 +207,11 @@ def test_reshape_dataobj(): assert_array_equal(prox, arr) assert_array_equal(reshape_dataobj(prox, (2, 3, 4)), np.reshape(arr, (2, 3, 4))) - assert_equal(prox.shape, shape) - assert_equal(arr.shape, shape) + assert prox.shape == shape + assert arr.shape == shape assert_array_equal(reshape_dataobj(arr, (2, 3, 4)), np.reshape(arr, (2, 3, 4))) - assert_equal(arr.shape, shape) + assert arr.shape == shape class ArrGiver(object): @@ -224,7 +220,7 @@ def __array__(self): assert_array_equal(reshape_dataobj(ArrGiver(), (2, 3, 4)), np.reshape(arr, (2, 3, 4))) - assert_equal(arr.shape, shape) + assert arr.shape == shape def test_reshaped_is_proxy(): @@ -232,13 +228,16 @@ def test_reshaped_is_proxy(): hdr = FunkyHeader(shape) bio = BytesIO() prox = ArrayProxy(bio, hdr) - assert_true(isinstance(prox.reshape((2, 3, 4)), ArrayProxy)) + assert isinstance(prox.reshape((2, 3, 4)), ArrayProxy) minus1 = prox.reshape((2, -1, 4)) - assert_true(isinstance(minus1, ArrayProxy)) - assert_equal(minus1.shape, (2, 3, 4)) - assert_raises(ValueError, prox.reshape, (-1, -1, 4)) - assert_raises(ValueError, prox.reshape, (2, 3, 5)) - assert_raises(ValueError, prox.reshape, (2, -1, 5)) + assert isinstance(minus1, ArrayProxy) + assert minus1.shape == (2, 3, 4) + with pytest.raises(ValueError): + prox.reshape((-1, -1, 4)) + with pytest.raises(ValueError): + prox.reshape((2, 3, 5)) + with pytest.raises(ValueError): + prox.reshape((2, -1, 5)) def test_get_unscaled(): @@ -321,21 +320,22 @@ def check_mmap(hdr, offset, proxy_class, unscaled_is_mmap = isinstance(unscaled, np.memmap) back_is_mmap = isinstance(back_data, np.memmap) if expected_mode is None: - assert_false(unscaled_is_mmap) - assert_false(back_is_mmap) + assert not unscaled_is_mmap + assert not back_is_mmap else: - assert_equal(unscaled_is_mmap, - viral_memmap or unscaled_really_mmap) - assert_equal(back_is_mmap, - viral_memmap or scaled_really_mmap) + assert unscaled_is_mmap == (viral_memmap or unscaled_really_mmap) + assert back_is_mmap == (viral_memmap or scaled_really_mmap) if scaled_really_mmap: - assert_equal(back_data.mode, expected_mode) + assert back_data.mode == expected_mode del prox, back_data # Check that mmap is keyword-only - assert_raises(TypeError, proxy_class, fname, hdr, True) + with pytest.raises(TypeError): + proxy_class(fname, hdr, True) # Check invalid values raise error - assert_raises(ValueError, proxy_class, fname, hdr, mmap='rw') - assert_raises(ValueError, proxy_class, fname, hdr, mmap='r+') + with pytest.raises(ValueError): + proxy_class(fname, hdr, mmap='rw') + with pytest.raises(ValueError): + proxy_class(fname, hdr, mmap='r+') # An image opener class which counts how many instances of itself have been @@ -373,8 +373,6 @@ def test_keep_file_open_true_false_invalid(): # False | True | True | True # True | False | True | n/a # True | True | True | False - # 'auto' | False | False | n/a - # 'auto' | True | True | False # # Each test tuple 
contains: # - file type - gzipped ('gz') or not ('bin'), or an open file handle @@ -389,26 +387,18 @@ def test_keep_file_open_true_false_invalid(): ('open', False, True, False, False), ('open', True, False, False, False), ('open', True, True, False, False), - ('open', 'auto', False, False, False), - ('open', 'auto', True, False, False), # non-gzip file - have_igzip is irrelevant, decision should be made # solely from kfo flag ('bin', False, False, False, False), ('bin', False, True, False, False), ('bin', True, False, True, True), ('bin', True, True, True, True), - ('bin', 'auto', False, False, False), - ('bin', 'auto', True, False, False), - # gzip file. If igzip is present, we persist the ImageOpener. If kfo - # is 'auto': - # - if igzip is present, kfo -> True - # - otherwise, kfo -> False + # gzip file. If igzip is present, we persist the ImageOpener. ('gz', False, False, False, False), ('gz', False, True, True, False), ('gz', True, False, True, True), ('gz', True, True, True, True), - ('gz', 'auto', False, False, False), - ('gz', 'auto', True, True, True)] + ] dtype = np.float32 data = np.arange(1000, dtype=dtype).reshape((10, 10, 10)) @@ -478,12 +468,14 @@ def test_keep_file_open_true_false_invalid(): fname = 'testdata' with open(fname, 'wb') as fobj: fobj.write(data.tostring(order='F')) - with assert_raises(ValueError): - ArrayProxy(fname, ((10, 10, 10), dtype), keep_file_open=55) - with assert_raises(ValueError): - ArrayProxy(fname, ((10, 10, 10), dtype), keep_file_open='autob') - with assert_raises(ValueError): - ArrayProxy(fname, ((10, 10, 10), dtype), keep_file_open='cauto') + + for invalid_kfo in (55, 'auto', 'cauto'): + with pytest.raises(ValueError): + ArrayProxy(fname, ((10, 10, 10), dtype), + keep_file_open=invalid_kfo) + with patch_keep_file_open_default(invalid_kfo): + with pytest.raises(ValueError): + ArrayProxy(fname, ((10, 10, 10), dtype)) def test_pickle_lock(): diff --git a/nibabel/tests/test_arraywriters.py b/nibabel/tests/test_arraywriters.py index b4a3a48e93..9268c3fe36 100644 --- a/nibabel/tests/test_arraywriters.py +++ b/nibabel/tests/test_arraywriters.py @@ -2,11 +2,8 @@ See docstring of :mod:`nibabel.arraywriters` for API. 
""" -from __future__ import division, print_function, absolute_import -import sys from platform import python_compiler, machine -from distutils.version import LooseVersion import itertools import numpy as np @@ -17,11 +14,8 @@ from ..casting import int_abs, type_info, shared_range, on_powerpc from ..volumeutils import array_from_file, apply_read_scaling, _dt_min_max -from numpy.testing import (assert_array_almost_equal, - assert_array_equal) -from nose.tools import (assert_true, assert_false, - assert_equal, assert_not_equal, - assert_raises) +from numpy.testing import assert_array_almost_equal, assert_array_equal +import pytest from ..testing import (assert_allclose_safely, suppress_warnings, error_warnings) @@ -34,8 +28,6 @@ IUINT_TYPES = INT_TYPES + UINT_TYPES NUMERIC_TYPES = CFLOAT_TYPES + IUINT_TYPES -NP_VERSION = LooseVersion(np.__version__) - def round_trip(writer, order='F', apply_scale=True): sio = BytesIO() @@ -61,34 +53,19 @@ def test_arraywriters(): for type in test_types: arr = np.arange(10, dtype=type) aw = klass(arr) - assert_true(aw.array is arr) - assert_equal(aw.out_dtype, arr.dtype) + assert aw.array is arr + assert aw.out_dtype == arr.dtype assert_array_equal(arr, round_trip(aw)) # Byteswapped should be OK bs_arr = arr.byteswap().newbyteorder('S') - # Except on some numpies for complex256, where the array does not - # equal itself - if not np.all(bs_arr == arr): - assert_true(NP_VERSION <= LooseVersion('1.7.0')) - assert_true(on_powerpc()) - assert_true(type == np.complex256) - else: - bs_aw = klass(bs_arr) - bs_aw_rt = round_trip(bs_aw) - # On Ubuntu 13.04 with python 3.3 __eq__ comparison on - # arrays with complex numbers fails here for some - # reason -- not our fault, and to test correct operation we - # will just compare element by element - if NP_VERSION == '1.7.1' and sys.version_info[:2] == (3, 3): - assert_array_equal_ = lambda x, y: np.all([x_ == y_ for x_, y_ in zip(x, y)]) - else: - assert_array_equal_ = assert_array_equal - # assert against original array because POWER7 was running into - # trouble using the byteswapped array (bs_arr) - assert_array_equal_(arr, bs_aw_rt) - bs_aw2 = klass(bs_arr, arr.dtype) - bs_aw2_rt = round_trip(bs_aw2) - assert_array_equal(arr, bs_aw2_rt) + bs_aw = klass(bs_arr) + bs_aw_rt = round_trip(bs_aw) + # assert against original array because POWER7 was running into + # trouble using the byteswapped array (bs_arr) + assert_array_equal(arr, bs_aw_rt) + bs_aw2 = klass(bs_arr, arr.dtype) + bs_aw2_rt = round_trip(bs_aw2) + assert_array_equal(arr, bs_aw2_rt) # 2D array arr2 = np.reshape(arr, (2, 5)) a2w = klass(arr2) @@ -100,7 +77,7 @@ def test_arraywriters(): # C order works as well arr_back = round_trip(a2w, 'C') assert_array_equal(arr2, arr_back) - assert_true(arr_back.flags.c_contiguous) + assert arr_back.flags.c_contiguous def test_arraywriter_check_scaling(): @@ -109,14 +86,17 @@ def test_arraywriter_check_scaling(): arr = np.array([0, 1, 128, 255], np.uint8) aw = ArrayWriter(arr) # Out of range, scaling needed, default is error - assert_raises(WriterError, ArrayWriter, arr, np.int8) + with pytest.raises(WriterError): + ArrayWriter(arr, np.int8) # Make default explicit - assert_raises(WriterError, ArrayWriter, arr, np.int8, check_scaling=True) + with pytest.raises(WriterError): + ArrayWriter(arr, np.int8, check_scaling=True) # Turn off scaling check aw = ArrayWriter(arr, np.int8, check_scaling=False) assert_array_equal(round_trip(aw), np.clip(arr, 0, 127)) # Has to be keyword - assert_raises(TypeError, ArrayWriter, arr, 
np.int8, False) + with pytest.raises(TypeError): + ArrayWriter(arr, np.int8, False) def test_no_scaling(): @@ -174,39 +154,42 @@ def test_scaling_needed(): dt_def = [('f', 'i4')] arr = np.ones(10, dt_def) for t in NUMERIC_TYPES: - assert_raises(WriterError, ArrayWriter, arr, t) + with pytest.raises(WriterError): + ArrayWriter(arr, t) narr = np.ones(10, t) - assert_raises(WriterError, ArrayWriter, narr, dt_def) - assert_false(ArrayWriter(arr).scaling_needed()) - assert_false(ArrayWriter(arr, dt_def).scaling_needed()) + with pytest.raises(WriterError): + ArrayWriter(narr, dt_def) + assert not ArrayWriter(arr).scaling_needed() + assert not ArrayWriter(arr, dt_def).scaling_needed() # Any numeric type that can cast, needs no scaling for in_t in NUMERIC_TYPES: for out_t in NUMERIC_TYPES: if np.can_cast(in_t, out_t): aw = ArrayWriter(np.ones(10, in_t), out_t) - assert_false(aw.scaling_needed()) + assert not aw.scaling_needed() for in_t in NUMERIC_TYPES: # Numeric types to complex never need scaling arr = np.ones(10, in_t) for out_t in COMPLEX_TYPES: - assert_false(ArrayWriter(arr, out_t).scaling_needed()) + assert not ArrayWriter(arr, out_t).scaling_needed() # Attempts to scale from complex to anything else fails for in_t in COMPLEX_TYPES: for out_t in FLOAT_TYPES + IUINT_TYPES: arr = np.ones(10, in_t) - assert_raises(WriterError, ArrayWriter, arr, out_t) + with pytest.raises(WriterError): + ArrayWriter(arr, out_t) # Scaling from anything but complex to floats is OK for in_t in FLOAT_TYPES + IUINT_TYPES: arr = np.ones(10, in_t) for out_t in FLOAT_TYPES: - assert_false(ArrayWriter(arr, out_t).scaling_needed()) + assert not ArrayWriter(arr, out_t).scaling_needed() # For any other output type, arrays with no data don't need scaling for in_t in FLOAT_TYPES + IUINT_TYPES: arr_0 = np.zeros(10, in_t) arr_e = [] for out_t in IUINT_TYPES: - assert_false(ArrayWriter(arr_0, out_t).scaling_needed()) - assert_false(ArrayWriter(arr_e, out_t).scaling_needed()) + assert not ArrayWriter(arr_0, out_t).scaling_needed() + assert not ArrayWriter(arr_e, out_t).scaling_needed() # Going to (u)ints, non-finite arrays don't need scaling for writers that # can do scaling because these use finite_range to threshold the input data, # but ArrayWriter does not do this. 
so scaling_needed is True @@ -217,17 +200,16 @@ def test_scaling_needed(): arr_mix = np.array([np.nan, np.inf, -np.inf], dtype=in_t) for out_t in IUINT_TYPES: for arr in (arr_nan, arr_inf, arr_minf, arr_mix): - assert_true( - ArrayWriter(arr, out_t, check_scaling=False).scaling_needed()) - assert_false(SlopeArrayWriter(arr, out_t).scaling_needed()) - assert_false(SlopeInterArrayWriter(arr, out_t).scaling_needed()) + assert ArrayWriter(arr, out_t, check_scaling=False).scaling_needed() + assert not SlopeArrayWriter(arr, out_t).scaling_needed() + assert not SlopeInterArrayWriter(arr, out_t).scaling_needed() # Floats as input always need scaling for in_t in FLOAT_TYPES: arr = np.ones(10, in_t) for out_t in IUINT_TYPES: # We need an arraywriter that will tolerate construction when # scaling is needed - assert_true(SlopeArrayWriter(arr, out_t).scaling_needed()) + assert SlopeArrayWriter(arr, out_t).scaling_needed() # in-range (u)ints don't need scaling for in_t in IUINT_TYPES: in_info = np.iinfo(in_t) @@ -237,18 +219,18 @@ def test_scaling_needed(): out_min, out_max = out_info.min, out_info.max if in_min >= out_min and in_max <= out_max: arr = np.array([in_min, in_max], in_t) - assert_true(np.can_cast(arr.dtype, out_t)) + assert np.can_cast(arr.dtype, out_t) # We've already tested this with can_cast above, but... - assert_false(ArrayWriter(arr, out_t).scaling_needed()) + assert not ArrayWriter(arr, out_t).scaling_needed() continue # The output data type does not include the input data range max_min = max(in_min, out_min) # 0 for input or output uint min_max = min(in_max, out_max) arr = np.array([max_min, min_max], in_t) - assert_false(ArrayWriter(arr, out_t).scaling_needed()) - assert_true(SlopeInterArrayWriter(arr + 1, out_t).scaling_needed()) + assert not ArrayWriter(arr, out_t).scaling_needed() + assert SlopeInterArrayWriter(arr + 1, out_t).scaling_needed() if in_t in INT_TYPES: - assert_true(SlopeInterArrayWriter(arr - 1, out_t).scaling_needed()) + assert SlopeInterArrayWriter(arr - 1, out_t).scaling_needed() def test_special_rt(): @@ -259,14 +241,15 @@ def test_special_rt(): for in_dtt in FLOAT_TYPES: for out_dtt in IUINT_TYPES: in_arr = arr.astype(in_dtt) - assert_raises(WriterError, ArrayWriter, in_arr, out_dtt) + with pytest.raises(WriterError): + ArrayWriter(in_arr, out_dtt) aw = ArrayWriter(in_arr, out_dtt, check_scaling=False) mn, mx = shared_range(float, out_dtt) - assert_true(np.allclose(round_trip(aw).astype(float), - [mx, 0, mn])) + assert np.allclose(round_trip(aw).astype(float), + [mx, 0, mn]) for klass in (SlopeArrayWriter, SlopeInterArrayWriter): aw = klass(in_arr, out_dtt) - assert_equal(get_slope_inter(aw), (1, 0)) + assert get_slope_inter(aw) == (1, 0) assert_array_equal(round_trip(aw), 0) for in_dtt, out_dtt, awt in itertools.product( FLOAT_TYPES, @@ -274,7 +257,7 @@ def test_special_rt(): (ArrayWriter, SlopeArrayWriter, SlopeInterArrayWriter)): arr = np.zeros((3,), dtype=in_dtt) aw = awt(arr, out_dtt) - assert_equal(get_slope_inter(aw), (1, 0)) + assert get_slope_inter(aw) == (1, 0) assert_array_equal(round_trip(aw), 0) @@ -285,7 +268,7 @@ def test_high_int2uint(): arr = np.array([2**63], dtype=np.uint64) out_type = np.int64 aw = SlopeInterArrayWriter(arr, out_type) - assert_equal(aw.inter, 2**63) + assert aw.inter == 2**63 def test_slope_inter_castable(): @@ -302,7 +285,8 @@ def test_slope_inter_castable(): for in_dtt in FLOAT_TYPES: for out_dtt in IUINT_TYPES: in_arr = arr.astype(in_dtt) - assert_raises(WriterError, ArrayWriter, in_arr, out_dtt) + with 
pytest.raises(WriterError): + ArrayWriter(in_arr, out_dtt) aw = SlopeArrayWriter(arr.astype(in_dtt), out_dtt) # no error aw = SlopeInterArrayWriter(arr.astype(in_dtt), out_dtt) # no error for in_dtt, out_dtt, arr, slope_only, slope_inter, neither in ( @@ -333,17 +317,20 @@ def test_slope_inter_castable(): if slope_only: SlopeArrayWriter(data, out_dtt) else: - assert_raises(WriterError, SlopeArrayWriter, data, out_dtt) + with pytest.raises(WriterError): + SlopeArrayWriter(data, out_dtt) # With scaling and intercept if slope_inter: SlopeInterArrayWriter(data, out_dtt) else: - assert_raises(WriterError, SlopeInterArrayWriter, data, out_dtt) + with pytest.raises(WriterError): + SlopeInterArrayWriter(data, out_dtt) # With neither if neither: ArrayWriter(data, out_dtt) else: - assert_raises(WriterError, ArrayWriter, data, out_dtt) + with pytest.raises(WriterError): + ArrayWriter(data, out_dtt) def test_calculate_scale(): @@ -353,27 +340,28 @@ def test_calculate_scale(): SAW = SlopeArrayWriter # Offset handles scaling when it can aw = SIAW(npa([-2, -1], dtype=np.int8), np.uint8) - assert_equal(get_slope_inter(aw), (1.0, -2.0)) + assert get_slope_inter(aw) == (1.0, -2.0) # Sign flip handles these cases aw = SAW(npa([-2, -1], dtype=np.int8), np.uint8) - assert_equal(get_slope_inter(aw), (-1.0, 0.0)) + assert get_slope_inter(aw) == (-1.0, 0.0) aw = SAW(npa([-2, 0], dtype=np.int8), np.uint8) - assert_equal(get_slope_inter(aw), (-1.0, 0.0)) + assert get_slope_inter(aw) == (-1.0, 0.0) # But not when min magnitude is too large (scaling mechanism kicks in) aw = SAW(npa([-510, 0], dtype=np.int16), np.uint8) - assert_equal(get_slope_inter(aw), (-2.0, 0.0)) + assert get_slope_inter(aw) == (-2.0, 0.0) # Or for floats (attempts to expand across range) aw = SAW(npa([-2, 0], dtype=np.float32), np.uint8) - assert_not_equal(get_slope_inter(aw), (-1.0, 0.0)) + assert get_slope_inter(aw) != (-1.0, 0.0) # Case where offset handles scaling aw = SIAW(npa([-1, 1], dtype=np.int8), np.uint8) - assert_equal(get_slope_inter(aw), (1.0, -1.0)) + assert get_slope_inter(aw) == (1.0, -1.0) # Can't work for no offset case - assert_raises(WriterError, SAW, npa([-1, 1], dtype=np.int8), np.uint8) + with pytest.raises(WriterError): + SAW(npa([-1, 1], dtype=np.int8), np.uint8) # Offset trick can't work when max is out of range aw = SIAW(npa([-1, 255], dtype=np.int16), np.uint8) slope_inter = get_slope_inter(aw) - assert_not_equal(slope_inter, (1.0, -1.0)) + assert slope_inter != (1.0, -1.0) def test_resets(): @@ -411,11 +399,11 @@ def test_no_offset_scale(): (126, 127), (-127, 127)): aw = SAW(np.array(data, dtype=np.float32), np.int8) - assert_equal(aw.slope, 1.0) + assert aw.slope == 1.0 aw = SAW(np.array([-126, 127 * 2.0], dtype=np.float32), np.int8) - assert_equal(aw.slope, 2) + assert aw.slope == 2 aw = SAW(np.array([-128 * 2.0, 127], dtype=np.float32), np.int8) - assert_equal(aw.slope, 2) + assert aw.slope == 2 # Test that nasty abs behavior does not upset us n = -2**15 aw = SAW(np.array([n, n], dtype=np.int16), np.uint8) @@ -426,17 +414,17 @@ def test_with_offset_scale(): # Tests of specific cases in slope, inter SIAW = SlopeInterArrayWriter aw = SIAW(np.array([0, 127], dtype=np.int8), np.uint8) - assert_equal((aw.slope, aw.inter), (1, 0)) # in range + assert (aw.slope, aw.inter) == (1, 0) # in range aw = SIAW(np.array([-1, 126], dtype=np.int8), np.uint8) - assert_equal((aw.slope, aw.inter), (1, -1)) # offset only + assert (aw.slope, aw.inter) == (1, -1) # offset only aw = SIAW(np.array([-1, 254], dtype=np.int16), np.uint8) - 
assert_equal((aw.slope, aw.inter), (1, -1)) # offset only + assert (aw.slope, aw.inter) == (1, -1) # offset only aw = SIAW(np.array([-1, 255], dtype=np.int16), np.uint8) - assert_not_equal((aw.slope, aw.inter), (1, -1)) # Too big for offset only + assert (aw.slope, aw.inter) != (1, -1) # Too big for offset only aw = SIAW(np.array([-256, -2], dtype=np.int16), np.uint8) - assert_equal((aw.slope, aw.inter), (1, -256)) # offset only + assert (aw.slope, aw.inter) == (1, -256) # offset only aw = SIAW(np.array([-256, -2], dtype=np.int16), np.int8) - assert_equal((aw.slope, aw.inter), (1, -129)) # offset only + assert (aw.slope, aw.inter) == (1, -129) # offset only def test_io_scaling(): @@ -470,10 +458,10 @@ def test_io_scaling(): # Slope might be negative max_miss = np.abs(aw.slope) / 2. abs_err = np.abs(arr - arr3) - assert_true(np.all(abs_err <= max_miss)) + assert np.all(abs_err <= max_miss) if out_type in UINT_TYPES and 0 in (min(arr), max(arr)): # Check that error is minimized for 0 as min or max - assert_true(min(abs_err) == abs_err[arr == 0]) + assert min(abs_err) == abs_err[arr == 0] bio.truncate(0) bio.seek(0) @@ -496,10 +484,10 @@ def test_input_ranges(): max_miss = np.abs(aw.slope) / working_type(2.) + work_eps * 10 abs_err = np.abs(arr - arr3) max_err = np.abs(arr) * work_eps + max_miss - assert_true(np.all(abs_err <= max_err)) + assert np.all(abs_err <= max_err) if out_type in UINT_TYPES and 0 in (min(arr), max(arr)): # Check that error is minimized for 0 as min or max - assert_true(min(abs_err) == abs_err[arr == 0]) + assert min(abs_err) == abs_err[arr == 0] bio.truncate(0) bio.seek(0) @@ -520,12 +508,13 @@ def test_nan2zero(): assert_array_equal(np.isnan(data_back), [True, False]) # Deprecation warning for nan2zero as argument to `to_fileobj` with error_warnings(): - assert_raises(DeprecationWarning, - aw.to_fileobj, BytesIO(), 'F', True) - assert_raises(DeprecationWarning, - aw.to_fileobj, BytesIO(), 'F', nan2zero=True) + with pytest.deprecated_call(): + aw.to_fileobj(BytesIO(), 'F', True) + with pytest.deprecated_call(): + aw.to_fileobj(BytesIO(), 'F', nan2zero=True) # Error if nan2zero is not the value set at initialization - assert_raises(WriterError, aw.to_fileobj, BytesIO(), 'F', False) + with pytest.raises(WriterError): + aw.to_fileobj(BytesIO(), 'F', False) # set explicitly aw = awt(arr, np.float32, nan2zero=True, **kwargs) data_back = round_trip(aw) @@ -541,12 +530,13 @@ def test_nan2zero(): assert_array_equal(data_back, [astype_res, 99]) # Deprecation warning for nan2zero as argument to `to_fileobj` with error_warnings(): - assert_raises(DeprecationWarning, - aw.to_fileobj, BytesIO(), 'F', False) - assert_raises(DeprecationWarning, - aw.to_fileobj, BytesIO(), 'F', nan2zero=False) + with pytest.deprecated_call(): + aw.to_fileobj(BytesIO(), 'F', False) + with pytest.deprecated_call(): + aw.to_fileobj(BytesIO(), 'F', nan2zero=False) # Error if nan2zero is not the value set at initialization - assert_raises(WriterError, aw.to_fileobj, BytesIO(), 'F', True) + with pytest.raises(WriterError): + aw.to_fileobj(BytesIO(), 'F', True) def test_byte_orders(): @@ -600,55 +590,62 @@ def test_to_float(): for klass in (SlopeInterArrayWriter, SlopeArrayWriter, ArrayWriter): if in_type in COMPLEX_TYPES and out_type in FLOAT_TYPES: - assert_raises(WriterError, klass, arr, out_type) + with pytest.raises(WriterError): + klass(arr, out_type) continue aw = klass(arr, out_type) - assert_true(aw.array is arr) - assert_equal(aw.out_dtype, out_type) + assert aw.array is arr + assert aw.out_dtype 
== out_type arr_back = round_trip(aw) assert_array_equal(arr.astype(out_type), arr_back) # Check too-big values overflowed correctly out_min, out_max = out_info['min'], out_info['max'] - assert_true(np.all(arr_back[arr > out_max] == np.inf)) - assert_true(np.all(arr_back[arr < out_min] == -np.inf)) + assert np.all(arr_back[arr > out_max] == np.inf) + assert np.all(arr_back[arr < out_min] == -np.inf) def test_dumber_writers(): arr = np.arange(10, dtype=np.float64) aw = SlopeArrayWriter(arr) aw.slope = 2.0 - assert_equal(aw.slope, 2.0) - assert_raises(AttributeError, getattr, aw, 'inter') + assert aw.slope == 2.0 + with pytest.raises(AttributeError): + aw.inter aw = ArrayWriter(arr) - assert_raises(AttributeError, getattr, aw, 'slope') - assert_raises(AttributeError, getattr, aw, 'inter') + with pytest.raises(AttributeError): + aw.slope + with pytest.raises(AttributeError): + aw.inter # Attempt at scaling should raise error for dumb type - assert_raises(WriterError, ArrayWriter, arr, np.int16) + with pytest.raises(WriterError): + ArrayWriter(arr, np.int16) def test_writer_maker(): arr = np.arange(10, dtype=np.float64) aw = make_array_writer(arr, np.float64) - assert_true(isinstance(aw, SlopeInterArrayWriter)) + assert isinstance(aw, SlopeInterArrayWriter) aw = make_array_writer(arr, np.float64, True, True) - assert_true(isinstance(aw, SlopeInterArrayWriter)) + assert isinstance(aw, SlopeInterArrayWriter) aw = make_array_writer(arr, np.float64, True, False) - assert_true(isinstance(aw, SlopeArrayWriter)) + assert isinstance(aw, SlopeArrayWriter) aw = make_array_writer(arr, np.float64, False, False) - assert_true(isinstance(aw, ArrayWriter)) - assert_raises(ValueError, make_array_writer, arr, np.float64, False) - assert_raises(ValueError, make_array_writer, arr, np.float64, False, True) + assert isinstance(aw, ArrayWriter) + with pytest.raises(ValueError): + make_array_writer(arr, np.float64, False) + with pytest.raises(ValueError): + make_array_writer(arr, np.float64, False, True) # Does calc_scale get run by default? 
aw = make_array_writer(arr, np.int16, calc_scale=False) - assert_equal((aw.slope, aw.inter), (1, 0)) + assert (aw.slope, aw.inter) == (1, 0) aw.calc_scale() slope, inter = aw.slope, aw.inter - assert_false((slope, inter) == (1, 0)) + assert not (slope, inter) == (1, 0) # Should run by default aw = make_array_writer(arr, np.int16) - assert_equal((aw.slope, aw.inter), (slope, inter)) + assert (aw.slope, aw.inter) == (slope, inter) aw = make_array_writer(arr, np.int16, calc_scale=True) - assert_equal((aw.slope, aw.inter), (slope, inter)) + assert (aw.slope, aw.inter) == (slope, inter) def test_float_int_min_max(): @@ -667,7 +664,7 @@ def test_float_int_min_max(): except ScalingError: continue arr_back_sc = round_trip(aw) - assert_true(np.allclose(arr, arr_back_sc)) + assert np.allclose(arr, arr_back_sc) def test_int_int_min_max(): @@ -686,7 +683,7 @@ def test_int_int_min_max(): # integer allclose adiff = int_abs(arr - arr_back_sc) rdiff = adiff / (arr + eps) - assert_true(np.all(rdiff < rtol)) + assert np.all(rdiff < rtol) def test_int_int_slope(): @@ -707,12 +704,12 @@ def test_int_int_slope(): aw = SlopeArrayWriter(arr, out_dt) except ScalingError: continue - assert_false(aw.slope == 0) + assert not aw.slope == 0 arr_back_sc = round_trip(aw) # integer allclose adiff = int_abs(arr - arr_back_sc) rdiff = adiff / (arr + eps) - assert_true(np.all(rdiff < rtol)) + assert np.all(rdiff < rtol) def test_float_int_spread(): @@ -732,7 +729,7 @@ def test_float_int_spread(): # Simulate allclose test with large atol diff = np.abs(arr_t - arr_back_sc) rdiff = diff / np.abs(arr_t) - assert_true(np.all((diff <= max_miss) | (rdiff <= 1e-5))) + assert np.all((diff <= max_miss) | (rdiff <= 1e-5)) def rt_err_estimate(arr_t, out_dtype, slope, inter): @@ -769,7 +766,7 @@ def test_rt_bias(): aw.inter) # Hokey use of max_miss as a std estimate bias_thresh = np.max([max_miss / np.sqrt(count), eps]) - assert_true(np.abs(bias) < bias_thresh) + assert np.abs(bias) < bias_thresh def test_nan2zero_scaling(): @@ -809,10 +806,10 @@ def test_nan2zero_scaling(): back_nan_0 = round_trip(nan_0_aw) * float(sign) zero_aw = awt(zero_arr, out_dt, nan2zero=True) back_zero = round_trip(zero_aw) * float(sign) - assert_true(np.allclose(back_nan[1:], back_zero[1:])) + assert np.allclose(back_nan[1:], back_zero[1:]) assert_array_equal(back_nan[1:], back_nan_0[2:]) - assert_true(np.abs(back_nan[0] - back_zero[0]) < 1e-2) - assert_equal(*back_nan_0[:2]) + assert np.abs(back_nan[0] - back_zero[0]) < 1e-2 + assert back_nan_0[0] == back_nan_0[1] def test_finite_range_nan(): @@ -854,11 +851,11 @@ def test_finite_range_nan(): continue # Should not matter about the order of finite range method call # and has_nan property - test this is true - assert_equal(aw.has_nan, has_nan) - assert_equal(aw.finite_range(), res) + assert aw.has_nan == has_nan + assert aw.finite_range() == res aw = awt(in_arr, out_type, **kwargs) - assert_equal(aw.finite_range(), res) - assert_equal(aw.has_nan, has_nan) + assert aw.finite_range() == res + assert aw.has_nan == has_nan # Check float types work as complex in_arr = np.array(in_arr) if in_arr.dtype.kind == 'f': @@ -868,10 +865,11 @@ def test_finite_range_nan(): except WriterError: continue aw = awt(c_arr, out_type, **kwargs) - assert_equal(aw.has_nan, has_nan) - assert_equal(aw.finite_range(), res) + assert aw.has_nan == has_nan + assert aw.finite_range() == res # Structured type cannot be nan and we can test this a = np.array([[1., 0, 1], [2, 3, 4]]).view([('f1', 'f')]) aw = awt(a, a.dtype, **kwargs) - 
assert_raises(TypeError, aw.finite_range) - assert_false(aw.has_nan) + with pytest.raises(TypeError): + aw.finite_range() + assert not aw.has_nan diff --git a/nibabel/tests/test_batteryrunners.py b/nibabel/tests/test_batteryrunners.py index 71cbbba072..69f18b75ac 100644 --- a/nibabel/tests/test_batteryrunners.py +++ b/nibabel/tests/test_batteryrunners.py @@ -9,15 +9,13 @@ ''' Tests for BatteryRunner and Report objects ''' -from six import StringIO +from io import StringIO import logging from ..batteryrunners import BatteryRunner, Report -from ..testing import (assert_true, assert_false, assert_equal, - assert_not_equal, assert_raises) - +import pytest # define some trivial functions as checks def chk1(obj, fix=False): @@ -80,64 +78,67 @@ def chk_error(obj, fix=False): def test_init_basic(): # With no args, raise - assert_raises(TypeError, BatteryRunner) + with pytest.raises(TypeError): + BatteryRunner() # Len returns number of checks battrun = BatteryRunner((chk1,)) - assert_equal(len(battrun), 1) + assert len(battrun) == 1 battrun = BatteryRunner((chk1, chk2)) - assert_equal(len(battrun), 2) + assert len(battrun) == 2 def test_init_report(): rep = Report() - assert_equal(rep, Report(Exception, 0, '', '')) + assert rep == Report(Exception, 0, '', '') def test_report_strings(): rep = Report() - assert_not_equal(rep.__str__(), '') - assert_equal(rep.message, '') + assert rep.__str__() != '' + assert rep.message == '' str_io = StringIO() rep.write_raise(str_io) - assert_equal(str_io.getvalue(), '') + assert str_io.getvalue() == '' rep = Report(ValueError, 20, 'msg', 'fix') rep.write_raise(str_io) - assert_equal(str_io.getvalue(), '') + assert str_io.getvalue() == '' rep.problem_level = 30 rep.write_raise(str_io) - assert_equal(str_io.getvalue(), 'Level 30: msg; fix\n') + assert str_io.getvalue() == 'Level 30: msg; fix\n' str_io.truncate(0) str_io.seek(0) # No fix string, no fix message rep.fix_msg = '' rep.write_raise(str_io) - assert_equal(str_io.getvalue(), 'Level 30: msg\n') + assert str_io.getvalue() == 'Level 30: msg\n' rep.fix_msg = 'fix' str_io.truncate(0) str_io.seek(0) # If we drop the level, nothing goes to the log rep.problem_level = 20 rep.write_raise(str_io) - assert_equal(str_io.getvalue(), '') + assert str_io.getvalue() == '' # Unless we set the default log level in the call rep.write_raise(str_io, log_level=20) - assert_equal(str_io.getvalue(), 'Level 20: msg; fix\n') + assert str_io.getvalue() == 'Level 20: msg; fix\n' str_io.truncate(0) str_io.seek(0) # If we set the error level down this low, we raise an error - assert_raises(ValueError, rep.write_raise, str_io, 20) + with pytest.raises(ValueError): + rep.write_raise(str_io, 20) # But the log level wasn't low enough to do a log entry - assert_equal(str_io.getvalue(), '') + assert str_io.getvalue() == '' # Error still raised with lower log threshold, but now we do get a # log entry - assert_raises(ValueError, rep.write_raise, str_io, 20, 20) - assert_equal(str_io.getvalue(), 'Level 20: msg; fix\n') + with pytest.raises(ValueError): + rep.write_raise(str_io, 20, 20) + assert str_io.getvalue() == 'Level 20: msg; fix\n' # If there's no error, we can't raise str_io.truncate(0) str_io.seek(0) rep.error = None rep.write_raise(str_io, 20) - assert_equal(str_io.getvalue(), '') + assert str_io.getvalue() == '' def test_logging(): @@ -147,10 +148,10 @@ def test_logging(): logger.setLevel(30) # defaultish level logger.addHandler(logging.StreamHandler(str_io)) rep.log_raise(logger) - assert_equal(str_io.getvalue(), '') + assert 
str_io.getvalue() == '' rep.problem_level = 30 rep.log_raise(logger) - assert_equal(str_io.getvalue(), 'msg; fix\n') + assert str_io.getvalue() == 'msg; fix\n' str_io.truncate(0) str_io.seek(0) @@ -158,43 +159,19 @@ def test_logging(): def test_checks(): battrun = BatteryRunner((chk1,)) reports = battrun.check_only({}) - assert_equal(reports[0], - Report(KeyError, - 20, - 'no "testkey"', - '')) + assert reports[0] == Report(KeyError, 20, 'no "testkey"', '') obj, reports = battrun.check_fix({}) - assert_equal(reports[0], - Report(KeyError, - 20, - 'no "testkey"', - 'added "testkey"')) - assert_equal(obj, {'testkey': 1}) + assert reports[0] == Report(KeyError, 20, 'no "testkey"', 'added "testkey"') + assert obj == {'testkey': 1} battrun = BatteryRunner((chk1, chk2)) reports = battrun.check_only({}) - assert_equal(reports[0], - Report(KeyError, - 20, - 'no "testkey"', - '')) - assert_equal(reports[1], - Report(KeyError, - 20, - 'no "testkey"', - '')) + assert reports[0] == Report(KeyError, 20, 'no "testkey"', '') + assert reports[1] == Report(KeyError, 20, 'no "testkey"', '') obj, reports = battrun.check_fix({}) # In the case of fix, the previous fix exposes a different error # Note, because obj is mutable, first and second point to modified # (and final) dictionary output_obj = {'testkey': 0} - assert_equal(reports[0], - Report(KeyError, - 20, - 'no "testkey"', - 'added "testkey"')) - assert_equal(reports[1], - Report(ValueError, - 10, - '"testkey" != 0', - 'set "testkey" to 0')) - assert_equal(obj, output_obj) + assert reports[0] == Report(KeyError, 20, 'no "testkey"', 'added "testkey"') + assert reports[1] == Report(ValueError, 10, '"testkey" != 0', 'set "testkey" to 0') + assert obj == output_obj diff --git a/nibabel/tests/test_brikhead.py b/nibabel/tests/test_brikhead.py index c1632c06c2..45e149b93b 100644 --- a/nibabel/tests/test_brikhead.py +++ b/nibabel/tests/test_brikhead.py @@ -6,7 +6,6 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -from __future__ import division, print_function, absolute_import from os.path import join as pjoin @@ -15,12 +14,11 @@ from .. import load, Nifti1Image from .. 
import brikhead -from nose.tools import (assert_true, assert_equal, assert_raises) +import pytest from numpy.testing import assert_array_equal -from ..testing import data_path +from ..testing import data_path, assert_data_similar from .test_fileslice import slicer_samples -from .test_helpers import assert_data_similar EXAMPLE_IMAGES = [ dict( @@ -81,10 +79,10 @@ def test_makehead(self): for tp in self.test_files: head1 = self.module.AFNIHeader.from_fileobj(tp['head']) head2 = self.module.AFNIHeader.from_header(head1) - assert_equal(head1, head2) - with assert_raises(self.module.AFNIHeaderError): + assert head1 == head2 + with pytest.raises(self.module.AFNIHeaderError): self.module.AFNIHeader.from_header(header=None) - with assert_raises(self.module.AFNIHeaderError): + with pytest.raises(self.module.AFNIHeaderError): self.module.AFNIHeader.from_header(tp['fname']) @@ -95,36 +93,36 @@ class TestAFNIImage(object): def test_brikheadfile(self): for tp in self.test_files: brik = self.module.load(tp['fname']) - assert_equal(brik.get_data_dtype().type, tp['dtype']) - assert_equal(brik.shape, tp['shape']) - assert_equal(brik.header.get_zooms(), tp['zooms']) + assert brik.get_data_dtype().type == tp['dtype'] + assert brik.shape == tp['shape'] + assert brik.header.get_zooms() == tp['zooms'] assert_array_equal(brik.affine, tp['affine']) - assert_equal(brik.header.get_space(), tp['space']) - data = brik.get_data() - assert_equal(data.shape, tp['shape']) + assert brik.header.get_space() == tp['space'] + data = brik.get_fdata() + assert data.shape == tp['shape'] assert_array_equal(brik.dataobj.scaling, tp['scaling']) - assert_equal(brik.header.get_volume_labels(), tp['labels']) + assert brik.header.get_volume_labels() == tp['labels'] def test_load(self): # Check highest level load of brikhead works for tp in self.test_files: img = self.module.load(tp['head']) - data = img.get_data() - assert_equal(data.shape, tp['shape']) + data = img.get_fdata() + assert data.shape == tp['shape'] # min, max, mean values assert_data_similar(data, tp) # check if file can be converted to nifti ni_img = Nifti1Image.from_image(img) assert_array_equal(ni_img.affine, tp['affine']) - assert_array_equal(ni_img.get_data(), data) + assert_array_equal(ni_img.get_fdata(), data) def test_array_proxy_slicing(self): # Test slicing of array proxy for tp in self.test_files: img = self.module.load(tp['fname']) - arr = img.get_data() + arr = img.get_fdata() prox = img.dataobj - assert_true(prox.is_proxy) + assert prox.is_proxy for sliceobj in slicer_samples(img.shape): assert_array_equal(arr[sliceobj], prox[sliceobj]) @@ -135,7 +133,7 @@ class TestBadFiles(object): def test_brikheadfile(self): for tp in self.test_files: - with assert_raises(tp['err']): + with pytest.raises(tp['err']): self.module.load(tp['head']) @@ -146,5 +144,5 @@ class TestBadVars(object): def test_unpack_var(self): for var in self.vars: - with assert_raises(self.module.AFNIHeaderError): + with pytest.raises(self.module.AFNIHeaderError): self.module._unpack_var(var) diff --git a/nibabel/tests/test_casting.py b/nibabel/tests/test_casting.py index c9d3645ad1..b8f56454b5 100644 --- a/nibabel/tests/test_casting.py +++ b/nibabel/tests/test_casting.py @@ -12,7 +12,7 @@ from numpy.testing import (assert_array_almost_equal, assert_array_equal) -from nose.tools import (assert_true, assert_false, assert_equal, assert_raises) +import pytest def test_shared_range(): @@ -35,7 +35,7 @@ def test_shared_range(): # not have an exact representation. 
fimax = int_to_float(imax, ft) if np.isfinite(fimax): - assert_true(int(fimax) != imax) + assert int(fimax) != imax # Therefore the imax, cast back to float, and to integer, will # overflow. If it overflows to the imax, we need to allow for # that possibility in the testing of our overflowed values @@ -43,13 +43,11 @@ def test_shared_range(): if imax_roundtrip == imax: thresh_overflow = True if thresh_overflow: - assert_true(np.all( - (bit_bigger == casted_mx) | - (bit_bigger == imax))) + assert np.all((bit_bigger == casted_mx) | (bit_bigger == imax)) else: - assert_true(np.all((bit_bigger <= casted_mx))) + assert np.all((bit_bigger <= casted_mx)) if it in np.sctypes['uint']: - assert_equal(mn, 0) + assert mn == 0 continue # And something larger for the minimum with suppress_warnings(): # overflow @@ -63,7 +61,7 @@ def test_shared_range(): # not have an exact representation. fimin = int_to_float(imin, ft) if np.isfinite(fimin): - assert_true(int(fimin) != imin) + assert int(fimin) != imin # Therefore the imin, cast back to float, and to integer, will # overflow. If it overflows to the imin, we need to allow for # that possibility in the testing of our overflowed values @@ -71,11 +69,9 @@ def test_shared_range(): if imin_roundtrip == imin: thresh_overflow = True if thresh_overflow: - assert_true(np.all( - (bit_smaller == casted_mn) | - (bit_smaller == imin))) + assert np.all((bit_smaller == casted_mn) | (bit_smaller == imin)) else: - assert_true(np.all((bit_smaller >= casted_mn))) + assert np.all((bit_smaller >= casted_mn)) def test_shared_range_inputs(): @@ -114,7 +110,8 @@ def test_casting(): im_exp[1] = ii.max assert_array_equal(iarr, im_exp) # NaNs, with nan2zero False, gives error - assert_raises(CastingError, float_to_int, farr, it, False) + with pytest.raises(CastingError): + float_to_int(farr, it, False) # We can pass through NaNs if we really want exp_arr[arr.index(np.nan)] = ft(np.nan).astype(it) with np.errstate(invalid='ignore'): @@ -130,7 +127,8 @@ def test_casting(): with np.errstate(invalid='ignore'): assert_array_equal(float_to_int(np.nan, np.int16), [0]) # Test nans give error if not nan2zero - assert_raises(CastingError, float_to_int, np.nan, np.int16, False) + with pytest.raises(CastingError): + float_to_int(np.nan, np.int16, False) def test_int_abs(): @@ -139,25 +137,25 @@ def test_int_abs(): in_arr = np.array([info.min, info.max], dtype=itype) idtype = np.dtype(itype) udtype = np.dtype(idtype.str.replace('i', 'u')) - assert_equal(udtype.kind, 'u') - assert_equal(idtype.itemsize, udtype.itemsize) + assert udtype.kind == 'u' + assert idtype.itemsize == udtype.itemsize mn, mx = in_arr e_mn = as_int(mx) + 1 # as_int needed for numpy 1.4.1 casting - assert_equal(int_abs(mx), mx) - assert_equal(int_abs(mn), e_mn) + assert int_abs(mx) == mx + assert int_abs(mn) == e_mn assert_array_equal(int_abs(in_arr), [e_mn, mx]) def test_floor_log2(): - assert_equal(floor_log2(2**9 + 1), 9) - assert_equal(floor_log2(-2**9 + 1), 8) - assert_equal(floor_log2(2), 1) - assert_equal(floor_log2(1), 0) - assert_equal(floor_log2(0.5), -1) - assert_equal(floor_log2(0.75), -1) - assert_equal(floor_log2(0.25), -2) - assert_equal(floor_log2(0.24), -3) - assert_equal(floor_log2(0), None) + assert floor_log2(2**9 + 1) == 9 + assert floor_log2(-2**9 + 1) == 8 + assert floor_log2(2) == 1 + assert floor_log2(1) == 0 + assert floor_log2(0.5) == -1 + assert floor_log2(0.75) == -1 + assert floor_log2(0.25) == -2 + assert floor_log2(0.24) == -3 + assert floor_log2(0) is None def test_able_int_type(): @@ -176,7 
+174,7 @@ def test_able_int_type(): ([-1, 2**64 - 1], None), ([0, 2**64 - 1], np.uint64), ([0, 2**64], None)): - assert_equal(able_int_type(vals), exp_out) + assert able_int_type(vals) == exp_out def test_able_casting(): @@ -193,11 +191,11 @@ def test_able_casting(): ApBt = (A + B).dtype.type able_type = able_int_type([in_mn, in_mx, out_mn, out_mx]) if able_type is None: - assert_equal(ApBt, np.float64) + assert ApBt == np.float64 continue # Use str for comparison to avoid int32/64 vs intp comparison # failures - assert_equal(np.dtype(ApBt).str, np.dtype(able_type).str) + assert np.dtype(ApBt).str == np.dtype(able_type).str def test_best_float(): @@ -212,51 +210,51 @@ def test_best_float(): best = best_float() end_of_ints = np.float64(2**53) # float64 has continuous integers up to 2**53 - assert_equal(end_of_ints, end_of_ints + 1) + assert end_of_ints == end_of_ints + 1 # longdouble may have more, but not on 32 bit windows, at least end_of_ints = np.longdouble(2**53) if (end_of_ints == (end_of_ints + 1) or # off continuous integers machine() == 'sparc64' or # crippling slow longdouble on sparc longdouble_precision_improved()): # Windows precisions can change - assert_equal(best, np.float64) + assert best == np.float64 else: - assert_equal(best, np.longdouble) + assert best == np.longdouble def test_longdouble_precision_improved(): # Just check that this can only be True on windows, msvc from numpy.distutils.ccompiler import get_default_compiler if not (os.name == 'nt' and get_default_compiler() == 'msvc'): - assert_false(longdouble_precision_improved()) + assert not longdouble_precision_improved() def test_ulp(): - assert_equal(ulp(), np.finfo(np.float64).eps) - assert_equal(ulp(1.0), np.finfo(np.float64).eps) - assert_equal(ulp(np.float32(1.0)), np.finfo(np.float32).eps) - assert_equal(ulp(np.float32(1.999)), np.finfo(np.float32).eps) + assert ulp() == np.finfo(np.float64).eps + assert ulp(1.0) == np.finfo(np.float64).eps + assert ulp(np.float32(1.0)) == np.finfo(np.float32).eps + assert ulp(np.float32(1.999)) == np.finfo(np.float32).eps # Integers always return 1 - assert_equal(ulp(1), 1) - assert_equal(ulp(2**63 - 1), 1) + assert ulp(1) == 1 + assert ulp(2**63 - 1) == 1 # negative / positive same - assert_equal(ulp(-1), 1) - assert_equal(ulp(7.999), ulp(4.0)) - assert_equal(ulp(-7.999), ulp(4.0)) - assert_equal(ulp(np.float64(2**54 - 2)), 2) - assert_equal(ulp(np.float64(2**54)), 4) - assert_equal(ulp(np.float64(2**54)), 4) + assert ulp(-1) == 1 + assert ulp(7.999) == ulp(4.0) + assert ulp(-7.999) == ulp(4.0) + assert ulp(np.float64(2**54 - 2)) == 2 + assert ulp(np.float64(2**54)) == 4 + assert ulp(np.float64(2**54)) == 4 # Infs, NaNs return nan - assert_true(np.isnan(ulp(np.inf))) - assert_true(np.isnan(ulp(-np.inf))) - assert_true(np.isnan(ulp(np.nan))) + assert np.isnan(ulp(np.inf)) + assert np.isnan(ulp(-np.inf)) + assert np.isnan(ulp(np.nan)) # 0 gives subnormal smallest subn64 = np.float64(2**(-1022 - 52)) subn32 = np.float32(2**(-126 - 23)) - assert_equal(ulp(0.0), subn64) - assert_equal(ulp(np.float64(0)), subn64) - assert_equal(ulp(np.float32(0)), subn32) + assert ulp(0.0) == subn64 + assert ulp(np.float64(0)) == subn64 + assert ulp(np.float32(0)) == subn32 # as do multiples of subnormal smallest - assert_equal(ulp(subn64 * np.float64(2**52)), subn64) - assert_equal(ulp(subn64 * np.float64(2**53)), subn64 * 2) - assert_equal(ulp(subn32 * np.float32(2**23)), subn32) - assert_equal(ulp(subn32 * np.float32(2**24)), subn32 * 2) + assert ulp(subn64 * np.float64(2**52)) == subn64 + 
assert ulp(subn64 * np.float64(2**53)) == subn64 * 2 + assert ulp(subn32 * np.float32(2**23)) == subn32 + assert ulp(subn32 * np.float32(2**24)) == subn32 * 2 diff --git a/nibabel/tests/test_checkwarns.py b/nibabel/tests/test_checkwarns.py deleted file mode 100644 index 11c7422326..0000000000 --- a/nibabel/tests/test_checkwarns.py +++ /dev/null @@ -1,21 +0,0 @@ -""" Tests for warnings context managers -""" -from __future__ import division, print_function, absolute_import - -from nose.tools import assert_equal -from ..testing import clear_and_catch_warnings, suppress_warnings - - -def test_ignore_and_error_warnings(): - with suppress_warnings(): - from .. import checkwarns - - with clear_and_catch_warnings() as w: - checkwarns.IgnoreWarnings() - assert_equal(len(w), 1) - assert_equal(w[0].category, FutureWarning) - - with clear_and_catch_warnings() as w: - checkwarns.ErrorWarnings() - assert_equal(len(w), 1) - assert_equal(w[0].category, FutureWarning) diff --git a/nibabel/tests/test_data.py b/nibabel/tests/test_data.py index fbb225838d..e5d5000438 100644 --- a/nibabel/tests/test_data.py +++ b/nibabel/tests/test_data.py @@ -1,7 +1,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ''' Tests for data module ''' -from __future__ import division, print_function, absolute_import import os from os.path import join as pjoin from os import environ as env @@ -17,93 +16,73 @@ from .. import data as nibd -from nose import with_setup -from nose.tools import (assert_equal, assert_raises, raises, assert_false) +import pytest -from .test_environment import (setup_environment, - teardown_environment, - DATA_KEY, - USER_KEY) +from .test_environment import with_environment, DATA_KEY, USER_KEY -DATA_FUNCS = {} - -def setup_data_env(): - setup_environment() - global DATA_FUNCS +@pytest.fixture +def with_nimd_env(request, with_environment): + DATA_FUNCS = {} DATA_FUNCS['home_dir_func'] = nibd.get_nipy_user_dir DATA_FUNCS['sys_dir_func'] = nibd.get_nipy_system_dir DATA_FUNCS['path_func'] = nibd.get_data_path - - -def teardown_data_env(): - teardown_environment() + yield nibd.get_nipy_user_dir = DATA_FUNCS['home_dir_func'] nibd.get_nipy_system_dir = DATA_FUNCS['sys_dir_func'] nibd.get_data_path = DATA_FUNCS['path_func'] -# decorator to use setup, teardown environment -with_environment = with_setup(setup_data_env, teardown_data_env) - - def test_datasource(): # Tests for DataSource pth = pjoin('some', 'path') ds = Datasource(pth) - yield assert_equal, ds.get_filename('unlikeley'), pjoin(pth, 'unlikeley') - yield (assert_equal, ds.get_filename('un', 'like', 'ley'), - pjoin(pth, 'un', 'like', 'ley')) + assert ds.get_filename('unlikeley') == pjoin(pth, 'unlikeley') + assert ds.get_filename('un', 'like', 'ley') == pjoin(pth, 'un', 'like', 'ley') def test_versioned(): with TemporaryDirectory() as tmpdir: - yield (assert_raises, - DataError, - VersionedDatasource, - tmpdir) + with pytest.raises(DataError): + VersionedDatasource(tmpdir) tmpfile = pjoin(tmpdir, 'config.ini') # ini file, but wrong section with open(tmpfile, 'wt') as fobj: fobj.write('[SOMESECTION]\n') fobj.write('version = 0.1\n') - yield (assert_raises, - DataError, - VersionedDatasource, - tmpdir) + with pytest.raises(DataError): + VersionedDatasource(tmpdir) # ini file, but right section, wrong key with open(tmpfile, 'wt') as fobj: fobj.write('[DEFAULT]\n') fobj.write('somekey = 0.1\n') - yield (assert_raises, - DataError, - VersionedDatasource, - tmpdir) + with 
pytest.raises(DataError): + VersionedDatasource(tmpdir) # ini file, right section and key with open(tmpfile, 'wt') as fobj: fobj.write('[DEFAULT]\n') fobj.write('version = 0.1\n') vds = VersionedDatasource(tmpdir) - yield assert_equal, vds.version, '0.1' - yield assert_equal, vds.version_no, 0.1 - yield assert_equal, vds.major_version, 0 - yield assert_equal, vds.minor_version, 1 - yield assert_equal, vds.get_filename('config.ini'), tmpfile + assert vds.version == '0.1' + assert vds.version_no == 0.1 + assert vds.major_version == 0 + assert vds.minor_version == 1 + assert vds.get_filename('config.ini') == tmpfile # ini file, right section and key, funny value with open(tmpfile, 'wt') as fobj: fobj.write('[DEFAULT]\n') fobj.write('version = 0.1.2.dev\n') vds = VersionedDatasource(tmpdir) - yield assert_equal, vds.version, '0.1.2.dev' - yield assert_equal, vds.version_no, 0.1 - yield assert_equal, vds.major_version, 0 - yield assert_equal, vds.minor_version, 1 + assert vds.version == '0.1.2.dev' + assert vds.version_no == 0.1 + assert vds.major_version == 0 + assert vds.minor_version == 1 def test__cfg_value(): # no file, return '' - yield assert_equal, _cfg_value('/implausible_file'), '' + assert _cfg_value('/implausible_file') == '' # try files try: fd, tmpfile = tempfile.mkstemp() @@ -112,16 +91,16 @@ def test__cfg_value(): fobj.write('[strange section]\n') fobj.write('path = /some/path\n') fobj.flush() - yield assert_equal, _cfg_value(tmpfile), '' + assert _cfg_value(tmpfile) == '' # right section, wrong key fobj.write('[DATA]\n') fobj.write('funnykey = /some/path\n') fobj.flush() - yield assert_equal, _cfg_value(tmpfile), '' + assert _cfg_value(tmpfile) == '' # right section, right key fobj.write('path = /some/path\n') fobj.flush() - yield assert_equal, _cfg_value(tmpfile), '/some/path' + assert _cfg_value(tmpfile) == '/some/path' fobj.close() finally: try: @@ -130,8 +109,7 @@ def test__cfg_value(): pass -@with_environment -def test_data_path(): +def test_data_path(with_nimd_env): # wipe out any sources of data paths if DATA_KEY in env: del env[DATA_KEY] @@ -148,15 +126,15 @@ def test_data_path(): def_dirs = [pjoin(sys.prefix, 'share', 'nipy')] if sys.prefix == '/usr': def_dirs.append(pjoin('/usr/local', 'share', 'nipy')) - assert_equal(old_pth, def_dirs + ['/user/path']) + assert old_pth == def_dirs + ['/user/path'] # then we'll try adding some of our own tst_pth = '/a/path' + os.path.pathsep + '/b/ path' tst_list = ['/a/path', '/b/ path'] # First, an environment variable os.environ[DATA_KEY] = tst_list[0] - assert_equal(get_data_path(), tst_list[:1] + old_pth) + assert get_data_path() == tst_list[:1] + old_pth os.environ[DATA_KEY] = tst_pth - assert_equal(get_data_path(), tst_list + old_pth) + assert get_data_path() == tst_list + old_pth del os.environ[DATA_KEY] # Next, make a fake user directory, and put a file in there with TemporaryDirectory() as tmpdir: @@ -165,9 +143,9 @@ def test_data_path(): fobj.write('[DATA]\n') fobj.write('path = %s' % tst_pth) nibd.get_nipy_user_dir = lambda: tmpdir - assert_equal(get_data_path(), tst_list + def_dirs + [tmpdir]) + assert get_data_path() == tst_list + def_dirs + [tmpdir] nibd.get_nipy_user_dir = lambda: fake_user_dir - assert_equal(get_data_path(), old_pth) + assert get_data_path() == old_pth # with some trepidation, the system config files with TemporaryDirectory() as tmpdir: nibd.get_nipy_system_dir = lambda: tmpdir @@ -179,8 +157,7 @@ def test_data_path(): with open(tmpfile, 'wt') as fobj: fobj.write('[DATA]\n') fobj.write('path = %s\n' % 
'/path/two') - assert_equal(get_data_path(), - tst_list + ['/path/two'] + old_pth) + assert get_data_path() == tst_list + ['/path/two'] + old_pth def test_find_data_dir(): @@ -190,52 +167,40 @@ def test_find_data_dir(): # under_here == '/nipy/utils' # subhere = 'tests' # fails with non-existant path - yield (assert_raises, - DataError, - find_data_dir, - [here], - 'implausible', - 'directory') + with pytest.raises(DataError): + find_data_dir([here], 'implausible', 'directory') # fails with file, when directory expected - yield (assert_raises, - DataError, - find_data_dir, - [here], - fname) + with pytest.raises(DataError): + find_data_dir([here], fname) # passes with directory that exists dd = find_data_dir([under_here], subhere) - yield assert_equal, dd, here + assert dd == here # and when one path in path list does not work dud_dir = pjoin(under_here, 'implausible') dd = find_data_dir([dud_dir, under_here], subhere) - yield assert_equal, dd, here + assert dd == here -@with_environment -def test_make_datasource(): +def test_make_datasource(with_nimd_env): pkg_def = dict( relpath='pkg') with TemporaryDirectory() as tmpdir: nibd.get_data_path = lambda: [tmpdir] - yield (assert_raises, - DataError, - make_datasource, - pkg_def) + with pytest.raises(DataError): + make_datasource(pkg_def) pkg_dir = pjoin(tmpdir, 'pkg') os.mkdir(pkg_dir) - yield (assert_raises, - DataError, - make_datasource, - pkg_def) + with pytest.raises(DataError): + make_datasource(pkg_def) tmpfile = pjoin(pkg_dir, 'config.ini') with open(tmpfile, 'wt') as fobj: fobj.write('[DEFAULT]\n') fobj.write('version = 0.1\n') ds = make_datasource(pkg_def, data_path=[tmpdir]) - yield assert_equal, ds.version, '0.1' + assert ds.version == '0.1' -@raises(DataError) +@pytest.mark.xfail(raises=DataError) def test_bomber(): b = Bomber('bomber example', 'a message') b.any_attribute # no error @@ -243,21 +208,16 @@ def test_bomber(): def test_bomber_inspect(): b = Bomber('bomber example', 'a message') - assert_false(hasattr(b, 'any_attribute')) + assert not hasattr(b, 'any_attribute') -@with_environment -def test_datasource_or_bomber(): - pkg_def = dict( - relpath='pkg') +def test_datasource_or_bomber(with_nimd_env): + pkg_def = dict(relpath='pkg') with TemporaryDirectory() as tmpdir: nibd.get_data_path = lambda: [tmpdir] ds = datasource_or_bomber(pkg_def) - yield (assert_raises, - DataError, - getattr, - ds, - 'get_filename') + with pytest.raises(DataError): + ds.get_filename('some_file.txt') pkg_dir = pjoin(tmpdir, 'pkg') os.mkdir(pkg_dir) tmpfile = pjoin(pkg_dir, 'config.ini') @@ -272,8 +232,5 @@ def test_datasource_or_bomber(): ds.get_filename('some_file.txt') pkg_def['min version'] = '0.3' ds = datasource_or_bomber(pkg_def) # not OK - yield (assert_raises, - DataError, - getattr, - ds, - 'get_filename') + with pytest.raises(DataError): + ds.get_filename('some_file.txt') diff --git a/nibabel/tests/test_dataobj_images.py b/nibabel/tests/test_dataobj_images.py index 4c40ff9f17..e0f042939a 100644 --- a/nibabel/tests/test_dataobj_images.py +++ b/nibabel/tests/test_dataobj_images.py @@ -16,9 +16,18 @@ class DoNumpyImage(DataobjImage): files_types = (('image', '.npy'),) @classmethod - def from_file_map(klass, file_map): + def from_file_map(klass, file_map, mmap=True, keep_file_open=None): + if mmap not in (True, False, 'c', 'r'): + raise ValueError("mmap should be one of {True, False, 'c', 'r'}") + if mmap is True: + mmap = 'c' + elif mmap is False: + mmap = None with file_map['image'].get_prepare_fileobj('rb') as fobj: - arr = np.load(fobj) 
+ try: + arr = np.load(fobj, mmap=mmap) + except: + arr = np.load(fobj) return klass(arr) def to_file_map(self, file_map=None): diff --git a/nibabel/tests/test_deprecated.py b/nibabel/tests/test_deprecated.py index e2e7f099ac..c031a0c60d 100644 --- a/nibabel/tests/test_deprecated.py +++ b/nibabel/tests/test_deprecated.py @@ -3,31 +3,30 @@ import warnings -from nibabel import info +from nibabel import pkg_info from nibabel.deprecated import (ModuleProxy, FutureWarningMixin, deprecate_with_version) -from nose.tools import (assert_true, assert_equal) from nibabel.tests.test_deprecator import TestDeprecatorFunc as _TestDF def setup(): # Hack nibabel version string - info.cmp_pkg_version.__defaults__ = ('2.0',) + pkg_info.cmp_pkg_version.__defaults__ = ('2.0',) def teardown(): # Hack nibabel version string back again - info.cmp_pkg_version.__defaults__ = (info.__version__,) + pkg_info.cmp_pkg_version.__defaults__ = (pkg_info.__version__,) def test_module_proxy(): # Test proxy for module mp = ModuleProxy('nibabel.deprecated') - assert_true(hasattr(mp, 'ModuleProxy')) - assert_true(mp.ModuleProxy is ModuleProxy) - assert_equal(repr(mp), '') + assert hasattr(mp, 'ModuleProxy') + assert mp.ModuleProxy is ModuleProxy + assert repr(mp) == '' def test_futurewarning_mixin(): @@ -47,19 +46,19 @@ class E(FutureWarningMixin, C): warn_message = "Oh no, not this one" with warnings.catch_warnings(record=True) as warns: c = C(42) - assert_equal(c.meth(), 42) - assert_equal(warns, []) + assert c.meth() == 42 + assert warns == [] d = D(42) - assert_equal(d.meth(), 42) + assert d.meth() == 42 warn = warns.pop(0) - assert_equal(warn.category, FutureWarning) - assert_equal(str(warn.message), + assert warn.category == FutureWarning + assert (str(warn.message) == 'This class will be removed in future versions') e = E(42) - assert_equal(e.meth(), 42) + assert e.meth() == 42 warn = warns.pop(0) - assert_equal(warn.category, FutureWarning) - assert_equal(str(warn.message), 'Oh no, not this one') + assert warn.category == FutureWarning + assert str(warn.message) == 'Oh no, not this one' class TestNibabelDeprecator(_TestDF): @@ -76,8 +75,8 @@ def func(): return 99 try: - info.cmp_pkg_version.__defaults__ = ('2.0dev',) + pkg_info.cmp_pkg_version.__defaults__ = ('2.0dev',) # No error, even though version is dev version of current - assert_equal(func(), 99) + assert func() == 99 finally: - info.cmp_pkg_version.__defaults__ = ('2.0',) + pkg_info.cmp_pkg_version.__defaults__ = ('2.0',) diff --git a/nibabel/tests/test_deprecator.py b/nibabel/tests/test_deprecator.py index c4dc2437a4..cf56dd598d 100644 --- a/nibabel/tests/test_deprecator.py +++ b/nibabel/tests/test_deprecator.py @@ -5,7 +5,7 @@ import warnings from functools import partial -from nose.tools import (assert_true, assert_raises, assert_equal) +import pytest from nibabel.deprecator import (_ensure_cr, _add_dep_doc, ExpiredDeprecationError, Deprecator) @@ -17,26 +17,26 @@ def test__ensure_cr(): # Make sure text ends with carriage return - assert_equal(_ensure_cr(' foo'), ' foo\n') - assert_equal(_ensure_cr(' foo\n'), ' foo\n') - assert_equal(_ensure_cr(' foo '), ' foo\n') - assert_equal(_ensure_cr('foo '), 'foo\n') - assert_equal(_ensure_cr('foo \n bar'), 'foo \n bar\n') - assert_equal(_ensure_cr('foo \n\n'), 'foo\n') + assert _ensure_cr(' foo') == ' foo\n' + assert _ensure_cr(' foo\n') == ' foo\n' + assert _ensure_cr(' foo ') == ' foo\n' + assert _ensure_cr('foo ') == 'foo\n' + assert _ensure_cr('foo \n bar') == 'foo \n bar\n' + assert _ensure_cr('foo \n\n') == 
'foo\n' def test__add_dep_doc(): # Test utility function to add deprecation message to docstring - assert_equal(_add_dep_doc('', 'foo'), 'foo\n') - assert_equal(_add_dep_doc('bar', 'foo'), 'bar\n\nfoo\n') - assert_equal(_add_dep_doc(' bar', 'foo'), ' bar\n\nfoo\n') - assert_equal(_add_dep_doc(' bar', 'foo\n'), ' bar\n\nfoo\n') - assert_equal(_add_dep_doc('bar\n\n', 'foo'), 'bar\n\nfoo\n') - assert_equal(_add_dep_doc('bar\n \n', 'foo'), 'bar\n\nfoo\n') - assert_equal(_add_dep_doc(' bar\n\nSome explanation', 'foo\nbaz'), - ' bar\n\nfoo\nbaz\n\nSome explanation\n') - assert_equal(_add_dep_doc(' bar\n\n Some explanation', 'foo\nbaz'), - ' bar\n \n foo\n baz\n \n Some explanation\n') + assert _add_dep_doc('', 'foo') == 'foo\n' + assert _add_dep_doc('bar', 'foo') == 'bar\n\nfoo\n' + assert _add_dep_doc(' bar', 'foo') == ' bar\n\nfoo\n' + assert _add_dep_doc(' bar', 'foo\n') == ' bar\n\nfoo\n' + assert _add_dep_doc('bar\n\n', 'foo') == 'bar\n\nfoo\n' + assert _add_dep_doc('bar\n \n', 'foo') == 'bar\n\nfoo\n' + assert (_add_dep_doc(' bar\n\nSome explanation', 'foo\nbaz') == + ' bar\n\nfoo\nbaz\n\nSome explanation\n') + assert (_add_dep_doc(' bar\n\n Some explanation', 'foo\nbaz') == + ' bar\n \n foo\n baz\n \n Some explanation\n') class CustomError(Exception): @@ -69,77 +69,71 @@ def test_dep_func(self): # Test function deprecation dec = self.dep_func func = dec('foo')(func_no_doc) - with clear_and_catch_warnings(modules=[_OWN_MODULE]) as w: - warnings.simplefilter('always') - assert_equal(func(), None) - assert_equal(len(w), 1) - assert_true(w[0].category is DeprecationWarning) - assert_equal(func.__doc__, 'foo\n') + with pytest.deprecated_call(): + assert func() is None + assert func.__doc__ == 'foo\n' func = dec('foo')(func_doc) - with clear_and_catch_warnings(modules=[_OWN_MODULE]) as w: - warnings.simplefilter('always') - assert_equal(func(1), None) - assert_equal(len(w), 1) - assert_equal(func.__doc__, 'A docstring\n\nfoo\n') + with pytest.deprecated_call() as w: + assert func(1) is None + assert len(w) == 1 + assert func.__doc__ == 'A docstring\n\nfoo\n' func = dec('foo')(func_doc_long) - with clear_and_catch_warnings(modules=[_OWN_MODULE]) as w: - warnings.simplefilter('always') - assert_equal(func(1, 2), None) - assert_equal(len(w), 1) - assert_equal(func.__doc__, 'A docstring\n \n foo\n \n Some text\n') + with pytest.deprecated_call() as w: + assert func(1, 2) is None + assert len(w) == 1 + assert func.__doc__ == 'A docstring\n \n foo\n \n Some text\n' # Try some since and until versions func = dec('foo', '1.1')(func_no_doc) - assert_equal(func.__doc__, 'foo\n\n* deprecated from version: 1.1\n') - with clear_and_catch_warnings(modules=[_OWN_MODULE]) as w: - warnings.simplefilter('always') - assert_equal(func(), None) - assert_equal(len(w), 1) - func = dec('foo', until='2.4')(func_no_doc) - with clear_and_catch_warnings(modules=[_OWN_MODULE]) as w: - warnings.simplefilter('always') - assert_equal(func(), None) - assert_equal(len(w), 1) - assert_equal(func.__doc__, - 'foo\n\n* Will raise {} as of version: 2.4\n' + assert func.__doc__ == 'foo\n\n* deprecated from version: 1.1\n' + with pytest.deprecated_call() as w: + assert func() is None + assert len(w) == 1 + func = dec('foo', until='99.4')(func_no_doc) + with pytest.deprecated_call() as w: + assert func() is None + assert len(w) == 1 + assert (func.__doc__ == + 'foo\n\n* Will raise {} as of version: 99.4\n' .format(ExpiredDeprecationError)) func = dec('foo', until='1.8')(func_no_doc) - assert_raises(ExpiredDeprecationError, func) - 
assert_equal(func.__doc__, - 'foo\n\n* Raises {} as of version: 1.8\n' - .format(ExpiredDeprecationError)) + with pytest.raises(ExpiredDeprecationError): + func() + assert (func.__doc__ == + 'foo\n\n* Raises {} as of version: 1.8\n' + .format(ExpiredDeprecationError)) func = dec('foo', '1.2', '1.8')(func_no_doc) - assert_raises(ExpiredDeprecationError, func) - assert_equal(func.__doc__, - 'foo\n\n* deprecated from version: 1.2\n' - '* Raises {} as of version: 1.8\n' - .format(ExpiredDeprecationError)) + with pytest.raises(ExpiredDeprecationError): + func() + assert (func.__doc__ == + 'foo\n\n* deprecated from version: 1.2\n' + '* Raises {} as of version: 1.8\n' + .format(ExpiredDeprecationError)) func = dec('foo', '1.2', '1.8')(func_doc_long) - assert_equal(func.__doc__, - 'A docstring\n \n foo\n \n' - ' * deprecated from version: 1.2\n' - ' * Raises {} as of version: 1.8\n \n' - ' Some text\n' - .format(ExpiredDeprecationError)) - assert_raises(ExpiredDeprecationError, func) + assert (func.__doc__ == + 'A docstring\n \n foo\n \n' + ' * deprecated from version: 1.2\n' + ' * Raises {} as of version: 1.8\n \n' + ' Some text\n' + .format(ExpiredDeprecationError)) + with pytest.raises(ExpiredDeprecationError): + func() # Check different warnings and errors func = dec('foo', warn_class=UserWarning)(func_no_doc) with clear_and_catch_warnings(modules=[_OWN_MODULE]) as w: warnings.simplefilter('always') - assert_equal(func(), None) - assert_equal(len(w), 1) - assert_true(w[0].category is UserWarning) + assert func() is None + assert len(w) == 1 + assert w[0].category is UserWarning func = dec('foo', error_class=CustomError)(func_no_doc) - with clear_and_catch_warnings(modules=[_OWN_MODULE]) as w: - warnings.simplefilter('always') - assert_equal(func(), None) - assert_equal(len(w), 1) - assert_true(w[0].category is DeprecationWarning) + with pytest.deprecated_call(): + assert func() is None func = dec('foo', until='1.8', error_class=CustomError)(func_no_doc) - assert_raises(CustomError, func) + with pytest.raises(CustomError): + func() class TestDeprecatorMaker(object): @@ -150,19 +144,16 @@ class TestDeprecatorMaker(object): def test_deprecator_maker(self): dec = self.dep_maker(warn_class=UserWarning) func = dec('foo')(func_no_doc) - with clear_and_catch_warnings(modules=[_OWN_MODULE]) as w: - warnings.simplefilter('always') - assert_equal(func(), None) - assert_equal(len(w), 1) - assert_true(w[0].category is UserWarning) + with pytest.warns(UserWarning) as w: + # warnings.simplefilter('always') + assert func() is None + assert len(w) == 1 dec = self.dep_maker(error_class=CustomError) func = dec('foo')(func_no_doc) - with clear_and_catch_warnings(modules=[_OWN_MODULE]) as w: - warnings.simplefilter('always') - assert_equal(func(), None) - assert_equal(len(w), 1) - assert_true(w[0].category is DeprecationWarning) + with pytest.deprecated_call(): + assert func() is None func = dec('foo', until='1.8')(func_no_doc) - assert_raises(CustomError, func) + with pytest.raises(CustomError): + func() diff --git a/nibabel/tests/test_dft.py b/nibabel/tests/test_dft.py index 0285b01575..c7c80b0dd9 100644 --- a/nibabel/tests/test_dft.py +++ b/nibabel/tests/test_dft.py @@ -6,32 +6,28 @@ from io import BytesIO from ..testing import suppress_warnings -import numpy as np - with suppress_warnings(): from .. import dft from .. 
import nifti1 -from nose import SkipTest -from nose.tools import (assert_true, assert_false, assert_equal, assert_raises) +import unittest +import pytest # Shield optional package imports from ..optpkg import optional_package -# setup_module will raise SkipTest if no dicom to import from nibabel.pydicom_compat import have_dicom PImage, have_pil, _ = optional_package('PIL.Image') -pil_test = np.testing.dec.skipif(not have_pil, 'could not import PIL.Image') data_dir = pjoin(dirname(__file__), 'data') -def setup_module(): +def setUpModule(): if os.name == 'nt': - raise SkipTest('FUSE not available for windows, skipping dft tests') + raise unittest.SkipTest('FUSE not available for windows, skipping dft tests') if not have_dicom: - raise SkipTest('Need pydicom for dft tests, skipping') + raise unittest.SkipTest('Need pydicom for dft tests, skipping') def test_init(): @@ -41,41 +37,41 @@ def test_init(): def test_study(): studies = dft.get_studies(data_dir) - assert_equal(len(studies), 1) - assert_equal(studies[0].uid, + assert len(studies) == 1 + assert (studies[0].uid == '1.3.12.2.1107.5.2.32.35119.30000010011408520750000000022') - assert_equal(studies[0].date, '20100114') - assert_equal(studies[0].time, '121314.000000') - assert_equal(studies[0].comments, 'dft study comments') - assert_equal(studies[0].patient_name, 'dft patient name') - assert_equal(studies[0].patient_id, '1234') - assert_equal(studies[0].patient_birth_date, '19800102') - assert_equal(studies[0].patient_sex, 'F') + assert studies[0].date == '20100114' + assert studies[0].time == '121314.000000' + assert studies[0].comments == 'dft study comments' + assert studies[0].patient_name == 'dft patient name' + assert studies[0].patient_id == '1234' + assert studies[0].patient_birth_date == '19800102' + assert studies[0].patient_sex == 'F' def test_series(): studies = dft.get_studies(data_dir) - assert_equal(len(studies[0].series), 1) + assert len(studies[0].series) == 1 ser = studies[0].series[0] - assert_equal(ser.uid, + assert (ser.uid == '1.3.12.2.1107.5.2.32.35119.2010011420292594820699190.0.0.0') - assert_equal(ser.number, '12') - assert_equal(ser.description, 'CBU_DTI_64D_1A') - assert_equal(ser.rows, 256) - assert_equal(ser.columns, 256) - assert_equal(ser.bits_allocated, 16) - assert_equal(ser.bits_stored, 12) + assert ser.number == '12' + assert ser.description == 'CBU_DTI_64D_1A' + assert ser.rows == 256 + assert ser.columns == 256 + assert ser.bits_allocated == 16 + assert ser.bits_stored == 12 def test_storage_instances(): studies = dft.get_studies(data_dir) sis = studies[0].series[0].storage_instances - assert_equal(len(sis), 2) - assert_equal(sis[0].instance_number, 1) - assert_equal(sis[1].instance_number, 2) - assert_equal(sis[0].uid, + assert len(sis) == 2 + assert sis[0].instance_number == 1 + assert sis[1].instance_number == 2 + assert (sis[0].uid == '1.3.12.2.1107.5.2.32.35119.2010011420300180088599504.0') - assert_equal(sis[1].uid, + assert (sis[1].uid == '1.3.12.2.1107.5.2.32.35119.2010011420300180088599504.1') @@ -83,17 +79,17 @@ def test_storage_instance(): pass -@pil_test +@unittest.skipUnless(have_pil, 'could not import PIL.Image') def test_png(): studies = dft.get_studies(data_dir) data = studies[0].series[0].as_png() im = PImage.open(BytesIO(data)) - assert_equal(im.size, (256, 256)) + assert im.size == (256, 256) def test_nifti(): studies = dft.get_studies(data_dir) data = studies[0].series[0].as_nifti() - assert_equal(len(data), 352 + 2 * 256 * 256 * 2) + assert len(data) == 352 + 2 * 256 * 256 * 2 h = 
nifti1.Nifti1Header(data[:348]) - assert_equal(h.get_data_shape(), (256, 256, 2)) + assert h.get_data_shape() == (256, 256, 2) diff --git a/nibabel/tests/test_diff.py b/nibabel/tests/test_diff.py index 4f99ca145f..218c4eec60 100644 --- a/nibabel/tests/test_diff.py +++ b/nibabel/tests/test_diff.py @@ -2,7 +2,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """ Test diff """ -from __future__ import division, print_function, absolute_import from os.path import (dirname, join as pjoin, abspath) import numpy as np @@ -72,3 +71,9 @@ def test_diff_values_array(): # and some inf should not be a problem assert not are_values_different(array([0, inf]), array([0, inf])) assert are_values_different(array([0, inf]), array([inf, 0])) + + # we will allow for types to be of different endianness but the + # same in "instnatiation" type and value + assert not are_values_different(np.array(1, dtype='') + assert hdr.endianness == '>' class TestEcatMlist(TestCase): @@ -94,9 +94,9 @@ def test_mlist(self): mats = np.recarray(shape=(32, 4), dtype=dt, buf=dat) fid.close() # tests - assert_true(mats['matlist'][0, 0] + mats['matlist'][0, 3] == 31) - assert_true(get_frame_order(mlist)[0][0] == 0) - assert_true(get_frame_order(mlist)[0][1] == 16842758.0) + assert mats['matlist'][0, 0] + mats['matlist'][0, 3] == 31 + assert get_frame_order(mlist)[0][0] == 0 + assert get_frame_order(mlist)[0][1] == 16842758.0 # test badly ordered mlist badordermlist = np.array([[1.68427540e+07, 3.00000000e+00, 1.20350000e+04, 1.00000000e+00], @@ -111,7 +111,7 @@ def test_mlist(self): [1.68427580e+07, 6.01680000e+04, 7.22000000e+04, 1.00000000e+00]]) with suppress_warnings(): # STORED order - assert_true(get_frame_order(badordermlist)[0][0] == 1) + assert get_frame_order(badordermlist)[0][0] == 1 def test_mlist_errors(self): fid = open(self.example_file, 'rb') @@ -133,17 +133,18 @@ def test_mlist_errors(self): with suppress_warnings(): # STORED order series_framenumbers = get_series_framenumbers(mlist) # first frame stored was actually 2nd frame acquired - assert_true(series_framenumbers[0] == 2) + assert series_framenumbers[0] == 2 order = [series_framenumbers[x] for x in sorted(series_framenumbers)] # true series order is [2,1,3,4,5,6], note counting starts at 1 - assert_true(order == [2, 1, 3, 4, 5, 6]) + assert order == [2, 1, 3, 4, 5, 6] mlist[0, 0] = 0 with suppress_warnings(): frames_order = get_frame_order(mlist) neworder = [frames_order[x][0] for x in sorted(frames_order)] - assert_true(neworder == [1, 2, 3, 4, 5]) + assert neworder == [1, 2, 3, 4, 5] with suppress_warnings(): - assert_raises(IOError, get_series_framenumbers, mlist) + with pytest.raises(IOError): + get_series_framenumbers(mlist) class TestEcatSubHeader(TestCase): @@ -156,26 +157,26 @@ class TestEcatSubHeader(TestCase): subhdr = subhdr_class(hdr, mlist, fid) def test_subheader_size(self): - assert_equal(self.subhdr_class._subhdrdtype.itemsize, 510) + assert self.subhdr_class._subhdrdtype.itemsize == 510 def test_subheader(self): - assert_equal(self.subhdr.get_shape(), (10, 10, 3)) - assert_equal(self.subhdr.get_nframes(), 1) - assert_equal(self.subhdr.get_nframes(), + assert self.subhdr.get_shape() == (10, 10, 3) + assert self.subhdr.get_nframes() == 1 + assert (self.subhdr.get_nframes() == len(self.subhdr.subheaders)) - assert_equal(self.subhdr._check_affines(), True) + assert self.subhdr._check_affines() is True assert_array_almost_equal(np.diag(self.subhdr.get_frame_affine()), np.array([2.20241979, 2.20241979, 3.125, 1.])) - 
assert_equal(self.subhdr.get_zooms()[0], 2.20241978764534) - assert_equal(self.subhdr.get_zooms()[2], 3.125) - assert_equal(self.subhdr._get_data_dtype(0), np.int16) + assert self.subhdr.get_zooms()[0] == 2.20241978764534 + assert self.subhdr.get_zooms()[2] == 3.125 + assert self.subhdr._get_data_dtype(0) == np.int16 #assert_equal(self.subhdr._get_frame_offset(), 1024) - assert_equal(self.subhdr._get_frame_offset(), 1536) + assert self.subhdr._get_frame_offset() == 1536 dat = self.subhdr.raw_data_from_fileobj() - assert_equal(dat.shape, self.subhdr.get_shape()) - assert_equal(self.subhdr.subheaders[0]['scale_factor'].item(), 1.0) + assert dat.shape == self.subhdr.get_shape() + assert self.subhdr.subheaders[0]['scale_factor'].item() == 1.0 ecat_calib_factor = self.hdr['ecat_calibration_factor'] - assert_equal(ecat_calib_factor, 25007614.0) + assert ecat_calib_factor == 25007614.0 class TestEcatImage(TestCase): @@ -184,9 +185,9 @@ class TestEcatImage(TestCase): img = image_class.load(example_file) def test_file(self): - assert_equal(self.img.file_map['header'].filename, + assert (self.img.file_map['header'].filename == self.example_file) - assert_equal(self.img.file_map['image'].filename, + assert (self.img.file_map['image'].filename == self.example_file) def test_save(self): @@ -194,20 +195,20 @@ def test_save(self): with InTemporaryDirectory(): self.img.to_filename(tmp_file) other = self.image_class.load(tmp_file) - assert_equal(self.img.get_data().all(), other.get_data().all()) + assert_array_equal(self.img.get_fdata(), other.get_fdata()) # Delete object holding reference to temporary file to make Windows # happier. del other def test_data(self): - dat = self.img.get_data() - assert_equal(dat.shape, self.img.shape) + dat = self.img.get_fdata() + assert dat.shape == self.img.shape frame = self.img.get_frame(0) assert_array_equal(frame, dat[:, :, :, 0]) def test_array_proxy(self): # Get the cached data copy - dat = self.img.get_data() + dat = self.img.get_fdata() # Make a new one to test arrayproxy img = self.image_class.load(self.example_file) data_prox = img.dataobj @@ -219,16 +220,16 @@ def test_array_proxy(self): def test_array_proxy_slicing(self): # Test slicing of array proxy - arr = self.img.get_data() + arr = self.img.get_fdata() prox = self.img.dataobj - assert_true(prox.is_proxy) + assert prox.is_proxy for sliceobj in slicer_samples(self.img.shape): assert_array_equal(arr[sliceobj], prox[sliceobj]) def test_isolation(self): # Test image isolated from external changes to affine img_klass = self.image_class - arr, aff, hdr, sub_hdr, mlist = (self.img.get_data(), + arr, aff, hdr, sub_hdr, mlist = (self.img.get_fdata(), self.img.affine, self.img.header, self.img.get_subheaders(), @@ -236,20 +237,20 @@ def test_isolation(self): img = img_klass(arr, aff, hdr, sub_hdr, mlist) assert_array_equal(img.affine, aff) aff[0, 0] = 99 - assert_false(np.all(img.affine == aff)) + assert not np.all(img.affine == aff) def test_float_affine(self): # Check affines get converted to float img_klass = self.image_class - arr, aff, hdr, sub_hdr, mlist = (self.img.get_data(), + arr, aff, hdr, sub_hdr, mlist = (self.img.get_fdata(), self.img.affine, self.img.header, self.img.get_subheaders(), self.img.get_mlist()) img = img_klass(arr, aff.astype(np.float32), hdr, sub_hdr, mlist) - assert_equal(img.get_affine().dtype, np.dtype(np.float64)) + assert img.get_affine().dtype == np.dtype(np.float64) img = img_klass(arr, aff.astype(np.int16), hdr, sub_hdr, mlist) - assert_equal(img.get_affine().dtype, 
np.dtype(np.float64)) + assert img.get_affine().dtype == np.dtype(np.float64) def test_data_regression(self): # Test whether data read has changed since 1.3.0 @@ -257,9 +258,9 @@ def test_data_regression(self): vals = dict(max=248750736458.0, min=1125342630.0, mean=117907565661.46666) - data = self.img.get_data() - assert_equal(data.max(), vals['max']) - assert_equal(data.min(), vals['min']) + data = self.img.get_fdata() + assert data.max() == vals['max'] + assert data.min() == vals['min'] assert_array_almost_equal(data.mean(), vals['mean']) def test_mlist_regression(self): @@ -270,12 +271,11 @@ def test_mlist_regression(self): def test_from_filespec_deprecation(): # Check from_filespec raises Deprecation - with clear_and_catch_warnings() as w: - warnings.simplefilter('always', DeprecationWarning) + with pytest.deprecated_call() as w: # No warning for standard load img_loaded = EcatImage.load(ecat_file) - assert_equal(len(w), 0) + assert len(w) == 0 # Warning for from_filespec img_speced = EcatImage.from_filespec(ecat_file) - assert_equal(len(w), 1) - assert_array_equal(img_loaded.get_data(), img_speced.get_data()) + assert len(w) == 1 + assert_array_equal(img_loaded.get_fdata(), img_speced.get_fdata()) diff --git a/nibabel/tests/test_ecat_data.py b/nibabel/tests/test_ecat_data.py index f0c9d70b3e..1accd01a14 100644 --- a/nibabel/tests/test_ecat_data.py +++ b/nibabel/tests/test_ecat_data.py @@ -8,7 +8,6 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """ Test we can correctly import example ECAT files """ -from __future__ import print_function, absolute_import import os from os.path import join as pjoin @@ -18,7 +17,6 @@ from .nibabel_data import get_nibabel_data, needs_nibabel_data from ..ecat import load -from nose.tools import assert_equal from numpy.testing import (assert_array_equal, assert_almost_equal) ECAT_TEST_PATH = pjoin(get_nibabel_data(), 'nipy-ecattest') @@ -37,16 +35,28 @@ class TestNegatives(object): # unit: 1/cm ) - @needs_nibabel_data('nitest-minc2') + @needs_nibabel_data('nipy-ecattest') def test_load(self): # Check highest level load of minc works img = self.opener(self.example_params['fname']) - assert_equal(img.shape, self.example_params['shape']) - assert_equal(img.get_data_dtype(0).type, self.example_params['type']) + assert img.shape == self.example_params['shape'] + assert img.get_data_dtype(0).type == self.example_params['type'] # Check correspondence of data and recorded shape - data = img.get_data() - assert_equal(data.shape, self.example_params['shape']) + data = img.get_fdata() + assert data.shape == self.example_params['shape'] # min, max, mean values from given parameters assert_almost_equal(data.min(), self.example_params['min'], 4) assert_almost_equal(data.max(), self.example_params['max'], 4) assert_almost_equal(data.mean(), self.example_params['mean'], 4) + + +class TestMultiframe(TestNegatives): + example_params = dict( + fname=os.path.join(ECAT_TEST_PATH, 'ECAT7_testcase_multiframe.v'), + shape=(256, 256, 207, 3), + type=np.int16, + # Zeroed out image + min=0.0, + max=29170.67905, + mean=121.454, + ) diff --git a/nibabel/tests/test_endiancodes.py b/nibabel/tests/test_endiancodes.py index 805de0d572..94c9ea0344 100644 --- a/nibabel/tests/test_endiancodes.py +++ b/nibabel/tests/test_endiancodes.py @@ -10,31 +10,27 @@ import sys - -from nose.tools import assert_equal -from nose.tools import assert_true - from ..volumeutils import (endian_codes, native_code, swapped_code) def test_native_swapped(): native_is_le = 
sys.byteorder == 'little' if native_is_le: - assert_equal((native_code, swapped_code), ('<', '>')) + assert (native_code, swapped_code) == ('<', '>') else: - assert_equal((native_code, swapped_code), ('>', '<')) + assert (native_code, swapped_code) == ('>', '<') def test_to_numpy(): if sys.byteorder == 'little': - yield assert_true, endian_codes['native'] == '<' - yield assert_true, endian_codes['swapped'] == '>' + assert endian_codes['native'] == '<' + assert endian_codes['swapped'] == '>' else: - yield assert_true, endian_codes['native'] == '>' - yield assert_true, endian_codes['swapped'] == '<' - yield assert_true, endian_codes['native'] == endian_codes['='] - yield assert_true, endian_codes['big'] == '>' + assert endian_codes['native'] == '>' + assert endian_codes['swapped'] == '<' + assert endian_codes['native'] == endian_codes['='] + assert endian_codes['big'] == '>' for code in ('little', '<', 'l', 'L', 'le'): - yield assert_true, endian_codes[code] == '<' + assert endian_codes[code] == '<' for code in ('big', '>', 'b', 'B', 'be'): - yield assert_true, endian_codes[code] == '>' + assert endian_codes[code] == '>' diff --git a/nibabel/tests/test_environment.py b/nibabel/tests/test_environment.py index 7b02ea866f..19891a607b 100644 --- a/nibabel/tests/test_environment.py +++ b/nibabel/tests/test_environment.py @@ -8,32 +8,24 @@ from .. import environment as nibe -from numpy.testing import (assert_array_almost_equal, - assert_array_equal) +import pytest -from nose.tools import assert_equal - -from nose import with_setup - -GIVEN_ENV = {} DATA_KEY = 'NIPY_DATA_PATH' USER_KEY = 'NIPY_USER_DIR' -def setup_environment(): +@pytest.fixture +def with_environment(request): """Setup test environment for some functions that are tested in this module. In particular this functions stores attributes and other things that we need to stub in some test functions. This needs to be done on a function level and not module level because each testfunction needs a pristine environment. 
""" - global GIVEN_ENV + GIVEN_ENV = {} GIVEN_ENV['env'] = env.copy() - - -def teardown_environment(): - """Restore things that were remembered by the setup_environment function - """ + yield + """Restore things that were remembered by the setup_environment function """ orig_env = GIVEN_ENV['env'] # Pull keys out into list to avoid altering dictionary during iteration, # causing python 3 error @@ -43,17 +35,12 @@ def teardown_environment(): env.update(orig_env) -# decorator to use setup, teardown environment -with_environment = with_setup(setup_environment, teardown_environment) - - def test_nipy_home(): # Test logic for nipy home directory - assert_equal(nibe.get_home_dir(), os.path.expanduser('~')) + assert nibe.get_home_dir() == os.path.expanduser('~') -@with_environment -def test_user_dir(): +def test_user_dir(with_environment): if USER_KEY in env: del env[USER_KEY] home_dir = nibe.get_home_dir() @@ -61,16 +48,16 @@ def test_user_dir(): exp = pjoin(home_dir, '.nipy') else: exp = pjoin(home_dir, '_nipy') - assert_equal(exp, nibe.get_nipy_user_dir()) + assert exp == nibe.get_nipy_user_dir() env[USER_KEY] = '/a/path' - assert_equal(abspath('/a/path'), nibe.get_nipy_user_dir()) + assert abspath('/a/path') == nibe.get_nipy_user_dir() def test_sys_dir(): sys_dir = nibe.get_nipy_system_dir() if os.name == 'nt': - assert_equal(sys_dir, r'C:\etc\nipy') + assert sys_dir == r'C:\etc\nipy' elif os.name == 'posix': - assert_equal(sys_dir, r'/etc/nipy') + assert sys_dir == r'/etc/nipy' else: - assert_equal(sys_dir, None) + assert sys_dir is None diff --git a/nibabel/tests/test_euler.py b/nibabel/tests/test_euler.py index 0d7027222f..915e65e552 100644 --- a/nibabel/tests/test_euler.py +++ b/nibabel/tests/test_euler.py @@ -15,9 +15,7 @@ from .. import eulerangles as nea from .. 
import quaternions as nq -from nose.tools import assert_false -from nose.tools import assert_true - +import pytest from numpy.testing import assert_array_equal, assert_array_almost_equal FLOAT_EPS = np.finfo(np.float).eps @@ -90,36 +88,38 @@ def test_basic_euler(): M2 = nea.euler2mat(0, yr) M3 = nea.euler2mat(0, 0, xr) # which are all valid rotation matrices - yield assert_true, is_valid_rotation(M) - yield assert_true, is_valid_rotation(M1) - yield assert_true, is_valid_rotation(M2) - yield assert_true, is_valid_rotation(M3) + assert is_valid_rotation(M) + assert is_valid_rotation(M1) + assert is_valid_rotation(M2) + assert is_valid_rotation(M3) # Full matrix is composition of three individual matrices - yield assert_true, np.allclose(M, np.dot(M3, np.dot(M2, M1))) + assert np.allclose(M, np.dot(M3, np.dot(M2, M1))) # Rotations can be specified with named args, default 0 - yield assert_true, np.all(nea.euler2mat(zr) == nea.euler2mat(z=zr)) - yield assert_true, np.all(nea.euler2mat(0, yr) == nea.euler2mat(y=yr)) - yield assert_true, np.all(nea.euler2mat(0, 0, xr) == nea.euler2mat(x=xr)) + assert np.all(nea.euler2mat(zr) == nea.euler2mat(z=zr)) + assert np.all(nea.euler2mat(0, yr) == nea.euler2mat(y=yr)) + assert np.all(nea.euler2mat(0, 0, xr) == nea.euler2mat(x=xr)) # Applying an opposite rotation same as inverse (the inverse is # the same as the transpose, but just for clarity) - yield assert_true, np.allclose(nea.euler2mat(x=-xr), + assert np.allclose(nea.euler2mat(x=-xr), np.linalg.inv(nea.euler2mat(x=xr))) -def test_euler_mat(): +def test_euler_mat_1(): M = nea.euler2mat() - yield assert_array_equal, M, np.eye(3) - for x, y, z in eg_rots: - M1 = nea.euler2mat(z, y, x) - M2 = sympy_euler(z, y, x) - yield assert_array_almost_equal, M1, M2 - M3 = np.dot(x_only(x), np.dot(y_only(y), z_only(z))) - yield assert_array_almost_equal, M1, M3 - zp, yp, xp = nea.mat2euler(M1) - # The parameters may not be the same as input, but they give the - # same rotation matrix - M4 = nea.euler2mat(zp, yp, xp) - yield assert_array_almost_equal, M1, M4 + assert_array_equal(M, np.eye(3)) + +@pytest.mark.parametrize("x, y, z", eg_rots) +def test_euler_mat_2(x, y, z): + M1 = nea.euler2mat(z, y, x) + M2 = sympy_euler(z, y, x) + assert_array_almost_equal(M1, M2) + M3 = np.dot(x_only(x), np.dot(y_only(y), z_only(z))) + assert_array_almost_equal(M1, M3) + zp, yp, xp = nea.mat2euler(M1) + # The parameters may not be the same as input, but they give the + # same rotation matrix + M4 = nea.euler2mat(zp, yp, xp) + assert_array_almost_equal(M1, M4) def sympy_euler2quat(z=0, y=0, x=0): @@ -148,27 +148,27 @@ def test_euler_instability(): M = nea.euler2mat(*zyx) # Round trip M_back = nea.euler2mat(*nea.mat2euler(M)) - yield assert_true, np.allclose(M, M_back) + assert np.allclose(M, M_back) # disturb matrix slightly M_e = M - FLOAT_EPS # round trip to test - OK M_e_back = nea.euler2mat(*nea.mat2euler(M_e)) - yield assert_true, np.allclose(M_e, M_e_back) + assert np.allclose(M_e, M_e_back) # not so with crude routine M_e_back = nea.euler2mat(*crude_mat2euler(M_e)) - yield assert_false, np.allclose(M_e, M_e_back) - - -def test_quats(): - for x, y, z in eg_rots: - M1 = nea.euler2mat(z, y, x) - quatM = nq.mat2quat(M1) - quat = nea.euler2quat(z, y, x) - yield nq.nearly_equivalent, quatM, quat - quatS = sympy_euler2quat(z, y, x) - yield nq.nearly_equivalent, quat, quatS - zp, yp, xp = nea.quat2euler(quat) - # The parameters may not be the same as input, but they give the - # same rotation matrix - M2 = nea.euler2mat(zp, yp, xp) - 
yield assert_array_almost_equal, M1, M2 + assert not np.allclose(M_e, M_e_back) + + +@pytest.mark.parametrize("x, y, z", eg_rots) +def test_quats(x, y, z): + M1 = nea.euler2mat(z, y, x) + quatM = nq.mat2quat(M1) + quat = nea.euler2quat(z, y, x) + assert nq.nearly_equivalent(quatM, quat) + quatS = sympy_euler2quat(z, y, x) + assert nq.nearly_equivalent(quat, quatS) + zp, yp, xp = nea.quat2euler(quat) + # The parameters may not be the same as input, but they give the + # same rotation matrix + M2 = nea.euler2mat(zp, yp, xp) + assert_array_almost_equal(M1, M2) diff --git a/nibabel/tests/test_filebasedimages.py b/nibabel/tests/test_filebasedimages.py index 9a6f8b3db7..efac76a65a 100644 --- a/nibabel/tests/test_filebasedimages.py +++ b/nibabel/tests/test_filebasedimages.py @@ -2,15 +2,13 @@ """ from itertools import product +import warnings import numpy as np -from nibabel.filebasedimages import FileBasedHeader, FileBasedImage +from ..filebasedimages import FileBasedHeader, FileBasedImage, SerializableImage -from nibabel.tests.test_image_api import GenericImageAPI - -from nose.tools import (assert_true, assert_false, assert_equal, - assert_not_equal) +from .test_image_api import GenericImageAPI, SerializeMixin class FBNumpyImage(FileBasedImage): @@ -27,6 +25,11 @@ def shape(self): return self.arr.shape def get_data(self): + warnings.warn('Deprecated', DeprecationWarning) + return self.arr + + @property + def dataobj(self): return self.arr def get_fdata(self): @@ -50,6 +53,10 @@ def set_data_dtype(self, dtype): self.arr = self.arr.astype(dtype) +class SerializableNumpyImage(FBNumpyImage, SerializableImage): + pass + + class TestFBImageAPI(GenericImageAPI): """ Validation for FileBasedImage instances """ @@ -80,6 +87,16 @@ def obj_params(self): yield func, params +class TestSerializableImageAPI(TestFBImageAPI, SerializeMixin): + image_maker = SerializableNumpyImage + + @staticmethod + def _header_eq(header_a, header_b): + """ FileBasedHeader is an abstract class, so __eq__ is undefined. + Checking for the same header type is sufficient, here. """ + return type(header_a) == type(header_b) == FileBasedHeader + + def test_filebased_header(): # Test stuff about the default FileBasedHeader @@ -93,20 +110,20 @@ def __init__(self, seq=None): in_list = [1, 3, 2] hdr = H(in_list) hdr_c = hdr.copy() - assert_equal(hdr_c.a_list, hdr.a_list) + assert hdr_c.a_list == hdr.a_list # Copy is independent of original hdr_c.a_list[0] = 99 - assert_not_equal(hdr_c.a_list, hdr.a_list) + assert hdr_c.a_list != hdr.a_list # From header does a copy hdr2 = H.from_header(hdr) - assert_true(isinstance(hdr2, H)) - assert_equal(hdr2.a_list, hdr.a_list) + assert isinstance(hdr2, H) + assert hdr2.a_list == hdr.a_list hdr2.a_list[0] = 42 - assert_not_equal(hdr2.a_list, hdr.a_list) + assert hdr2.a_list != hdr.a_list # Default header input to from_heder gives new empty header hdr3 = H.from_header() - assert_true(isinstance(hdr3, H)) - assert_equal(hdr3.a_list, []) + assert isinstance(hdr3, H) + assert hdr3.a_list == [] hdr4 = H.from_header(None) - assert_true(isinstance(hdr4, H)) - assert_equal(hdr4.a_list, []) + assert isinstance(hdr4, H) + assert hdr4.a_list == [] diff --git a/nibabel/tests/test_filehandles.py b/nibabel/tests/test_filehandles.py index 365a418890..23ae573a70 100644 --- a/nibabel/tests/test_filehandles.py +++ b/nibabel/tests/test_filehandles.py @@ -1,7 +1,6 @@ """ Check that loading an image does not use up filehandles. 
""" -from __future__ import division, print_function, absolute_import from os.path import join as pjoin import shutil @@ -21,10 +20,6 @@ from ..loadsave import load, save from ..nifti1 import Nifti1Image -from numpy.testing import (assert_array_almost_equal, - assert_array_equal) - - def test_multiload(): # Make a tiny image, save, load many times. If we are leaking filehandles, diff --git a/nibabel/tests/test_fileholders.py b/nibabel/tests/test_fileholders.py index b28727a47e..e31a6efcbc 100644 --- a/nibabel/tests/test_fileholders.py +++ b/nibabel/tests/test_fileholders.py @@ -6,56 +6,49 @@ from ..fileholders import FileHolder -from numpy.testing import (assert_array_almost_equal, - assert_array_equal) - -from nose.tools import assert_equal -from nose.tools import assert_false -from nose.tools import assert_true - def test_init(): fh = FileHolder('a_fname') - assert_equal(fh.filename, 'a_fname') - assert_true(fh.fileobj is None) - assert_equal(fh.pos, 0) + assert fh.filename == 'a_fname' + assert fh.fileobj is None + assert fh.pos == 0 sio0 = BytesIO() fh = FileHolder('a_test', sio0) - assert_equal(fh.filename, 'a_test') - assert_true(fh.fileobj is sio0) - assert_equal(fh.pos, 0) + assert fh.filename == 'a_test' + assert fh.fileobj is sio0 + assert fh.pos == 0 fh = FileHolder('a_test_2', sio0, 3) - assert_equal(fh.filename, 'a_test_2') - assert_true(fh.fileobj is sio0) - assert_equal(fh.pos, 3) + assert fh.filename == 'a_test_2' + assert fh.fileobj is sio0 + assert fh.pos == 3 def test_same_file_as(): fh = FileHolder('a_fname') - assert_true(fh.same_file_as(fh)) + assert fh.same_file_as(fh) fh2 = FileHolder('a_test') - assert_false(fh.same_file_as(fh2)) + assert not fh.same_file_as(fh2) sio0 = BytesIO() fh3 = FileHolder('a_fname', sio0) fh4 = FileHolder('a_fname', sio0) - assert_true(fh3.same_file_as(fh4)) - assert_false(fh3.same_file_as(fh)) + assert fh3.same_file_as(fh4) + assert not fh3.same_file_as(fh) fh5 = FileHolder(fileobj=sio0) fh6 = FileHolder(fileobj=sio0) - assert_true(fh5.same_file_as(fh6)) + assert fh5.same_file_as(fh6) # Not if the filename is the same - assert_false(fh5.same_file_as(fh3)) + assert not fh5.same_file_as(fh3) # pos doesn't matter fh4_again = FileHolder('a_fname', sio0, pos=4) - assert_true(fh3.same_file_as(fh4_again)) + assert fh3.same_file_as(fh4_again) def test_file_like(): # Test returning file object or filename fh = FileHolder('a_fname') - assert_equal(fh.file_like, 'a_fname') + assert fh.file_like == 'a_fname' bio = BytesIO() fh = FileHolder(fileobj=bio) - assert_true(fh.file_like is bio) + assert fh.file_like is bio fh = FileHolder('a_fname', fileobj=bio) - assert_true(fh.file_like is bio) + assert fh.file_like is bio diff --git a/nibabel/tests/test_filename_parser.py b/nibabel/tests/test_filename_parser.py index f7317ac183..b0abc6d608 100644 --- a/nibabel/tests/test_filename_parser.py +++ b/nibabel/tests/test_filename_parser.py @@ -11,99 +11,66 @@ from ..filename_parser import (types_filenames, TypesFilenamesError, parse_filename, splitext_addext) -from nose.tools import (assert_equal, assert_true, assert_false, - assert_raises) +import pytest def test_filenames(): types_exts = (('image', '.img'), ('header', '.hdr')) for t_fname in ('test.img', 'test.hdr', 'test', 'test.'): tfns = types_filenames(t_fname, types_exts) - assert_equal(tfns, - {'image': 'test.img', - 'header': 'test.hdr'}) - # enforcing extensions raises an error for bad extension - assert_raises(TypesFilenamesError, - types_filenames, - 'test.funny', - types_exts) - # If not enforcing 
extensions, it does the best job it can, - # assuming the passed filename is for the first type (in this case - # 'image') - tfns = types_filenames('test.funny', types_exts, - enforce_extensions=False) - assert_equal(tfns, - {'header': 'test.hdr', - 'image': 'test.funny'}) - # .gz and .bz2 suffixes to extensions, by default, are removed - # before extension checking etc, and then put back onto every - # returned filename. - tfns = types_filenames('test.img.gz', types_exts) - assert_equal(tfns, - {'header': 'test.hdr.gz', - 'image': 'test.img.gz'}) - tfns = types_filenames('test.img.bz2', types_exts) - assert_equal(tfns, - {'header': 'test.hdr.bz2', - 'image': 'test.img.bz2'}) - # of course, if we don't know about e.g. gz, and enforce_extensions - # is on, we get an errror - assert_raises(TypesFilenamesError, - types_filenames, - 'test.img.gz', - types_exts, ()) - # if we don't know about .gz extension, and not enforcing, then we - # get something a bit odd - tfns = types_filenames('test.img.gz', types_exts, - trailing_suffixes=(), - enforce_extensions=False) - assert_equal(tfns, - {'header': 'test.img.hdr', - 'image': 'test.img.gz'}) - # the suffixes we remove and replaces can be any suffixes. - tfns = types_filenames('test.img.bzr', types_exts, ('.bzr',)) - assert_equal(tfns, - {'header': 'test.hdr.bzr', - 'image': 'test.img.bzr'}) - # If we specifically pass the remove / replace suffixes, then we - # don't remove / replace the .gz and .bz2, unless they are passed - # specifically. - tfns = types_filenames('test.img.bzr', types_exts, - trailing_suffixes=('.bzr',), - enforce_extensions=False) - assert_equal(tfns, - {'header': 'test.hdr.bzr', - 'image': 'test.img.bzr'}) - # but, just .gz or .bz2 as extension gives an error, if enforcing is on - assert_raises(TypesFilenamesError, - types_filenames, - 'test.gz', - types_exts) - assert_raises(TypesFilenamesError, - types_filenames, - 'test.bz2', - types_exts) - # if enforcing is off, it tries to work out what the other files - # should be assuming the passed filename is of the first input type - tfns = types_filenames('test.gz', types_exts, - enforce_extensions=False) - assert_equal(tfns, - {'image': 'test.gz', - 'header': 'test.hdr.gz'}) - # case (in)sensitivity, and effect of uppercase, lowercase - tfns = types_filenames('test.IMG', types_exts) - assert_equal(tfns, - {'image': 'test.IMG', - 'header': 'test.HDR'}) - tfns = types_filenames('test.img', - (('image', '.IMG'), ('header', '.HDR'))) - assert_equal(tfns, - {'header': 'test.hdr', - 'image': 'test.img'}) - tfns = types_filenames('test.IMG.Gz', types_exts) - assert_equal(tfns, - {'image': 'test.IMG.Gz', - 'header': 'test.HDR.Gz'}) + assert tfns == {'image': 'test.img', 'header': 'test.hdr'} + # enforcing extensions raises an error for bad extension + with pytest.raises(TypesFilenamesError): + types_filenames('test.funny', types_exts) + # If not enforcing extensions, it does the best job it can, + # assuming the passed filename is for the first type (in this case + # 'image') + tfns = types_filenames('test.funny', types_exts, enforce_extensions=False) + assert tfns == {'header': 'test.hdr', 'image': 'test.funny'} + # .gz and .bz2 suffixes to extensions, by default, are removed + # before extension checking etc, and then put back onto every + # returned filename. 
+ tfns = types_filenames('test.img.gz', types_exts) + assert tfns == {'header': 'test.hdr.gz', 'image': 'test.img.gz'} + tfns = types_filenames('test.img.bz2', types_exts) + assert tfns == {'header': 'test.hdr.bz2', 'image': 'test.img.bz2'} + # of course, if we don't know about e.g. gz, and enforce_extensions + # is on, we get an errror + with pytest.raises(TypesFilenamesError): + types_filenames('test.img.gz', types_exts, ()) + # if we don't know about .gz extension, and not enforcing, then we + # get something a bit odd + tfns = types_filenames('test.img.gz', types_exts, + trailing_suffixes=(), + enforce_extensions=False) + assert tfns == {'header': 'test.img.hdr', 'image': 'test.img.gz'} + # the suffixes we remove and replaces can be any suffixes. + tfns = types_filenames('test.img.bzr', types_exts, ('.bzr',)) + assert tfns == {'header': 'test.hdr.bzr', 'image': 'test.img.bzr'} + # If we specifically pass the remove / replace suffixes, then we + # don't remove / replace the .gz and .bz2, unless they are passed + # specifically. + tfns = types_filenames('test.img.bzr', types_exts, + trailing_suffixes=('.bzr',), + enforce_extensions=False) + assert tfns == {'header': 'test.hdr.bzr', 'image': 'test.img.bzr'} + # but, just .gz or .bz2 as extension gives an error, if enforcing is on + with pytest.raises(TypesFilenamesError): + types_filenames('test.gz', types_exts) + with pytest.raises(TypesFilenamesError): + types_filenames('test.bz2', types_exts) + # if enforcing is off, it tries to work out what the other files + # should be assuming the passed filename is of the first input type + tfns = types_filenames('test.gz', types_exts, + enforce_extensions=False) + assert tfns == {'image': 'test.gz', 'header': 'test.hdr.gz'} + # case (in)sensitivity, and effect of uppercase, lowercase + tfns = types_filenames('test.IMG', types_exts) + assert tfns == {'image': 'test.IMG', 'header': 'test.HDR'} + tfns = types_filenames('test.img', (('image', '.IMG'), ('header', '.HDR'))) + assert tfns == {'header': 'test.hdr', 'image': 'test.img'} + tfns = types_filenames('test.IMG.Gz', types_exts) + assert tfns == {'image': 'test.IMG.Gz', 'header': 'test.HDR.Gz'} def test_parse_filename(): @@ -121,52 +88,52 @@ def test_parse_filename(): for inps, exps in exp_in_outs: pth, sufs = inps res = parse_filename(pth, types_exts, sufs) - assert_equal(res, exps) + assert res == exps upth = pth.upper() uexps = (exps[0].upper(), exps[1].upper(), exps[2].upper() if exps[2] else None, exps[3]) res = parse_filename(upth, types_exts, sufs) - assert_equal(res, uexps) + assert res == uexps # test case sensitivity res = parse_filename('/path/fnameext2.GZ', types_exts, ('.gz',), False) # case insensitive again - assert_equal(res, ('/path/fname', 'ext2', '.GZ', 't2')) + assert res == ('/path/fname', 'ext2', '.GZ', 't2') res = parse_filename('/path/fnameext2.GZ', types_exts, ('.gz',), True) # case sensitive - assert_equal(res, ('/path/fnameext2', '.GZ', None, None)) + assert res == ('/path/fnameext2', '.GZ', None, None) res = parse_filename('/path/fnameEXT2.gz', types_exts, ('.gz',), False) # case insensitive - assert_equal(res, ('/path/fname', 'EXT2', '.gz', 't2')) + assert res == ('/path/fname', 'EXT2', '.gz', 't2') res = parse_filename('/path/fnameEXT2.gz', types_exts, ('.gz',), True) # case sensitive - assert_equal(res, ('/path/fnameEXT2', '', '.gz', None)) + assert res == ('/path/fnameEXT2', '', '.gz', None) def test_splitext_addext(): res = splitext_addext('fname.ext.gz') - assert_equal(res, ('fname', '.ext', '.gz')) + assert res 
== ('fname', '.ext', '.gz') res = splitext_addext('fname.ext') - assert_equal(res, ('fname', '.ext', '')) + assert res == ('fname', '.ext', '') res = splitext_addext('fname.ext.foo', ('.foo', '.bar')) - assert_equal(res, ('fname', '.ext', '.foo')) + assert res == ('fname', '.ext', '.foo') res = splitext_addext('fname.ext.FOO', ('.foo', '.bar')) - assert_equal(res, ('fname', '.ext', '.FOO')) + assert res == ('fname', '.ext', '.FOO') # case sensitive res = splitext_addext('fname.ext.FOO', ('.foo', '.bar'), True) - assert_equal(res, ('fname.ext', '.FOO', '')) + assert res == ('fname.ext', '.FOO', '') # edge cases res = splitext_addext('.nii') - assert_equal(res, ('', '.nii', '')) + assert res == ('', '.nii', '') res = splitext_addext('...nii') - assert_equal(res, ('..', '.nii', '')) + assert res == ('..', '.nii', '') res = splitext_addext('.') - assert_equal(res, ('.', '', '')) + assert res == ('.', '', '') res = splitext_addext('..') - assert_equal(res, ('..', '', '')) + assert res == ('..', '', '') res = splitext_addext('...') - assert_equal(res, ('...', '', '')) + assert res == ('...', '', '') diff --git a/nibabel/tests/test_files_interface.py b/nibabel/tests/test_files_interface.py index 0e9ed88eb9..d3c895618e 100644 --- a/nibabel/tests/test_files_interface.py +++ b/nibabel/tests/test_files_interface.py @@ -17,10 +17,8 @@ from ..fileholders import FileHolderError from ..spatialimages import SpatialImage -from nose.tools import (assert_true, assert_false, assert_equal, assert_raises) - from numpy.testing import assert_array_equal - +import pytest def test_files_spatialimages(): # test files creation in image classes @@ -31,9 +29,9 @@ def test_files_spatialimages(): for klass in klasses: file_map = klass.make_file_map() for key, value in file_map.items(): - assert_equal(value.filename, None) - assert_equal(value.fileobj, None) - assert_equal(value.pos, 0) + assert value.filename is None + assert value.fileobj is None + assert value.pos == 0 # If we can't create new images in memory without loading, bail here if not klass.makeable: continue @@ -44,9 +42,9 @@ def test_files_spatialimages(): else: img = klass(arr, aff) for key, value in img.file_map.items(): - assert_equal(value.filename, None) - assert_equal(value.fileobj, None) - assert_equal(value.pos, 0) + assert value.filename is None + assert value.fileobj is None + assert value.pos == 0 def test_files_interface(): @@ -56,32 +54,34 @@ def test_files_interface(): img = Nifti1Image(arr, aff) # single image img.set_filename('test') - assert_equal(img.get_filename(), 'test.nii') - assert_equal(img.file_map['image'].filename, 'test.nii') - assert_raises(KeyError, img.file_map.__getitem__, 'header') + assert img.get_filename() == 'test.nii' + assert img.file_map['image'].filename == 'test.nii' + with pytest.raises(KeyError): + img.file_map['header'] # pair - note new class img = Nifti1Pair(arr, aff) img.set_filename('test') - assert_equal(img.get_filename(), 'test.img') - assert_equal(img.file_map['image'].filename, 'test.img') - assert_equal(img.file_map['header'].filename, 'test.hdr') + assert img.get_filename() == 'test.img' + assert img.file_map['image'].filename == 'test.img' + assert img.file_map['header'].filename == 'test.hdr' # fileobjs - single image img = Nifti1Image(arr, aff) img.file_map['image'].fileobj = BytesIO() img.to_file_map() # saves to files img2 = Nifti1Image.from_file_map(img.file_map) # img still has correct data - assert_array_equal(img2.get_data(), img.get_data()) + assert_array_equal(img2.get_fdata(), 
img.get_fdata()) # fileobjs - pair img = Nifti1Pair(arr, aff) img.file_map['image'].fileobj = BytesIO() # no header yet - assert_raises(FileHolderError, img.to_file_map) + with pytest.raises(FileHolderError): + img.to_file_map() img.file_map['header'].fileobj = BytesIO() img.to_file_map() # saves to files img2 = Nifti1Pair.from_file_map(img.file_map) # img still has correct data - assert_array_equal(img2.get_data(), img.get_data()) + assert_array_equal(img2.get_fdata(), img.get_fdata()) def test_round_trip_spatialimages(): @@ -99,8 +99,8 @@ def test_round_trip_spatialimages(): img.to_file_map() # read it back again from the written files img2 = klass.from_file_map(file_map) - assert_array_equal(img2.get_data(), data) + assert_array_equal(img2.get_fdata(), data) # write, read it again img2.to_file_map() img3 = klass.from_file_map(file_map) - assert_array_equal(img3.get_data(), data) + assert_array_equal(img3.get_fdata(), data) diff --git a/nibabel/tests/test_fileslice.py b/nibabel/tests/test_fileslice.py index e9cfe8e0a4..21b1224d66 100644 --- a/nibabel/tests/test_fileslice.py +++ b/nibabel/tests/test_fileslice.py @@ -1,21 +1,14 @@ """ Test slicing of file-like objects """ -import sys - -PY2 = sys.version_info[0] < 3 from io import BytesIO from itertools import product from functools import partial -from distutils.version import LooseVersion from threading import Thread, Lock import time import numpy as np -# np > 1.11 makes double ellipsis illegal in indices -HAVE_NP_GT_1p11 = LooseVersion(np.__version__) > '1.11' - from ..fileslice import (is_fancy, canonical_slicers, fileslice, predict_shape, read_segments, _positive_slice, threshold_heuristic, optimize_slicer, slice2len, @@ -23,10 +16,7 @@ calc_slicedefs, _simple_fileslice, slice2outax, strided_scalar) -from nose.tools import assert_equal -from nose.tools import assert_false -from nose.tools import assert_raises - +import pytest from numpy.testing import assert_array_equal @@ -39,7 +29,7 @@ def _check_slice(sliceobj): # Check if this is a view a[:] = 99 b_is_view = np.all(b == 99) - assert_equal(not is_fancy(sliceobj), b_is_view) + assert (not is_fancy(sliceobj)) == b_is_view def test_is_fancy(): @@ -48,16 +38,16 @@ def test_is_fancy(): _check_slice(slice0) _check_slice((slice0,)) # tuple is same # Double ellipsis illegal in np 1.12dev - set up check for that case - maybe_bad = HAVE_NP_GT_1p11 and slice0 is Ellipsis + maybe_bad = slice0 is Ellipsis for slice1 in slices: if maybe_bad and slice1 is Ellipsis: continue _check_slice((slice0, slice1)) - assert_false(is_fancy((None,))) - assert_false(is_fancy((None, 1))) - assert_false(is_fancy((1, None))) + assert not is_fancy((None,)) + assert not is_fancy((None, 1)) + assert not is_fancy((1, None)) # Chack that actual False returned (rather than falsey) - assert_equal(is_fancy(1), False) + assert is_fancy(1) is False def test_canonical_slicers(): @@ -70,90 +60,83 @@ def test_canonical_slicers(): 2) shape = (10, 10) for slice0 in slicers: - assert_equal(canonical_slicers((slice0,), shape), (slice0, slice(None))) + assert canonical_slicers((slice0,), shape) == (slice0, slice(None)) for slice1 in slicers: sliceobj = (slice0, slice1) - assert_equal(canonical_slicers(sliceobj, shape), sliceobj) - assert_equal(canonical_slicers(sliceobj, shape + (2, 3, 4)), - sliceobj + (slice(None),) * 3) - assert_equal(canonical_slicers(sliceobj * 3, shape * 3), - sliceobj * 3) + assert canonical_slicers(sliceobj, shape) == sliceobj + assert canonical_slicers(sliceobj, shape + (2, 3, 4)) == sliceobj + 
(slice(None),) * 3 + assert canonical_slicers(sliceobj * 3, shape * 3) == sliceobj * 3 # Check None passes through - assert_equal(canonical_slicers(sliceobj + (None,), shape), - sliceobj + (None,)) - assert_equal(canonical_slicers((None,) + sliceobj, shape), - (None,) + sliceobj) - assert_equal(canonical_slicers((None,) + sliceobj + (None,), shape), - (None,) + sliceobj + (None,)) + assert canonical_slicers(sliceobj + (None,), shape) == sliceobj + (None,) + assert canonical_slicers((None,) + sliceobj, shape) == (None,) + sliceobj + assert (canonical_slicers((None,) + sliceobj + (None,), shape) == + (None,) + sliceobj + (None,)) # Check Ellipsis - assert_equal(canonical_slicers((Ellipsis,), shape), - (slice(None), slice(None))) - assert_equal(canonical_slicers((Ellipsis, None), shape), - (slice(None), slice(None), None)) - assert_equal(canonical_slicers((Ellipsis, 1), shape), - (slice(None), 1)) - assert_equal(canonical_slicers((1, Ellipsis), shape), - (1, slice(None))) + assert canonical_slicers((Ellipsis,), shape) == (slice(None), slice(None)) + assert canonical_slicers((Ellipsis, None), shape) == (slice(None), slice(None), None) + assert canonical_slicers((Ellipsis, 1), shape) == (slice(None), 1) + assert canonical_slicers((1, Ellipsis), shape) == (1, slice(None)) # Ellipsis at end does nothing - assert_equal(canonical_slicers((1, 1, Ellipsis), shape), - (1, 1)) - assert_equal(canonical_slicers((1, Ellipsis, 2), (10, 1, 2, 3, 11)), - (1, slice(None), slice(None), slice(None), 2)) - assert_raises(ValueError, - canonical_slicers, (Ellipsis, 1, Ellipsis), (2, 3, 4, 5)) + assert canonical_slicers((1, 1, Ellipsis), shape) == (1, 1) + assert (canonical_slicers((1, Ellipsis, 2), (10, 1, 2, 3, 11)) == + (1, slice(None), slice(None), slice(None), 2)) + with pytest.raises(ValueError): + canonical_slicers((Ellipsis, 1, Ellipsis), (2, 3, 4, 5)) # Check full slices get expanded for slice0 in (slice(10), slice(0, 10), slice(0, 10, 1)): - assert_equal(canonical_slicers((slice0, 1), shape), - (slice(None), 1)) + assert canonical_slicers((slice0, 1), shape) == (slice(None), 1) for slice0 in (slice(10), slice(0, 10), slice(0, 10, 1)): - assert_equal(canonical_slicers((slice0, 1), shape), - (slice(None), 1)) - assert_equal(canonical_slicers((1, slice0), shape), - (1, slice(None))) + assert canonical_slicers((slice0, 1), shape) == (slice(None), 1) + assert canonical_slicers((1, slice0), shape) == (1, slice(None)) # Check ints etc get parsed through to tuples - assert_equal(canonical_slicers(1, shape), (1, slice(None))) - assert_equal(canonical_slicers(slice(None), shape), - (slice(None), slice(None))) + assert canonical_slicers(1, shape) == (1, slice(None)) + assert canonical_slicers(slice(None), shape) == (slice(None), slice(None)) # Check fancy indexing raises error - assert_raises(ValueError, canonical_slicers, (np.array(1), 1), shape) - assert_raises(ValueError, canonical_slicers, (1, np.array(1)), shape) + with pytest.raises(ValueError): + canonical_slicers((np.array(1), 1), shape) + with pytest.raises(ValueError): + canonical_slicers((1, np.array(1)), shape) # Check out of range integer raises error - assert_raises(ValueError, canonical_slicers, (10,), shape) - assert_raises(ValueError, canonical_slicers, (1, 10), shape) - assert_raises(ValueError, canonical_slicers, (10,), shape, True) - assert_raises(ValueError, canonical_slicers, (1, 10), shape, True) + with pytest.raises(ValueError): + canonical_slicers((10,), shape) + with pytest.raises(ValueError): + canonical_slicers((1, 10), shape) + with 
pytest.raises(ValueError): + canonical_slicers((10,), shape, True) + with pytest.raises(ValueError): + canonical_slicers((1, 10), shape, True) # Unless check_inds is False - assert_equal(canonical_slicers((10,), shape, False), (10, slice(None))) - assert_equal(canonical_slicers((1, 10,), shape, False), (1, 10)) + assert canonical_slicers((10,), shape, False) == (10, slice(None)) + assert canonical_slicers((1, 10,), shape, False) == (1, 10) # Check negative -> positive - assert_equal(canonical_slicers(-1, shape), (9, slice(None))) - assert_equal(canonical_slicers((slice(None), -1), shape), (slice(None), 9)) + assert canonical_slicers(-1, shape) == (9, slice(None)) + assert canonical_slicers((slice(None), -1), shape) == (slice(None), 9) def test_slice2outax(): # Test function giving output axes from input ndims and slice sn = slice(None) - assert_equal(slice2outax(1, (sn,)), (0,)) - assert_equal(slice2outax(1, (1,)), (None,)) - assert_equal(slice2outax(1, (None,)), (1,)) - assert_equal(slice2outax(1, (None, 1)), (None,)) - assert_equal(slice2outax(1, (None, 1, None)), (None,)) - assert_equal(slice2outax(1, (None, sn)), (1,)) - assert_equal(slice2outax(2, (sn,)), (0, 1)) - assert_equal(slice2outax(2, (sn, sn)), (0, 1)) - assert_equal(slice2outax(2, (1,)), (None, 0)) - assert_equal(slice2outax(2, (sn, 1)), (0, None)) - assert_equal(slice2outax(2, (None,)), (1, 2)) - assert_equal(slice2outax(2, (None, 1)), (None, 1)) - assert_equal(slice2outax(2, (None, 1, None)), (None, 2)) - assert_equal(slice2outax(2, (None, 1, None, 2)), (None, None)) - assert_equal(slice2outax(2, (None, sn, None, 1)), (1, None)) - assert_equal(slice2outax(3, (sn,)), (0, 1, 2)) - assert_equal(slice2outax(3, (sn, sn)), (0, 1, 2)) - assert_equal(slice2outax(3, (sn, None, sn)), (0, 2, 3)) - assert_equal(slice2outax(3, (sn, None, sn, None, sn)), (0, 2, 4)) - assert_equal(slice2outax(3, (1,)), (None, 0, 1)) - assert_equal(slice2outax(3, (None, sn, None, 1)), (1, None, 3)) + assert slice2outax(1, (sn,)) == (0,) + assert slice2outax(1, (1,)) == (None,) + assert slice2outax(1, (None,)) == (1,) + assert slice2outax(1, (None, 1)) == (None,) + assert slice2outax(1, (None, 1, None)) == (None,) + assert slice2outax(1, (None, sn)) == (1,) + assert slice2outax(2, (sn,)) == (0, 1) + assert slice2outax(2, (sn, sn)) == (0, 1) + assert slice2outax(2, (1,)) == (None, 0) + assert slice2outax(2, (sn, 1)) == (0, None) + assert slice2outax(2, (None,)) == (1, 2) + assert slice2outax(2, (None, 1)) == (None, 1) + assert slice2outax(2, (None, 1, None)) == (None, 2) + assert slice2outax(2, (None, 1, None, 2)) == (None, None) + assert slice2outax(2, (None, sn, None, 1)) == (1, None) + assert slice2outax(3, (sn,)) == (0, 1, 2) + assert slice2outax(3, (sn, sn)) == (0, 1, 2) + assert slice2outax(3, (sn, None, sn)) == (0, 2, 3) + assert slice2outax(3, (sn, None, sn, None, sn)) == (0, 2, 4) + assert slice2outax(3, (1,)) == (None, 0, 1) + assert slice2outax(3, (None, sn, None, 1)) == (1, None, 3) def _slices_for_len(L): @@ -179,112 +162,83 @@ def _slices_for_len(L): def test_slice2len(): # Test slice length calculation - assert_equal(slice2len(slice(None), 10), 10) - assert_equal(slice2len(slice(11), 10), 10) - assert_equal(slice2len(slice(1, 11), 10), 9) - assert_equal(slice2len(slice(1, 1), 10), 0) - assert_equal(slice2len(slice(1, 11, 2), 10), 5) - assert_equal(slice2len(slice(0, 11, 3), 10), 4) - assert_equal(slice2len(slice(1, 11, 3), 10), 3) - assert_equal(slice2len(slice(None, None, -1), 10), 10) - assert_equal(slice2len(slice(11, None, -1), 10), 10) - 
assert_equal(slice2len(slice(None, 1, -1), 10), 8) - assert_equal(slice2len(slice(None, None, -2), 10), 5) - assert_equal(slice2len(slice(None, None, -3), 10), 4) - assert_equal(slice2len(slice(None, 0, -3), 10), 3) + assert slice2len(slice(None), 10) == 10 + assert slice2len(slice(11), 10) == 10 + assert slice2len(slice(1, 11), 10) == 9 + assert slice2len(slice(1, 1), 10) == 0 + assert slice2len(slice(1, 11, 2), 10) == 5 + assert slice2len(slice(0, 11, 3), 10) == 4 + assert slice2len(slice(1, 11, 3), 10) == 3 + assert slice2len(slice(None, None, -1), 10) == 10 + assert slice2len(slice(11, None, -1), 10) == 10 + assert slice2len(slice(None, 1, -1), 10) == 8 + assert slice2len(slice(None, None, -2), 10) == 5 + assert slice2len(slice(None, None, -3), 10) == 4 + assert slice2len(slice(None, 0, -3), 10) == 3 # Start, end are always taken to be relative if negative - assert_equal(slice2len(slice(None, -4, -1), 10), 3) - assert_equal(slice2len(slice(-4, -2, 1), 10), 2) + assert slice2len(slice(None, -4, -1), 10) == 3 + assert slice2len(slice(-4, -2, 1), 10) == 2 # start after stop - assert_equal(slice2len(slice(3, 2, 1), 10), 0) - assert_equal(slice2len(slice(2, 3, -1), 10), 0) + assert slice2len(slice(3, 2, 1), 10) == 0 + assert slice2len(slice(2, 3, -1), 10) == 0 def test_fill_slicer(): # Test slice length calculation - assert_equal(fill_slicer(slice(None), 10), slice(0, 10, 1)) - assert_equal(fill_slicer(slice(11), 10), slice(0, 10, 1)) - assert_equal(fill_slicer(slice(1, 11), 10), slice(1, 10, 1)) - assert_equal(fill_slicer(slice(1, 1), 10), slice(1, 1, 1)) - assert_equal(fill_slicer(slice(1, 11, 2), 10), slice(1, 10, 2)) - assert_equal(fill_slicer(slice(0, 11, 3), 10), slice(0, 10, 3)) - assert_equal(fill_slicer(slice(1, 11, 3), 10), slice(1, 10, 3)) - assert_equal(fill_slicer(slice(None, None, -1), 10), - slice(9, None, -1)) - assert_equal(fill_slicer(slice(11, None, -1), 10), - slice(9, None, -1)) - assert_equal(fill_slicer(slice(None, 1, -1), 10), - slice(9, 1, -1)) - assert_equal(fill_slicer(slice(None, None, -2), 10), - slice(9, None, -2)) - assert_equal(fill_slicer(slice(None, None, -3), 10), - slice(9, None, -3)) - assert_equal(fill_slicer(slice(None, 0, -3), 10), - slice(9, 0, -3)) + assert fill_slicer(slice(None), 10) == slice(0, 10, 1) + assert fill_slicer(slice(11), 10) == slice(0, 10, 1) + assert fill_slicer(slice(1, 11), 10) == slice(1, 10, 1) + assert fill_slicer(slice(1, 1), 10) == slice(1, 1, 1) + assert fill_slicer(slice(1, 11, 2), 10) == slice(1, 10, 2) + assert fill_slicer(slice(0, 11, 3), 10) == slice(0, 10, 3) + assert fill_slicer(slice(1, 11, 3), 10) == slice(1, 10, 3) + assert fill_slicer(slice(None, None, -1), 10) == slice(9, None, -1) + assert fill_slicer(slice(11, None, -1), 10) == slice(9, None, -1) + assert fill_slicer(slice(None, 1, -1), 10) == slice(9, 1, -1) + assert fill_slicer(slice(None, None, -2), 10) == slice(9, None, -2) + assert fill_slicer(slice(None, None, -3), 10) == slice(9, None, -3) + assert fill_slicer(slice(None, 0, -3), 10) == slice(9, 0, -3) # Start, end are always taken to be relative if negative - assert_equal(fill_slicer(slice(None, -4, -1), 10), - slice(9, 6, -1)) - assert_equal(fill_slicer(slice(-4, -2, 1), 10), - slice(6, 8, 1)) + assert fill_slicer(slice(None, -4, -1), 10) == slice(9, 6, -1) + assert fill_slicer(slice(-4, -2, 1), 10) == slice(6, 8, 1) # start after stop - assert_equal(fill_slicer(slice(3, 2, 1), 10), - slice(3, 2, 1)) - assert_equal(fill_slicer(slice(2, 3, -1), 10), - slice(2, 3, -1)) + assert fill_slicer(slice(3, 2, 
1), 10) == slice(3, 2, 1) + assert fill_slicer(slice(2, 3, -1), 10) == slice(2, 3, -1) def test__positive_slice(): # Reverse slice direction to be positive - assert_equal(_positive_slice(slice(0, 5, 1)), slice(0, 5, 1)) - assert_equal(_positive_slice(slice(1, 5, 3)), slice(1, 5, 3)) - assert_equal(_positive_slice(slice(4, None, -2)), slice(0, 5, 2)) - assert_equal(_positive_slice(slice(4, None, -1)), slice(0, 5, 1)) - assert_equal(_positive_slice(slice(4, 1, -1)), slice(2, 5, 1)) - assert_equal(_positive_slice(slice(4, 1, -2)), slice(2, 5, 2)) + assert _positive_slice(slice(0, 5, 1)) == slice(0, 5, 1) + assert _positive_slice(slice(1, 5, 3)) == slice(1, 5, 3) + assert _positive_slice(slice(4, None, -2)) == slice(0, 5, 2) + assert _positive_slice(slice(4, None, -1)) == slice(0, 5, 1) + assert _positive_slice(slice(4, 1, -1)) == slice(2, 5, 1) + assert _positive_slice(slice(4, 1, -2)) == slice(2, 5, 2) def test_threshold_heuristic(): # Test for default skip / read heuristic # int - assert_equal(threshold_heuristic(1, 9, 1, skip_thresh=8), 'full') - assert_equal(threshold_heuristic(1, 9, 1, skip_thresh=7), None) - assert_equal(threshold_heuristic(1, 9, 2, skip_thresh=16), 'full') - assert_equal(threshold_heuristic(1, 9, 2, skip_thresh=15), None) - # long if on Python 2 - if PY2: - assert_equal(threshold_heuristic(long(1), 9, 1, skip_thresh=8), 'full') + assert threshold_heuristic(1, 9, 1, skip_thresh=8) == 'full' + assert threshold_heuristic(1, 9, 1, skip_thresh=7) is None + assert threshold_heuristic(1, 9, 2, skip_thresh=16) == 'full' + assert threshold_heuristic(1, 9, 2, skip_thresh=15) is None # full slice, smallest step size - assert_equal(threshold_heuristic( - slice(0, 9, 1), 9, 2, skip_thresh=2), - 'full') + assert threshold_heuristic(slice(0, 9, 1), 9, 2, skip_thresh=2) == 'full' # Dropping skip thresh below step size gives None - assert_equal(threshold_heuristic( - slice(0, 9, 1), 9, 2, skip_thresh=1), - None) + assert threshold_heuristic(slice(0, 9, 1), 9, 2, skip_thresh=1) == None # As does increasing step size - assert_equal(threshold_heuristic( - slice(0, 9, 2), 9, 2, skip_thresh=3), - None) + assert threshold_heuristic(slice(0, 9, 2), 9, 2, skip_thresh=3) == None # Negative step size same as positive - assert_equal(threshold_heuristic( - slice(9, None, -1), 9, 2, skip_thresh=2), - 'full') + assert threshold_heuristic(slice(9, None, -1), 9, 2, skip_thresh=2) == 'full' # Add a gap between start and end. 
Now contiguous because of step size - assert_equal(threshold_heuristic( - slice(2, 9, 1), 9, 2, skip_thresh=2), - 'contiguous') + assert threshold_heuristic(slice(2, 9, 1), 9, 2, skip_thresh=2) == 'contiguous' # To not-contiguous, even with step size 1 - assert_equal(threshold_heuristic( - slice(2, 9, 1), 9, 2, skip_thresh=1), - None) + assert threshold_heuristic(slice(2, 9, 1), 9, 2, skip_thresh=1) == None # Back to full when skip covers gap - assert_equal(threshold_heuristic( - slice(2, 9, 1), 9, 2, skip_thresh=4), - 'full') + assert threshold_heuristic(slice(2, 9, 1), 9, 2, skip_thresh=4) == 'full' # Until it doesn't cover the gap - assert_equal(threshold_heuristic( - slice(2, 9, 1), 9, 2, skip_thresh=3), - 'contiguous') + assert threshold_heuristic(slice(2, 9, 1), 9, 2, skip_thresh=3) == 'contiguous' # Some dummy heuristics for optimize_slicer @@ -315,235 +269,176 @@ def test_optimize_slicer(): for is_slowest in (True, False): # following tests not affected by all_full or optimization # full - always passes through - assert_equal( - optimize_slicer(slice(None), 10, all_full, 4, heuristic), + assert ( + optimize_slicer(slice(None), 10, all_full, is_slowest, 4, heuristic) == (slice(None), slice(None))) # Even if full specified with explicit values - assert_equal( - optimize_slicer(slice(10), 10, all_full, 4, heuristic), + assert ( + optimize_slicer(slice(10), 10, all_full, is_slowest, 4, heuristic) == (slice(None), slice(None))) - assert_equal( - optimize_slicer(slice(0, 10), 10, all_full, 4, heuristic), + assert ( + optimize_slicer(slice(0, 10), 10, all_full, is_slowest, 4, heuristic) == (slice(None), slice(None))) - assert_equal( - optimize_slicer(slice(0, 10, 1), 10, all_full, 4, heuristic), + assert ( + optimize_slicer(slice(0, 10, 1), 10, all_full, is_slowest, 4, heuristic) == (slice(None), slice(None))) # Reversed full is still full, but with reversed post_slice - assert_equal( + assert ( optimize_slicer( - slice(None, None, -1), 10, all_full, 4, heuristic), + slice(None, None, -1), 10, all_full, is_slowest, 4, heuristic) == (slice(None), slice(None, None, -1))) # Contiguous is contiguous unless heuristic kicks in, in which case it may # be 'full' - assert_equal( - optimize_slicer(slice(9), 10, False, False, 4, _always), - (slice(0, 9, 1), slice(None))) - assert_equal( - optimize_slicer(slice(9), 10, True, False, 4, _always), - (slice(None), slice(0, 9, 1))) + assert optimize_slicer(slice(9), 10, False, False, 4, _always) == (slice(0, 9, 1), slice(None)) + assert optimize_slicer(slice(9), 10, True, False, 4, _always) == (slice(None), slice(0, 9, 1)) # Unless this is the slowest dimenion, and all_true is True, in which case # we don't update to full - assert_equal( - optimize_slicer(slice(9), 10, True, True, 4, _always), - (slice(0, 9, 1), slice(None))) + assert optimize_slicer(slice(9), 10, True, True, 4, _always) == (slice(0, 9, 1), slice(None)) # Nor if the heuristic won't update - assert_equal( - optimize_slicer(slice(9), 10, True, False, 4, _never), - (slice(0, 9, 1), slice(None))) - assert_equal( - optimize_slicer(slice(1, 10), 10, True, False, 4, _never), - (slice(1, 10, 1), slice(None))) + assert optimize_slicer(slice(9), 10, True, False, 4, _never) == (slice(0, 9, 1), slice(None)) + assert (optimize_slicer(slice(1, 10), 10, True, False, 4, _never) == + (slice(1, 10, 1), slice(None))) # Reversed contiguous still contiguous - assert_equal( - optimize_slicer(slice(8, None, -1), 10, False, False, 4, _never), - (slice(0, 9, 1), slice(None, None, -1))) - assert_equal( - 
optimize_slicer(slice(8, None, -1), 10, True, False, 4, _always), - (slice(None), slice(8, None, -1))) - assert_equal( - optimize_slicer(slice(8, None, -1), 10, False, False, 4, _never), - (slice(0, 9, 1), slice(None, None, -1))) - assert_equal( - optimize_slicer(slice(9, 0, -1), 10, False, False, 4, _never), - (slice(1, 10, 1), slice(None, None, -1))) + assert (optimize_slicer(slice(8, None, -1), 10, False, False, 4, _never) == + (slice(0, 9, 1), slice(None, None, -1))) + assert (optimize_slicer(slice(8, None, -1), 10, True, False, 4, _always) == + (slice(None), slice(8, None, -1))) + assert (optimize_slicer(slice(8, None, -1), 10, False, False, 4, _never) == + (slice(0, 9, 1), slice(None, None, -1))) + assert (optimize_slicer(slice(9, 0, -1), 10, False, False, 4, _never) == + (slice(1, 10, 1), slice(None, None, -1))) # Non-contiguous - assert_equal( - optimize_slicer(slice(0, 10, 2), 10, False, False, 4, _never), - (slice(0, 10, 2), slice(None))) + assert (optimize_slicer(slice(0, 10, 2), 10, False, False, 4, _never) == + (slice(0, 10, 2), slice(None))) # all_full triggers optimization, but optimization does nothing - assert_equal( - optimize_slicer(slice(0, 10, 2), 10, True, False, 4, _never), - (slice(0, 10, 2), slice(None))) + assert (optimize_slicer(slice(0, 10, 2), 10, True, False, 4, _never) == + (slice(0, 10, 2), slice(None))) # all_full triggers optimization, optimization does something - assert_equal( - optimize_slicer(slice(0, 10, 2), 10, True, False, 4, _always), - (slice(None), slice(0, 10, 2))) + assert (optimize_slicer(slice(0, 10, 2), 10, True, False, 4, _always) == + (slice(None), slice(0, 10, 2))) # all_full disables optimization, optimization does something - assert_equal( - optimize_slicer(slice(0, 10, 2), 10, False, False, 4, _always), - (slice(0, 10, 2), slice(None))) + assert (optimize_slicer(slice(0, 10, 2), 10, False, False, 4, _always) == + (slice(0, 10, 2), slice(None))) # Non contiguous, reversed - assert_equal( - optimize_slicer(slice(10, None, -2), 10, False, False, 4, _never), - (slice(1, 10, 2), slice(None, None, -1))) - assert_equal( - optimize_slicer(slice(10, None, -2), 10, True, False, 4, _always), - (slice(None), slice(9, None, -2))) + assert (optimize_slicer(slice(10, None, -2), 10, False, False, 4, _never) == + (slice(1, 10, 2), slice(None, None, -1))) + assert (optimize_slicer(slice(10, None, -2), 10, True, False, 4, _always) == + (slice(None), slice(9, None, -2))) # Short non-contiguous - assert_equal( - optimize_slicer(slice(2, 8, 2), 10, False, False, 4, _never), - (slice(2, 8, 2), slice(None))) + assert (optimize_slicer(slice(2, 8, 2), 10, False, False, 4, _never) == + (slice(2, 8, 2), slice(None))) # with partial read - assert_equal( - optimize_slicer(slice(2, 8, 2), 10, True, False, 4, _partial), - (slice(2, 8, 1), slice(None, None, 2))) + assert (optimize_slicer(slice(2, 8, 2), 10, True, False, 4, _partial) == + (slice(2, 8, 1), slice(None, None, 2))) # If this is the slowest changing dimension, heuristic can upgrade None to # contiguous, but not (None, contiguous) to full - assert_equal( # we've done this one already - optimize_slicer(slice(0, 10, 2), 10, True, False, 4, _always), - (slice(None), slice(0, 10, 2))) - assert_equal( # if slowest, just upgrade to contiguous - optimize_slicer(slice(0, 10, 2), 10, True, True, 4, _always), - (slice(0, 10, 1), slice(None, None, 2))) - assert_equal( # contiguous does not upgrade to full - optimize_slicer(slice(9), 10, True, True, 4, _always), - (slice(0, 9, 1), slice(None))) + # we've done this one 
already + assert (optimize_slicer(slice(0, 10, 2), 10, True, False, 4, _always) == + (slice(None), slice(0, 10, 2))) + # if slowest, just upgrade to contiguous + assert (optimize_slicer(slice(0, 10, 2), 10, True, True, 4, _always) == + (slice(0, 10, 1), slice(None, None, 2))) + # contiguous does not upgrade to full + assert optimize_slicer(slice(9), 10, True, True, 4, _always) == (slice(0, 9, 1), slice(None)) # integer - assert_equal( - optimize_slicer(0, 10, True, False, 4, _never), - (0, 'dropped')) - assert_equal( # can be negative - optimize_slicer(-1, 10, True, False, 4, _never), - (9, 'dropped')) - assert_equal( # or float - optimize_slicer(0.9, 10, True, False, 4, _never), - (0, 'dropped')) - assert_raises(ValueError, # should never get 'contiguous' - optimize_slicer, 0, 10, True, False, 4, _partial) - assert_equal( # full can be forced with heuristic - optimize_slicer(0, 10, True, False, 4, _always), - (slice(None), 0)) - assert_equal( # but disabled for slowest changing dimension - optimize_slicer(0, 10, True, True, 4, _always), - (0, 'dropped')) + assert optimize_slicer(0, 10, True, False, 4, _never) == (0, 'dropped') + # can be negative + assert optimize_slicer(-1, 10, True, False, 4, _never) == (9, 'dropped') + # or float + assert optimize_slicer(0.9, 10, True, False, 4, _never) == (0, 'dropped') + # should never get 'contiguous' + with pytest.raises(ValueError): + optimize_slicer(0, 10, True, False, 4, _partial) + # full can be forced with heuristic + assert optimize_slicer(0, 10, True, False, 4, _always) == (slice(None), 0) + # but disabled for slowest changing dimension + assert optimize_slicer(0, 10, True, True, 4, _always) == (0, 'dropped') def test_optimize_read_slicers(): # Test function to optimize read slicers - assert_equal(optimize_read_slicers((1,), (10,), 4, _never), - ((1,), ())) - assert_equal(optimize_read_slicers((slice(None),), (10,), 4, _never), - ((slice(None),), (slice(None),))) - assert_equal(optimize_read_slicers((slice(9),), (10,), 4, _never), - ((slice(0, 9, 1),), (slice(None),))) + assert optimize_read_slicers((1,), (10,), 4, _never) == ((1,), ()) + assert (optimize_read_slicers((slice(None),), (10,), 4, _never) == + ((slice(None),), (slice(None),))) + assert (optimize_read_slicers((slice(9),), (10,), 4, _never) == + ((slice(0, 9, 1),), (slice(None),))) # optimize cannot update a continuous to a full if last - assert_equal(optimize_read_slicers((slice(9),), (10,), 4, _always), - ((slice(0, 9, 1),), (slice(None),))) + assert (optimize_read_slicers((slice(9),), (10,), 4, _always) == + ((slice(0, 9, 1),), (slice(None),))) # optimize can update non-contiguous to continuous even if last # not optimizing - assert_equal(optimize_read_slicers((slice(0, 9, 2),), (10,), 4, _never), - ((slice(0, 9, 2),), (slice(None),))) + assert (optimize_read_slicers((slice(0, 9, 2),), (10,), 4, _never) == + ((slice(0, 9, 2),), (slice(None),))) # optimizing - assert_equal(optimize_read_slicers((slice(0, 9, 2),), (10,), 4, _always), - ((slice(0, 9, 1),), (slice(None, None, 2),))) + assert (optimize_read_slicers((slice(0, 9, 2),), (10,), 4, _always) == + ((slice(0, 9, 1),), (slice(None, None, 2),))) # Optimize does nothing for integer when last - assert_equal(optimize_read_slicers((1,), (10,), 4, _always), - ((1,), ())) + assert optimize_read_slicers((1,), (10,), 4, _always) == ((1,), ()) # 2D - assert_equal(optimize_read_slicers( - (slice(None), slice(None)), (10, 6), 4, _never), - ((slice(None), slice(None)), (slice(None), slice(None)))) - 
assert_equal(optimize_read_slicers((slice(None), 1), (10, 6), 4, _never), - ((slice(None), 1), (slice(None),))) - assert_equal(optimize_read_slicers((1, slice(None)), (10, 6), 4, _never), - ((1, slice(None)), (slice(None),))) + assert (optimize_read_slicers((slice(None), slice(None)), (10, 6), 4, _never) == + ((slice(None), slice(None)), (slice(None), slice(None)))) + assert (optimize_read_slicers((slice(None), 1), (10, 6), 4, _never) == + ((slice(None), 1), (slice(None),))) + assert (optimize_read_slicers((1, slice(None)), (10, 6), 4, _never) == + ((1, slice(None)), (slice(None),))) # Not optimizing a partial slice - assert_equal(optimize_read_slicers( - (slice(9), slice(None)), (10, 6), 4, _never), - ((slice(0, 9, 1), slice(None)), (slice(None), slice(None)))) + assert (optimize_read_slicers((slice(9), slice(None)), (10, 6), 4, _never) == + ((slice(0, 9, 1), slice(None)), (slice(None), slice(None)))) # Optimizing a partial slice - assert_equal(optimize_read_slicers( - (slice(9), slice(None)), (10, 6), 4, _always), - ((slice(None), slice(None)), (slice(0, 9, 1), slice(None)))) + assert (optimize_read_slicers((slice(9), slice(None)), (10, 6), 4, _always) == + ((slice(None), slice(None)), (slice(0, 9, 1), slice(None)))) # Optimize cannot update a continuous to a full if last - assert_equal(optimize_read_slicers( - (slice(None), slice(5)), (10, 6), 4, _always), - ((slice(None), slice(0, 5, 1)), (slice(None), slice(None)))) + assert (optimize_read_slicers((slice(None), slice(5)), (10, 6), 4, _always) == + ((slice(None), slice(0, 5, 1)), (slice(None), slice(None)))) # optimize can update non-contiguous to full if not last # not optimizing - assert_equal(optimize_read_slicers( - (slice(0, 9, 3), slice(None)), (10, 6), 4, _never), - ((slice(0, 9, 3), slice(None)), (slice(None), slice(None)))) + assert (optimize_read_slicers((slice(0, 9, 3), slice(None)), (10, 6), 4, _never) == + ((slice(0, 9, 3), slice(None)), (slice(None), slice(None)))) # optimizing full - assert_equal(optimize_read_slicers( - (slice(0, 9, 3), slice(None)), (10, 6), 4, _always), - ((slice(None), slice(None)), (slice(0, 9, 3), slice(None)))) + assert (optimize_read_slicers((slice(0, 9, 3), slice(None)), (10, 6), 4, _always) == + ((slice(None), slice(None)), (slice(0, 9, 3), slice(None)))) # optimizing partial - assert_equal(optimize_read_slicers( - (slice(0, 9, 3), slice(None)), (10, 6), 4, _partial), - ((slice(0, 9, 1), slice(None)), (slice(None, None, 3), slice(None)))) + assert (optimize_read_slicers((slice(0, 9, 3), slice(None)), (10, 6), 4, _partial) == + ((slice(0, 9, 1), slice(None)), (slice(None, None, 3), slice(None)))) # optimize can update non-contiguous to continuous even if last # not optimizing - assert_equal(optimize_read_slicers( - (slice(None), slice(0, 5, 2)), (10, 6), 4, _never), - ((slice(None), slice(0, 5, 2)), (slice(None), slice(None)))) + assert (optimize_read_slicers((slice(None), slice(0, 5, 2)), (10, 6), 4, _never) == + ((slice(None), slice(0, 5, 2)), (slice(None), slice(None)))) # optimizing - assert_equal(optimize_read_slicers( - (slice(None), slice(0, 5, 2),), (10, 6), 4, _always), - ((slice(None), slice(0, 5, 1)), (slice(None), slice(None, None, 2)))) + assert (optimize_read_slicers((slice(None), slice(0, 5, 2),), (10, 6), 4, _always) == + ((slice(None), slice(0, 5, 1)), (slice(None), slice(None, None, 2)))) # Optimize does nothing for integer when last - assert_equal(optimize_read_slicers( - (slice(None), 1), (10, 6), 4, _always), - ((slice(None), 1), (slice(None),))) + assert 
(optimize_read_slicers((slice(None), 1), (10, 6), 4, _always) == + ((slice(None), 1), (slice(None),))) # Check gap threshold with 3D _depends0 = partial(threshold_heuristic, skip_thresh=10 * 4 - 1) _depends1 = partial(threshold_heuristic, skip_thresh=10 * 4) - assert_equal(optimize_read_slicers( - (slice(9), slice(None), slice(None)), (10, 6, 2), 4, _depends0), - ((slice(None), slice(None), slice(None)), - (slice(0, 9, 1), slice(None), slice(None)))) - assert_equal(optimize_read_slicers( - (slice(None), slice(5), slice(None)), (10, 6, 2), 4, _depends0), - ((slice(None), slice(0, 5, 1), slice(None)), - (slice(None), slice(None), slice(None)))) - assert_equal(optimize_read_slicers( - (slice(None), slice(5), slice(None)), (10, 6, 2), 4, _depends1), - ((slice(None), slice(None), slice(None)), - (slice(None), slice(0, 5, 1), slice(None)))) + assert (optimize_read_slicers( + (slice(9), slice(None), slice(None)), (10, 6, 2), 4, _depends0) == + ((slice(None), slice(None), slice(None)), (slice(0, 9, 1), slice(None), slice(None)))) + assert (optimize_read_slicers( + (slice(None), slice(5), slice(None)), (10, 6, 2), 4, _depends0) == + ((slice(None), slice(0, 5, 1), slice(None)), (slice(None), slice(None), slice(None)))) + assert (optimize_read_slicers( + (slice(None), slice(5), slice(None)), (10, 6, 2), 4, _depends1) == + ((slice(None), slice(None), slice(None)), (slice(None), slice(0, 5, 1), slice(None)))) # Check longs as integer slices sn = slice(None) - assert_equal(optimize_read_slicers( - (1, 2, 3), (2, 3, 4), 4, _always), - ((sn, sn, 3), (1, 2))) - if PY2: # Check we can pass in longs as well - assert_equal(optimize_read_slicers( - (long(1), long(2), long(3)), (2, 3, 4), 4, _always), - ((sn, sn, 3), (1, 2))) + assert optimize_read_slicers((1, 2, 3), (2, 3, 4), 4, _always) == ((sn, sn, 3), (1, 2)) def test_slicers2segments(): # Test function to construct segments from slice objects - assert_equal(slicers2segments((0,), (10,), 7, 4), - [[7, 4]]) - assert_equal(slicers2segments((0, 1), (10, 6), 7, 4), - [[7 + 10 * 4, 4]]) - assert_equal(slicers2segments((0, 1, 2), (10, 6, 4), 7, 4), - [[7 + 10 * 4 + 10 * 6 * 2 * 4, 4]]) - assert_equal(slicers2segments((slice(None),), (10,), 7, 4), - [[7, 10 * 4]]) - assert_equal(slicers2segments((0, slice(None)), (10, 6), 7, 4), - [[7 + 10 * 4 * i, 4] for i in range(6)]) - assert_equal(slicers2segments((slice(None), 0), (10, 6), 7, 4), - [[7, 10 * 4]]) - assert_equal(slicers2segments((slice(None), slice(None)), (10, 6), 7, 4), - [[7, 10 * 6 * 4]]) - assert_equal(slicers2segments( - (slice(None), slice(None), 2), (10, 6, 4), 7, 4), - [[7 + 10 * 6 * 2 * 4, 10 * 6 * 4]]) - if PY2: # Check we can pass longs on Python 2 - assert_equal( - slicers2segments((long(0), long(1), long(2)), (10, 6, 4), 7, 4), - [[7 + 10 * 4 + 10 * 6 * 2 * 4, 4]]) + assert slicers2segments((0,), (10,), 7, 4) == [[7, 4]] + assert slicers2segments((0, 1), (10, 6), 7, 4) == [[7 + 10 * 4, 4]] + assert slicers2segments((0, 1, 2), (10, 6, 4), 7, 4) == [[7 + 10 * 4 + 10 * 6 * 2 * 4, 4]] + assert slicers2segments((slice(None),), (10,), 7, 4) == [[7, 10 * 4]] + assert (slicers2segments((0, slice(None)), (10, 6), 7, 4) == + [[7 + 10 * 4 * i, 4] for i in range(6)]) + assert slicers2segments((slice(None), 0), (10, 6), 7, 4) == [[7, 10 * 4]] + assert slicers2segments((slice(None), slice(None)), (10, 6), 7, 4) == [[7, 10 * 6 * 4]] + assert (slicers2segments((slice(None), slice(None), 2), (10, 6, 4), 7, 4) == + [[7 + 10 * 6 * 2 * 4, 10 * 6 * 4]]) def test_calc_slicedefs(): @@ -551,71 +446,71 @@ def 
test_calc_slicedefs(): # wrote them after the code. We live and (fail to) learn segments, out_shape, new_slicing = calc_slicedefs( (1,), (10,), 4, 7, 'F', _never) - assert_equal(segments, [[11, 4]]) - assert_equal(new_slicing, ()) - assert_equal(out_shape, ()) - assert_equal( - calc_slicedefs((slice(None),), (10,), 4, 7, 'F', _never), + assert segments == [[11, 4]] + assert new_slicing == () + assert out_shape == () + assert ( + calc_slicedefs((slice(None),), (10,), 4, 7, 'F', _never) == ([[7, 40]], (10,), (), )) - assert_equal( - calc_slicedefs((slice(9),), (10,), 4, 7, 'F', _never), + assert ( + calc_slicedefs((slice(9),), (10,), 4, 7, 'F', _never) == ([[7, 36]], (9,), (), )) - assert_equal( - calc_slicedefs((slice(1, 9),), (10,), 4, 7, 'F', _never), + assert ( + calc_slicedefs((slice(1, 9),), (10,), 4, 7, 'F', _never) == ([[11, 32]], (8,), (), )) # Two dimensions, single slice - assert_equal( - calc_slicedefs((0,), (10, 6), 4, 7, 'F', _never), + assert ( + calc_slicedefs((0,), (10, 6), 4, 7, 'F', _never) == ([[7, 4], [47, 4], [87, 4], [127, 4], [167, 4], [207, 4]], (6,), (), )) - assert_equal( - calc_slicedefs((0,), (10, 6), 4, 7, 'C', _never), + assert ( + calc_slicedefs((0,), (10, 6), 4, 7, 'C', _never) == ([[7, 6 * 4]], (6,), (), )) # Two dimensions, contiguous not full - assert_equal( - calc_slicedefs((1, slice(1, 5)), (10, 6), 4, 7, 'F', _never), + assert ( + calc_slicedefs((1, slice(1, 5)), (10, 6), 4, 7, 'F', _never) == ([[51, 4], [91, 4], [131, 4], [171, 4]], (4,), (), )) - assert_equal( - calc_slicedefs((1, slice(1, 5)), (10, 6), 4, 7, 'C', _never), + assert ( + calc_slicedefs((1, slice(1, 5)), (10, 6), 4, 7, 'C', _never) == ([[7 + 7 * 4, 16]], (4,), (), )) # With full slice first - assert_equal( - calc_slicedefs((slice(None), slice(1, 5)), (10, 6), 4, 7, 'F', _never), + assert ( + calc_slicedefs((slice(None), slice(1, 5)), (10, 6), 4, 7, 'F', _never) == ([[47, 160]], (10, 4), (), )) # Check effect of heuristic on calc_slicedefs # Even integer slices can generate full when heuristic says so - assert_equal( - calc_slicedefs((1, slice(None)), (10, 6), 4, 7, 'F', _always), + assert ( + calc_slicedefs((1, slice(None)), (10, 6), 4, 7, 'F', _always) == ([[7, 10 * 6 * 4]], (10, 6), (1, slice(None)), )) # Except when last - assert_equal( - calc_slicedefs((slice(None), 1), (10, 6), 4, 7, 'F', _always), + assert ( + calc_slicedefs((slice(None), 1), (10, 6), 4, 7, 'F', _always) == ([[7 + 10 * 4, 10 * 4]], (10,), (), @@ -631,17 +526,16 @@ def test_predict_shape(): for i in range(n_dim): slicers_list.append(_slices_for_len(shape[i])) for sliceobj in product(*slicers_list): - assert_equal(predict_shape(sliceobj, shape), - arr[sliceobj].shape) + assert predict_shape(sliceobj, shape) == arr[sliceobj].shape # Try some Nones and ellipses - assert_equal(predict_shape((Ellipsis,), (2, 3)), (2, 3)) - assert_equal(predict_shape((Ellipsis, 1), (2, 3)), (2,)) - assert_equal(predict_shape((1, Ellipsis), (2, 3)), (3,)) - assert_equal(predict_shape((1, slice(None), Ellipsis), (2, 3)), (3,)) - assert_equal(predict_shape((None,), (2, 3)), (1, 2, 3)) - assert_equal(predict_shape((None, 1), (2, 3)), (1, 3)) - assert_equal(predict_shape((1, None, slice(None)), (2, 3)), (1, 3)) - assert_equal(predict_shape((1, slice(None), None), (2, 3)), (3, 1)) + assert predict_shape((Ellipsis,), (2, 3)) == (2, 3) + assert predict_shape((Ellipsis, 1), (2, 3)) == (2,) + assert predict_shape((1, Ellipsis), (2, 3)) == (3,) + assert predict_shape((1, slice(None), Ellipsis), (2, 3)) == (3,) + assert predict_shape((None,), (2, 3)) 
== (1, 2, 3) + assert predict_shape((None, 1), (2, 3)) == (1, 3) + assert predict_shape((1, None, slice(None)), (2, 3)) == (1, 3) + assert predict_shape((1, slice(None), None), (2, 3)) == (3, 1) def test_strided_scalar(): @@ -652,18 +546,19 @@ def test_strided_scalar(): expected = np.zeros(shape, dtype=np.array(scalar).dtype) + scalar observed = strided_scalar(shape, scalar) assert_array_equal(observed, expected) - assert_equal(observed.shape, shape) - assert_equal(observed.dtype, expected.dtype) + assert observed.shape == shape + assert observed.dtype == expected.dtype assert_array_equal(observed.strides, 0) # Strided scalars are set as not writeable # This addresses a numpy 1.10 breakage of broadcasting a strided # array without resizing (see GitHub PR #358) - assert_false(observed.flags.writeable) + assert not observed.flags.writeable def setval(x): x[..., 0] = 99 # RuntimeError for numpy < 1.10 - assert_raises((RuntimeError, ValueError), setval, observed) + with pytest.raises((RuntimeError, ValueError)): + setval(observed) # Default scalar value is 0 assert_array_equal(strided_scalar((2, 3, 4)), np.zeros((2, 3, 4))) @@ -686,9 +581,12 @@ def test_read_segments(): np.r_[arr[5:25], arr[50:75]]) _check_bytes(read_segments(fobj, [], 0), arr[0:0]) # Error conditions - assert_raises(ValueError, read_segments, fobj, [], 1) - assert_raises(ValueError, read_segments, fobj, [(0, 200)], 199) - assert_raises(Exception, read_segments, fobj, [(0, 100), (100, 200)], 199) + with pytest.raises(ValueError): + read_segments(fobj, [], 1) + with pytest.raises(ValueError): + read_segments(fobj, [(0, 200)], 199) + with pytest.raises(Exception): + read_segments(fobj, [(0, 100), (100, 200)], 199) def test_read_segments_lock(): @@ -747,10 +645,8 @@ def runtest(): assert numpassed[0] == len(threads) -def _check_slicer(sliceobj, arr, fobj, offset, order, - heuristic=threshold_heuristic): - new_slice = fileslice(fobj, sliceobj, arr.shape, arr.dtype, offset, order, - heuristic) +def _check_slicer(sliceobj, arr, fobj, offset, order, heuristic=threshold_heuristic): + new_slice = fileslice(fobj, sliceobj, arr.shape, arr.dtype, offset, order, heuristic) assert_array_equal(arr[sliceobj], new_slice) @@ -814,8 +710,8 @@ def test_fileslice_errors(): fobj = BytesIO(arr.tostring()) _check_slicer((1,), arr, fobj, 0, 'C') # Fancy indexing raises error - assert_raises(ValueError, - fileslice, fobj, (np.array(1),), (2, 3, 4), arr.dtype) + with pytest.raises(ValueError): + fileslice(fobj, (np.array(1),), (2, 3, 4), arr.dtype) def test_fileslice_heuristic(): diff --git a/nibabel/tests/test_fileutils.py b/nibabel/tests/test_fileutils.py index 63ecc8ee34..ffd7d91b6a 100644 --- a/nibabel/tests/test_fileutils.py +++ b/nibabel/tests/test_fileutils.py @@ -12,12 +12,7 @@ from ..fileutils import read_zt_byte_strings -from numpy.testing import (assert_almost_equal, - assert_array_equal) - -from nose.tools import (assert_true, assert_false, assert_raises, - assert_equal, assert_not_equal) - +import pytest from ..tmpdirs import InTemporaryDirectory @@ -35,22 +30,22 @@ def test_read_zt_byte_strings(): # open it again fread = open(path, 'rb') # test readout of one string - assert_equal(read_zt_byte_strings(fread), [b'test.fmr']) + assert read_zt_byte_strings(fread) == [b'test.fmr'] # test new file position - assert_equal(fread.tell(), 9) + assert fread.tell() == 9 # manually rewind fread.seek(0) # test readout of two strings - assert_equal(read_zt_byte_strings(fread, 2), - [b'test.fmr', b'test.prt']) - assert_equal(fread.tell(), 18) + assert 
read_zt_byte_strings(fread, 2) == [b'test.fmr', b'test.prt'] + assert fread.tell() == 18 # test readout of more strings than present fread.seek(0) - assert_raises(ValueError, read_zt_byte_strings, fread, 3) + with pytest.raises(ValueError): + read_zt_byte_strings(fread, 3) fread.seek(9) - assert_raises(ValueError, read_zt_byte_strings, fread, 2) + with pytest.raises(ValueError): + read_zt_byte_strings(fread, 2) # Try with a small bufsize fread.seek(0) - assert_equal(read_zt_byte_strings(fread, 2, 4), - [b'test.fmr', b'test.prt']) + assert read_zt_byte_strings(fread, 2, 4) == [b'test.fmr', b'test.prt'] fread.close() diff --git a/nibabel/tests/test_floating.py b/nibabel/tests/test_floating.py index 96376270b1..e419eb8868 100644 --- a/nibabel/tests/test_floating.py +++ b/nibabel/tests/test_floating.py @@ -2,9 +2,6 @@ """ import sys -PY2 = sys.version_info[0] < 3 - -from distutils.version import LooseVersion import numpy as np @@ -14,18 +11,9 @@ longdouble_precision_improved) from ..testing import suppress_warnings -from nose import SkipTest -from nose.tools import assert_equal, assert_raises, assert_true, assert_false +import pytest -IEEE_floats = [np.float32, np.float64] -try: - np.float16 -except AttributeError: # float16 not present in np < 1.6 - have_float16 = False -else: - have_float16 = True -if have_float16: - IEEE_floats.append(np.float16) +IEEE_floats = [np.float16, np.float32, np.float64] LD_INFO = type_info(np.longdouble) @@ -45,17 +33,17 @@ def test_type_info(): for dtt in np.sctypes['int'] + np.sctypes['uint']: info = np.iinfo(dtt) infod = type_info(dtt) - assert_equal(dict(min=info.min, max=info.max, - nexp=None, nmant=None, - minexp=None, maxexp=None, - width=np.dtype(dtt).itemsize), infod) - assert_equal(infod['min'].dtype.type, dtt) - assert_equal(infod['max'].dtype.type, dtt) + assert dict(min=info.min, max=info.max, + nexp=None, nmant=None, + minexp=None, maxexp=None, + width=np.dtype(dtt).itemsize) == infod + assert infod['min'].dtype.type == dtt + assert infod['max'].dtype.type == dtt for dtt in IEEE_floats + [np.complex64, np.complex64]: infod = type_info(dtt) - assert_equal(dtt2dict(dtt), infod) - assert_equal(infod['min'].dtype.type, dtt) - assert_equal(infod['max'].dtype.type, dtt) + assert dtt2dict(dtt) == infod + assert infod['min'].dtype.type == dtt + assert infod['max'].dtype.type == dtt # What is longdouble? 
ld_dict = dtt2dict(np.longdouble) dbl_dict = dtt2dict(np.float64) @@ -80,14 +68,14 @@ def test_type_info(): ld_dict['width'] = width else: raise ValueError("Unexpected float type {} to test".format(np.longdouble)) - assert_equal(ld_dict, infod) + assert ld_dict == infod def test_nmant(): for t in IEEE_floats: - assert_equal(type_info(t)['nmant'], np.finfo(t).nmant) + assert type_info(t)['nmant'] == np.finfo(t).nmant if (LD_INFO['nmant'], LD_INFO['nexp']) == (63, 15): - assert_equal(type_info(np.longdouble)['nmant'], 63) + assert type_info(np.longdouble)['nmant'] == 63 def test_check_nmant_nexp(): @@ -95,37 +83,37 @@ def test_check_nmant_nexp(): for t in IEEE_floats: nmant = np.finfo(t).nmant maxexp = np.finfo(t).maxexp - assert_true(_check_nmant(t, nmant)) - assert_false(_check_nmant(t, nmant - 1)) - assert_false(_check_nmant(t, nmant + 1)) + assert _check_nmant(t, nmant) + assert not _check_nmant(t, nmant - 1) + assert not _check_nmant(t, nmant + 1) with suppress_warnings(): # overflow - assert_true(_check_maxexp(t, maxexp)) - assert_false(_check_maxexp(t, maxexp - 1)) + assert _check_maxexp(t, maxexp) + assert not _check_maxexp(t, maxexp - 1) with suppress_warnings(): - assert_false(_check_maxexp(t, maxexp + 1)) + assert not _check_maxexp(t, maxexp + 1) # Check against type_info for t in ok_floats(): ti = type_info(t) if ti['nmant'] not in (105, 106): # This check does not work for PPC double pair - assert_true(_check_nmant(t, ti['nmant'])) + assert _check_nmant(t, ti['nmant']) # Test fails for longdouble after blacklisting of OSX powl as of numpy # 1.12 - see https://github.com/numpy/numpy/issues/8307 - if (t != np.longdouble or - sys.platform != 'darwin' or - LooseVersion(np.__version__) < LooseVersion('1.12')): - assert_true(_check_maxexp(t, ti['maxexp'])) + if t != np.longdouble or sys.platform != 'darwin': + assert _check_maxexp(t, ti['maxexp']) def test_as_int(): # Integer representation of number - assert_equal(as_int(2.0), 2) - assert_equal(as_int(-2.0), -2) - assert_raises(FloatingError, as_int, 2.1) - assert_raises(FloatingError, as_int, -2.1) - assert_equal(as_int(2.1, False), 2) - assert_equal(as_int(-2.1, False), -2) + assert as_int(2.0) == 2 + assert as_int(-2.0) == -2 + with pytest.raises(FloatingError): + as_int(2.1) + with pytest.raises(FloatingError): + as_int(-2.1) + assert as_int(2.1, False) == 2 + assert as_int(-2.1, False) == -2 v = np.longdouble(2**64) - assert_equal(as_int(v), 2**64) + assert as_int(v) == 2**64 # Have all long doubles got 63+1 binary bits of precision? 
Windows 32-bit # longdouble appears to have 52 bit precision, but we avoid that by checking # for known precisions that are less than that required @@ -134,13 +122,15 @@ def test_as_int(): except FloatingError: nmant = 63 # Unknown precision, let's hope it's at least 63 v = np.longdouble(2) ** (nmant + 1) - 1 - assert_equal(as_int(v), 2**(nmant + 1) - 1) + assert as_int(v) == 2**(nmant + 1) - 1 # Check for predictable overflow nexp64 = floor_log2(type_info(np.float64)['max']) with np.errstate(over='ignore'): val = np.longdouble(2**nexp64) * 2 # outside float64 range - assert_raises(OverflowError, as_int, val) - assert_raises(OverflowError, as_int, -val) + with pytest.raises(OverflowError): + as_int(val) + with pytest.raises(OverflowError): + as_int(-val) def test_int_to_float(): @@ -150,18 +140,20 @@ def test_int_to_float(): nmant = type_info(ie3)['nmant'] for p in range(nmant + 3): i = 2**p + 1 - assert_equal(int_to_float(i, ie3), ie3(i)) - assert_equal(int_to_float(-i, ie3), ie3(-i)) + assert int_to_float(i, ie3) == ie3(i) + assert int_to_float(-i, ie3) == ie3(-i) # IEEEs in this case are binary formats only nexp = floor_log2(type_info(ie3)['max']) # Values too large for the format smn, smx = -2**(nexp + 1), 2**(nexp + 1) if ie3 is np.float64: - assert_raises(OverflowError, int_to_float, smn, ie3) - assert_raises(OverflowError, int_to_float, smx, ie3) + with pytest.raises(OverflowError): + int_to_float(smn, ie3) + with pytest.raises(OverflowError): + int_to_float(smx, ie3) else: - assert_equal(int_to_float(smn, ie3), ie3(smn)) - assert_equal(int_to_float(smx, ie3), ie3(smx)) + assert int_to_float(smn, ie3) == ie3(smn) + assert int_to_float(smx, ie3) == ie3(smx) # Longdoubles do better than int, we hope LD = np.longdouble # up to integer precision of float64 nmant, we get the same result as for @@ -169,34 +161,31 @@ def test_int_to_float(): nmant = type_info(np.float64)['nmant'] for p in range(nmant + 2): # implicit i = 2**p - 1 - assert_equal(int_to_float(i, LD), LD(i)) - assert_equal(int_to_float(-i, LD), LD(-i)) + assert int_to_float(i, LD) == LD(i) + assert int_to_float(-i, LD) == LD(-i) # Above max of float64, we're hosed nexp64 = floor_log2(type_info(np.float64)['max']) smn64, smx64 = -2**(nexp64 + 1), 2**(nexp64 + 1) # The algorithm here implemented goes through float64, so supermax and # supermin will cause overflow errors - assert_raises(OverflowError, int_to_float, smn64, LD) - assert_raises(OverflowError, int_to_float, smx64, LD) + with pytest.raises(OverflowError): + int_to_float(smn64, LD) + with pytest.raises(OverflowError): + int_to_float(smx64, LD) try: nmant = type_info(np.longdouble)['nmant'] except FloatingError: # don't know where to test return # test we recover precision just above nmant i = 2**(nmant + 1) - 1 - assert_equal(as_int(int_to_float(i, LD)), i) - assert_equal(as_int(int_to_float(-i, LD)), -i) - # Test no error for longs - if PY2: - i = long(i) - assert_equal(as_int(int_to_float(i, LD)), i) - assert_equal(as_int(int_to_float(-i, LD)), -i) + assert as_int(int_to_float(i, LD)) == i + assert as_int(int_to_float(-i, LD)) == -i # If longdouble can cope with 2**64, test if nmant >= 63: # Check conversion to int; the line below causes an error subtracting # ints / uint64 values, at least for Python 3.3 and numpy dev 1.8 big_int = np.uint64(2**64 - 1) - assert_equal(as_int(int_to_float(big_int, LD)), big_int) + assert as_int(int_to_float(big_int, LD)) == big_int def test_as_int_np_fix(): @@ -205,15 +194,13 @@ def test_as_int_np_fix(): for t in np.sctypes['int'] + 
np.sctypes['uint']: info = np.iinfo(t) mn, mx = np.array([info.min, info.max], dtype=t) - assert_equal((mn, mx), (as_int(mn), as_int(mx))) + assert (mn, mx) == (as_int(mn), as_int(mx)) def test_floor_exact_16(): # A normal integer can generate an inf in float16 - if not have_float16: - raise SkipTest('No float16') - assert_equal(floor_exact(2**31, np.float16), np.inf) - assert_equal(floor_exact(-2**31, np.float16), -np.inf) + assert floor_exact(2**31, np.float16) == np.inf + assert floor_exact(-2**31, np.float16) == -np.inf def test_floor_exact_64(): @@ -222,11 +209,11 @@ def test_floor_exact_64(): start = np.float64(2**e) across = start + np.arange(2048, dtype=np.float64) gaps = set(np.diff(across)).difference([0]) - assert_equal(len(gaps), 1) + assert len(gaps) == 1 gap = gaps.pop() - assert_equal(gap, int(gap)) + assert gap == int(gap) test_val = 2**(e + 1) - 1 - assert_equal(floor_exact(test_val, np.float64), 2**(e + 1) - int(gap)) + assert floor_exact(test_val, np.float64) == 2**(e + 1) - int(gap) def test_floor_exact(): @@ -245,21 +232,21 @@ def test_floor_exact(): for t in to_test: # A number bigger than the range returns the max info = type_info(t) - assert_equal(floor_exact(2**5000, t), np.inf) - assert_equal(ceil_exact(2**5000, t), np.inf) + assert floor_exact(2**5000, t) == np.inf + assert ceil_exact(2**5000, t) == np.inf # A number more negative returns -inf - assert_equal(floor_exact(-2**5000, t), -np.inf) - assert_equal(ceil_exact(-2**5000, t), -np.inf) + assert floor_exact(-2**5000, t) == -np.inf + assert ceil_exact(-2**5000, t) == -np.inf # Check around end of integer precision nmant = info['nmant'] for i in range(nmant + 1): iv = 2**i # up to 2**nmant should be exactly representable for func in (int_flex, int_ceex): - assert_equal(func(iv, t), iv) - assert_equal(func(-iv, t), -iv) - assert_equal(func(iv - 1, t), iv - 1) - assert_equal(func(-iv + 1, t), -iv + 1) + assert func(iv, t) == iv + assert func(-iv, t) == -iv + assert func(iv - 1, t) == iv - 1 + assert func(-iv + 1, t) == -iv + 1 if t is np.longdouble and ( on_powerpc() or longdouble_precision_improved()): @@ -270,28 +257,28 @@ def test_floor_exact(): continue # Confirm to ourselves that 2**(nmant+1) can't be exactly represented iv = 2**(nmant + 1) - assert_equal(int_flex(iv + 1, t), iv) - assert_equal(int_ceex(iv + 1, t), iv + 2) + assert int_flex(iv + 1, t) == iv + assert int_ceex(iv + 1, t) == iv + 2 # negatives - assert_equal(int_flex(-iv - 1, t), -iv - 2) - assert_equal(int_ceex(-iv - 1, t), -iv) + assert int_flex(-iv - 1, t) == -iv - 2 + assert int_ceex(-iv - 1, t) == -iv # The gap in representable numbers is 2 above 2**(nmant+1), 4 above # 2**(nmant+2), and so on. 
for i in range(5): iv = 2**(nmant + 1 + i) gap = 2**(i + 1) - assert_equal(as_int(t(iv) + t(gap)), iv + gap) + assert as_int(t(iv) + t(gap)) == iv + gap for j in range(1, gap): - assert_equal(int_flex(iv + j, t), iv) - assert_equal(int_flex(iv + gap + j, t), iv + gap) - assert_equal(int_ceex(iv + j, t), iv + gap) - assert_equal(int_ceex(iv + gap + j, t), iv + 2 * gap) + assert int_flex(iv + j, t) == iv + assert int_flex(iv + gap + j, t) == iv + gap + assert int_ceex(iv + j, t) == iv + gap + assert int_ceex(iv + gap + j, t) == iv + 2 * gap # negatives for j in range(1, gap): - assert_equal(int_flex(-iv - j, t), -iv - gap) - assert_equal(int_flex(-iv - gap - j, t), -iv - 2 * gap) - assert_equal(int_ceex(-iv - j, t), -iv) - assert_equal(int_ceex(-iv - gap - j, t), -iv - gap) + assert int_flex(-iv - j, t) == -iv - gap + assert int_flex(-iv - gap - j, t) == -iv - 2 * gap + assert int_ceex(-iv - j, t) == -iv + assert int_ceex(-iv - gap - j, t) == -iv - gap def test_usable_binary128(): @@ -299,7 +286,6 @@ def test_usable_binary128(): yes = have_binary128() with np.errstate(over='ignore'): exp_test = np.longdouble(2) ** 16383 - assert_equal(yes, - exp_test.dtype.itemsize == 16 and - np.isfinite(exp_test) and - _check_nmant(np.longdouble, 112)) + assert yes == (exp_test.dtype.itemsize == 16 and + np.isfinite(exp_test) and + _check_nmant(np.longdouble, 112)) diff --git a/nibabel/tests/test_funcs.py b/nibabel/tests/test_funcs.py index 6032c08672..db196995e0 100644 --- a/nibabel/tests/test_funcs.py +++ b/nibabel/tests/test_funcs.py @@ -7,7 +7,6 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## ''' Test for image funcs ''' -from __future__ import division, print_function, absolute_import import numpy as np @@ -19,7 +18,7 @@ from ..tmpdirs import InTemporaryDirectory from numpy.testing import assert_array_equal -from nose.tools import (assert_true, assert_false, assert_equal, assert_raises) +import pytest _counter = 0 @@ -34,7 +33,8 @@ def _as_fname(img): def test_concat(): # Smoke test: concat empty list. - assert_raises(ValueError, concat_images, []) + with pytest.raises(ValueError): + concat_images([]) # Build combinations of 3D, 4D w/size[3] == 1, and 4D w/size[3] == 3 all_shapes_5D = ((1, 4, 5, 3, 3), @@ -105,26 +105,24 @@ def test_concat(): all_imgs = concat_images([img0, img1], **concat_imgs_kwargs) except ValueError as ve: - assert_true(expect_error, str(ve)) + assert expect_error, str(ve) else: - assert_false( - expect_error, "Expected a concatenation error, but got none.") - assert_array_equal(all_imgs.get_data(), all_data) + assert not expect_error, "Expected a concatenation error, but got none." + assert_array_equal(all_imgs.get_fdata(), all_data) assert_array_equal(all_imgs.affine, affine) # check that not-matching affines raise error - assert_raises(ValueError, concat_images, [ - img0, img2], **concat_imgs_kwargs) + with pytest.raises(ValueError): + concat_images([img0, img2], **concat_imgs_kwargs) # except if check_affines is False try: all_imgs = concat_images([img0, img1], **concat_imgs_kwargs) except ValueError as ve: - assert_true(expect_error, str(ve)) + assert expect_error, str(ve) else: - assert_false( - expect_error, "Expected a concatenation error, but got none.") - assert_array_equal(all_imgs.get_data(), all_data) + assert not expect_error, "Expected a concatenation error, but got none." 
+ assert_array_equal(all_imgs.get_fdata(), all_data) assert_array_equal(all_imgs.affine, affine) @@ -135,13 +133,13 @@ def test_closest_canonical(): # Test with an AnalyzeImage first img = AnalyzeImage(arr, np.eye(4)) xyz_img = as_closest_canonical(img) - assert_true(img is xyz_img) + assert img is xyz_img # And a case where the Analyze image has to be flipped img = AnalyzeImage(arr, np.diag([-1, 1, 1, 1])) xyz_img = as_closest_canonical(img) - assert_false(img is xyz_img) - out_arr = xyz_img.get_data() + assert img is not xyz_img + out_arr = xyz_img.get_fdata() assert_array_equal(out_arr, np.flipud(arr)) # Now onto the NIFTI cases (where dim_info also has to be updated) @@ -152,15 +150,15 @@ def test_closest_canonical(): # re-order them properly img.header.set_dim_info(0, 1, 2) xyz_img = as_closest_canonical(img) - assert_true(img is xyz_img) + assert img is xyz_img # a axis flip img = Nifti1Image(arr, np.diag([-1, 1, 1, 1])) img.header.set_dim_info(0, 1, 2) xyz_img = as_closest_canonical(img) - assert_false(img is xyz_img) - assert_true(img.header.get_dim_info() == xyz_img.header.get_dim_info()) - out_arr = xyz_img.get_data() + assert img is not xyz_img + assert img.header.get_dim_info() == xyz_img.header.get_dim_info() + out_arr = xyz_img.get_fdata() assert_array_equal(out_arr, np.flipud(arr)) # no error for enforce_diag in this case @@ -171,9 +169,10 @@ def test_closest_canonical(): # although it's more or less canonical already img = Nifti1Image(arr, aff) xyz_img = as_closest_canonical(img) - assert_true(img is xyz_img) + assert img is xyz_img # it's still not diagnonal - assert_raises(OrientationError, as_closest_canonical, img, True) + with pytest.raises(OrientationError): + as_closest_canonical(img, True) # an axis swap aff = np.diag([1, 0, 0, 1]) @@ -182,14 +181,14 @@ def test_closest_canonical(): img.header.set_dim_info(0, 1, 2) xyz_img = as_closest_canonical(img) - assert_false(img is xyz_img) + assert img is not xyz_img # Check both the original and new objects - assert_true(img.header.get_dim_info() == (0, 1, 2)) - assert_true(xyz_img.header.get_dim_info() == (0, 2, 1)) - out_arr = xyz_img.get_data() + assert img.header.get_dim_info() == (0, 1, 2) + assert xyz_img.header.get_dim_info() == (0, 2, 1) + out_arr = xyz_img.get_fdata() assert_array_equal(out_arr, np.transpose(arr, (0, 2, 1, 3))) # same axis swap but with None dim info (except for slice dim) img.header.set_dim_info(None, None, 2) xyz_img = as_closest_canonical(img) - assert_true(xyz_img.header.get_dim_info() == (None, None, 1)) + assert xyz_img.header.get_dim_info() == (None, None, 1) diff --git a/nibabel/tests/test_h5py_compat.py b/nibabel/tests/test_h5py_compat.py new file mode 100644 index 0000000000..325645a18c --- /dev/null +++ b/nibabel/tests/test_h5py_compat.py @@ -0,0 +1,46 @@ +""" +These tests are almost certainly overkill, but serve to verify that +the behavior of _h5py_compat is pass-through in all but a small set of +well-defined cases +""" +import sys +import os +from distutils.version import LooseVersion +import numpy as np + +from ..optpkg import optional_package +from .. 
import _h5py_compat as compat + +h5py, have_h5py, _ = optional_package('h5py') + + +def test_optpkg_equivalence(): + # No effect on Linux/OSX + if os.name == 'posix': + assert have_h5py == compat.have_h5py + # No effect on Python 2.7 or 3.6+ + if sys.version_info >= (3, 6) or sys.version_info < (3,): + assert have_h5py == compat.have_h5py + # Available in a strict subset of cases + if not have_h5py: + assert not compat.have_h5py + # Available when version is high enough + elif LooseVersion(h5py.__version__) >= '2.10': + assert compat.have_h5py + + +def test_disabled_h5py_cases(): + # On mismatch + if have_h5py and not compat.have_h5py: + # Recapitulate min_h5py conditions from _h5py_compat + assert os.name == 'nt' + assert (3,) <= sys.version_info < (3, 6) + assert LooseVersion(h5py.__version__) < '2.10' + # Verify that the root cause is present + # If any tests fail, they will likely be these, so they may be + # ill-advised... + if LooseVersion(np.__version__) < '1.18': + assert str(np.longdouble) == str(np.float64) + else: + assert str(np.longdouble) != str(np.float64) + assert np.longdouble != np.float64 diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index ba51878715..8af303914b 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -14,44 +14,43 @@ * ``img.shape`` (shape of data as read with ``np.array(img.dataobj)`` * ``img.get_fdata()`` (returns floating point data as read with ``np.array(img.dataobj)`` and the cast to float); -* ``img.get_data()`` (returns data as read with ``np.array(img.dataobj)``); -* ``img.uncache()`` (``img.get_data()`` and ``img.get_data`` are allowed to - cache the result of the array creation. If they do, this call empties that - cache. Implement this as a no-op if ``get_fdata()``, ``get_data`` do not - cache. +* ``img.uncache()`` (``img.get_fdata()`` (recommended) and ``img.get_data()`` + (deprecated) are allowed to cache the result of the array creation. If they + do, this call empties that cache. Implement this as a no-op if + ``get_fdata()``, ``get_data()`` do not cache.) * ``img[something]`` generates an informative TypeError * ``img.in_memory`` is True for an array image, and for a proxy image that is cached, but False otherwise. """ -from __future__ import division, print_function, absolute_import import warnings from functools import partial from itertools import product -from six import string_types +import pathlib import numpy as np from ..optpkg import optional_package _, have_scipy, _ = optional_package('scipy') -_, have_h5py, _ = optional_package('h5py') +from .._h5py_compat import have_h5py from .. import (AnalyzeImage, Spm99AnalyzeImage, Spm2AnalyzeImage, Nifti1Pair, Nifti1Image, Nifti2Pair, Nifti2Image, + GiftiImage, MGHImage, Minc1Image, Minc2Image, is_proxy) from ..spatialimages import SpatialImage from .. 
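The new `test_h5py_compat.py` builds on nibabel's `optional_package` helper, which returns a `(module_or_tripwire, have_package, setup_module)` triple. A hedged sketch of how a test module typically gates on an optional dependency — the scratch file name here is arbitrary:

```python
import unittest
import numpy as np
from nibabel.optpkg import optional_package

# Returns the real module if importable, otherwise a "tripwire" object that
# raises on attribute access, plus a boolean flag and a module-level setup hook.
h5py, have_h5py, setup_module = optional_package('h5py')

def test_roundtrip_through_hdf5():
    if not have_h5py:
        raise unittest.SkipTest('h5py not installed')
    data = np.arange(6)
    with h5py.File('scratch.h5', 'w') as f:   # arbitrary scratch file name
        f['data'] = data
    with h5py.File('scratch.h5', 'r') as f:
        assert np.array_equal(f['data'][()], data)
```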
import minc1, minc2, parrec, brikhead -from nose import SkipTest -from nose.tools import (assert_true, assert_false, assert_raises, assert_equal) +import unittest +import pytest -from numpy.testing import (assert_almost_equal, assert_array_equal) -from ..testing import clear_and_catch_warnings +from numpy.testing import assert_almost_equal, assert_array_equal, assert_warns, assert_allclose +from ..testing import (bytesio_round_trip, bytesio_filemap, + assert_data_similar, clear_and_catch_warnings) from ..tmpdirs import InTemporaryDirectory +from ..deprecator import ExpiredDeprecationError from .test_api_validators import ValidateAPI -from .test_helpers import (bytesio_round_trip, bytesio_filemap, - assert_data_similar) from .test_minc1 import EXAMPLE_IMAGES as MINC1_EXAMPLE_IMAGES from .test_minc2 import EXAMPLE_IMAGES as MINC2_EXAMPLE_IMAGES from .test_parrec import EXAMPLE_IMAGES as PARREC_EXAMPLE_IMAGES @@ -93,7 +92,7 @@ def obj_params(self): ``data_summary`` : dict with data ``min``, ``max``, ``mean``; * ``shape`` : shape of image; * ``affine`` : shape (4, 4) affine array for image; - * ``dtype`` : dtype of data returned from ``get_data()``; + * ``dtype`` : dtype of data returned from ``np.asarray(dataobj)``; * ``is_proxy`` : bool, True if image data is proxied; Notes @@ -109,21 +108,21 @@ def validate_header(self, imaker, params): img = imaker() hdr = img.header # we can fetch it # Read only - assert_raises(AttributeError, setattr, img, 'header', hdr) + with pytest.raises(AttributeError): + img.header = hdr def validate_header_deprecated(self, imaker, params): # Check deprecated header API img = imaker() - with clear_and_catch_warnings() as w: - warnings.simplefilter('always', DeprecationWarning) + with pytest.deprecated_call(): hdr = img.get_header() - assert_equal(len(w), 1) - assert_true(hdr is img.header) + assert hdr is img.header def validate_filenames(self, imaker, params): # Validate the filename, file_map interface + if not self.can_save: - raise SkipTest + raise unittest.SkipTest img = imaker() img.set_data_dtype(np.float32) # to avoid rounding in load / save # Make sure the object does not have a file_map @@ -132,8 +131,7 @@ def validate_filenames(self, imaker, params): rt_img = bytesio_round_trip(img) assert_array_equal(img.shape, rt_img.shape) assert_almost_equal(img.get_fdata(), rt_img.get_fdata()) - # get_data will be deprecated - assert_almost_equal(img.get_data(), rt_img.get_data()) + assert_almost_equal(np.asanyarray(img.dataobj), np.asanyarray(rt_img.dataobj)) # Give the image a file map klass = type(img) rt_img.file_map = bytesio_filemap(klass) @@ -141,28 +139,40 @@ def validate_filenames(self, imaker, params): rt_img.to_file_map() rt_rt_img = klass.from_file_map(rt_img.file_map) assert_almost_equal(img.get_fdata(), rt_rt_img.get_fdata()) - # get_data will be deprecated - assert_almost_equal(img.get_data(), rt_rt_img.get_data()) + assert_almost_equal(np.asanyarray(img.dataobj), np.asanyarray(rt_img.dataobj)) # get_ / set_ filename fname = 'an_image' + self.standard_extension - img.set_filename(fname) - assert_equal(img.get_filename(), fname) - assert_equal(img.file_map['image'].filename, fname) + for path in (fname, pathlib.Path(fname)): + img.set_filename(path) + assert img.get_filename() == str(path) + assert img.file_map['image'].filename == str(path) # to_ / from_ filename fname = 'another_image' + self.standard_extension - with InTemporaryDirectory(): - img.to_filename(fname) - rt_img = img.__class__.from_filename(fname) - assert_array_equal(img.shape, 
rt_img.shape) - assert_almost_equal(img.get_fdata(), rt_img.get_fdata()) - # get_data will be deprecated - assert_almost_equal(img.get_data(), rt_img.get_data()) - del rt_img # to allow windows to delete the directory + for path in (fname, pathlib.Path(fname)): + with InTemporaryDirectory(): + # Validate that saving or loading a file doesn't use deprecated methods internally + with clear_and_catch_warnings() as w: + warnings.simplefilter('error', DeprecationWarning) + img.to_filename(path) + rt_img = img.__class__.from_filename(path) + assert_array_equal(img.shape, rt_img.shape) + assert_almost_equal(img.get_fdata(), rt_img.get_fdata()) + assert_almost_equal(np.asanyarray(img.dataobj), np.asanyarray(rt_img.dataobj)) + del rt_img # to allow windows to delete the directory def validate_no_slicing(self, imaker, params): img = imaker() - assert_raises(TypeError, img.__getitem__, 'string') - assert_raises(TypeError, img.__getitem__, slice(None)) + with pytest.raises(TypeError): + img['string'] + with pytest.raises(TypeError): + img[:] + + def validate_get_data_deprecated(self, imaker, params): + # Check deprecated header API + img = imaker() + with pytest.deprecated_call(): + data = img.get_data() + assert_array_equal(np.asanyarray(img.dataobj), data) class GetSetDtypeMixin(object): @@ -175,19 +185,19 @@ def validate_dtype(self, imaker, params): # data / storage dtype img = imaker() # Need to rename this one - assert_equal(img.get_data_dtype().type, params['dtype']) + assert img.get_data_dtype().type == params['dtype'] # dtype survives round trip if self.has_scaling and self.can_save: with np.errstate(invalid='ignore'): rt_img = bytesio_round_trip(img) - assert_equal(rt_img.get_data_dtype().type, params['dtype']) + assert rt_img.get_data_dtype().type == params['dtype'] # Setting to a different dtype img.set_data_dtype(np.float32) # assumed supported for all formats - assert_equal(img.get_data_dtype().type, np.float32) + assert img.get_data_dtype().type == np.float32 # dtype survives round trip if self.can_save: rt_img = bytesio_round_trip(img) - assert_equal(rt_img.get_data_dtype().type, np.float32) + assert rt_img.get_data_dtype().type == np.float32 class DataInterfaceMixin(GetSetDtypeMixin): @@ -201,8 +211,8 @@ class DataInterfaceMixin(GetSetDtypeMixin): def validate_data_interface(self, imaker, params): # Check get data returns array, and caches img = imaker() - assert_equal(img.shape, img.dataobj.shape) - assert_equal(img.ndim, len(img.shape)) + assert img.shape == img.dataobj.shape + assert img.ndim == len(img.shape) assert_data_similar(img.dataobj, params) for meth_name in self.meth_names: if params['is_proxy']: @@ -210,53 +220,56 @@ def validate_data_interface(self, imaker, params): else: # Array image self._check_array_interface(imaker, meth_name) # Data shape is same as image shape - assert_equal(img.shape, getattr(img, meth_name)().shape) + assert img.shape == getattr(img, meth_name)().shape # Data ndim is same as image ndim - assert_equal(img.ndim, getattr(img, meth_name)().ndim) + assert img.ndim == getattr(img, meth_name)().ndim # Values to get_data caching parameter must be 'fill' or # 'unchanged' - assert_raises(ValueError, img.get_data, caching='something') + with pytest.raises(ValueError): + img.get_data(caching='something') # dataobj is read only fake_data = np.zeros(img.shape).astype(img.get_data_dtype()) - assert_raises(AttributeError, setattr, img, 'dataobj', fake_data) + with pytest.raises(AttributeError): + img.dataobj = fake_data # So is in_memory - 
assert_raises(AttributeError, setattr, img, 'in_memory', False) + with pytest.raises(AttributeError): + img.in_memory = False def _check_proxy_interface(self, imaker, meth_name): # Parameters assert this is an array proxy img = imaker() # Does is_proxy agree? - assert_true(is_proxy(img.dataobj)) + assert is_proxy(img.dataobj) # Confirm it is not a numpy array - assert_false(isinstance(img.dataobj, np.ndarray)) + assert not isinstance(img.dataobj, np.ndarray) # Confirm it can be converted to a numpy array with asarray proxy_data = np.asarray(img.dataobj) proxy_copy = proxy_data.copy() # Not yet cached, proxy image: in_memory is False - assert_false(img.in_memory) + assert not img.in_memory # Load with caching='unchanged' method = getattr(img, meth_name) data = method(caching='unchanged') # Still not cached - assert_false(img.in_memory) + assert not img.in_memory # Default load, does caching data = method() # Data now cached. in_memory is True if either of the get_data # or get_fdata caches are not-None - assert_true(img.in_memory) + assert img.in_memory # We previously got proxy_data from disk, but data, which we # have just fetched, is a fresh copy. - assert_false(proxy_data is data) + assert not proxy_data is data # asarray on dataobj, applied above, returns same numerical # values. This might not be true get_fdata operating on huge # integers, but lets assume that's not true here. assert_array_equal(proxy_data, data) # Now caching='unchanged' does nothing, returns cached version data_again = method(caching='unchanged') - assert_true(data is data_again) + assert data is data_again # caching='fill' does nothing because the cache is already full data_yet_again = method(caching='fill') - assert_true(data is data_yet_again) + assert data is data_yet_again # changing array data does not change proxy data, or reloaded # data data[:] = 42 @@ -267,16 +280,16 @@ def _check_proxy_interface(self, imaker, meth_name): # until we uncache img.uncache() # Which unsets in_memory - assert_false(img.in_memory) + assert not img.in_memory assert_array_equal(method(), proxy_copy) # Check caching='fill' does cache data img = imaker() method = getattr(img, meth_name) - assert_false(img.in_memory) + assert not img.in_memory data = method(caching='fill') - assert_true(img.in_memory) + assert img.in_memory data_again = method() - assert_true(data is data_again) + assert data is data_again # Check the interaction of caching with get_data, get_fdata. # Caching for `get_data` should have no effect on caching for # get_fdata, and vice versa. @@ -288,36 +301,39 @@ def _check_proxy_interface(self, imaker, meth_name): other_data = other_method() # We get the original data, not the modified cache assert_array_equal(proxy_data, other_data) - assert_false(np.all(data == other_data)) + assert not np.all(data == other_data) # We can modify the other cache, without affecting the first other_data[:] = 44 assert_array_equal(other_method(), 44) - assert_false(np.all(method() == other_method())) + assert not np.all(method() == other_method()) if meth_name != 'get_fdata': return # Check that caching refreshes for new floating point type. 
img.uncache() fdata = img.get_fdata() - assert_equal(fdata.dtype, np.float64) + assert fdata.dtype == np.float64 fdata[:] = 42 fdata_back = img.get_fdata() assert_array_equal(fdata_back, 42) - assert_equal(fdata_back.dtype, np.float64) + assert fdata_back.dtype == np.float64 # New data dtype, no caching, doesn't use or alter cache fdata_new_dt = img.get_fdata(caching='unchanged', dtype='f4') # We get back the original read, not the modified cache - assert_array_equal(fdata_new_dt, proxy_data.astype('f4')) - assert_equal(fdata_new_dt.dtype, np.float32) + # Allow for small rounding error when the data is scaled with 32-bit + # factors, rather than 64-bit factors and then cast to float-32 + # Use rtol/atol from numpy.allclose + assert_allclose(fdata_new_dt, proxy_data.astype('f4'), rtol=1e-05, atol=1e-08) + assert fdata_new_dt.dtype == np.float32 # The original cache stays in place, for default float64 assert_array_equal(img.get_fdata(), 42) # And for not-default float32, because we haven't cached fdata_new_dt[:] = 43 fdata_new_dt = img.get_fdata(caching='unchanged', dtype='f4') - assert_array_equal(fdata_new_dt, proxy_data.astype('f4')) + assert_allclose(fdata_new_dt, proxy_data.astype('f4'), rtol=1e-05, atol=1e-08) # Until we reset with caching='fill', at which point we # drop the original float64 cache, and have a float32 cache fdata_new_dt = img.get_fdata(caching='fill', dtype='f4') - assert_array_equal(fdata_new_dt, proxy_data.astype('f4')) + assert_allclose(fdata_new_dt, proxy_data.astype('f4'), rtol=1e-05, atol=1e-08) # We're using the cache, for dtype='f4' reads fdata_new_dt[:] = 43 assert_array_equal(img.get_fdata(dtype='f4'), 43) @@ -333,8 +349,8 @@ def _check_array_caching(self, imaker, meth_name, caching): method = getattr(img, meth_name) get_data_func = (method if caching is None else partial(method, caching=caching)) - assert_true(isinstance(img.dataobj, np.ndarray)) - assert_true(img.in_memory) + assert isinstance(img.dataobj, np.ndarray) + assert img.in_memory data = get_data_func() # Returned data same object as underlying dataobj if using # old ``get_data`` method, or using newer ``get_fdata`` @@ -344,10 +360,10 @@ def _check_array_caching(self, imaker, meth_name, caching): # Set something to the output array. data[:] = 42 get_result_changed = np.all(get_data_func() == 42) - assert_equal(get_result_changed, - dataobj_is_data or caching != 'unchanged') + assert (get_result_changed == + (dataobj_is_data or caching != 'unchanged')) if dataobj_is_data: - assert_true(data is img.dataobj) + assert data is img.dataobj # Changing array data changes # data assert_array_equal(np.asarray(img.dataobj), 42) @@ -355,15 +371,15 @@ def _check_array_caching(self, imaker, meth_name, caching): img.uncache() assert_array_equal(get_data_func(), 42) else: - assert_false(data is img.dataobj) - assert_false(np.all(np.asarray(img.dataobj) == 42)) + assert not data is img.dataobj + assert not np.all(np.asarray(img.dataobj) == 42) # Uncache does have an effect img.uncache() - assert_false(np.all(get_data_func() == 42)) + assert not np.all(get_data_func() == 42) # in_memory is always true for array images, regardless of # cache state. 
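A minimal sketch of the `get_fdata` caching semantics these validators check, using an in-memory image (proxy images loaded from disk differ in the details, which is what `_check_proxy_interface` above covers):

```python
import numpy as np
import nibabel as nib

img = nib.Nifti1Image(np.arange(24, dtype=np.int16).reshape(2, 3, 4), np.eye(4))
data = img.get_fdata()                 # floating point data, float64 by default
assert data.dtype == np.float64
assert img.get_fdata() is data         # repeated calls return the cached array
img.uncache()                          # drop the cache
assert img.get_fdata() is not data     # a fresh array is built on the next call
```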
img.uncache() - assert_true(img.in_memory) + assert img.in_memory if meth_name != 'get_fdata': return # Return original array from get_fdata only if the input array is the @@ -373,48 +389,75 @@ def _check_array_caching(self, imaker, meth_name, caching): return for float_type in float_types: data = get_data_func(dtype=float_type) - assert_equal(data is img.dataobj, arr_dtype == float_type) + assert (data is img.dataobj) == (arr_dtype == float_type) def validate_data_deprecated(self, imaker, params): # Check _data property still exists, but raises warning img = imaker() - with warnings.catch_warnings(record=True) as warns: - warnings.simplefilter("always") + with pytest.deprecated_call(): assert_data_similar(img._data, params) - assert_equal(warns.pop(0).category, DeprecationWarning) # Check setting _data raises error fake_data = np.zeros(img.shape).astype(img.get_data_dtype()) - assert_raises(AttributeError, setattr, img, '_data', fake_data) + with pytest.raises(AttributeError): + img._data = fake_data def validate_shape(self, imaker, params): # Validate shape img = imaker() # Same as expected shape - assert_equal(img.shape, params['shape']) + assert img.shape == params['shape'] # Same as array shape if passed if 'data' in params: - assert_equal(img.shape, params['data'].shape) + assert img.shape == params['data'].shape # Read only - assert_raises(AttributeError, setattr, img, 'shape', np.eye(4)) + with pytest.raises(AttributeError): + img.shape = np.eye(4) def validate_ndim(self, imaker, params): # Validate shape img = imaker() # Same as expected ndim - assert_equal(img.ndim, len(params['shape'])) + assert img.ndim == len(params['shape']) # Same as array ndim if passed if 'data' in params: - assert_equal(img.ndim, params['data'].ndim) + assert img.ndim == params['data'].ndim # Read only - assert_raises(AttributeError, setattr, img, 'ndim', 5) + with pytest.raises(AttributeError): + img.ndim = 5 def validate_shape_deprecated(self, imaker, params): # Check deprecated get_shape API img = imaker() - with clear_and_catch_warnings() as w: - warnings.simplefilter('always', DeprecationWarning) - assert_equal(img.get_shape(), params['shape']) - assert_equal(len(w), 1) + with pytest.raises(ExpiredDeprecationError): + img.get_shape() + + def validate_mmap_parameter(self, imaker, params): + img = imaker() + fname = img.get_filename() + with InTemporaryDirectory(): + # Load test files with mmap parameters + # or + # Save a generated file so we can test it + if fname is None: + # Skip only formats we can't write + if not img.rw or not img.valid_exts: + return + fname = 'image' + img.valid_exts[0] + img.to_filename(fname) + rt_img = img.__class__.from_filename(fname, mmap=True) + assert_almost_equal(img.get_fdata(), rt_img.get_fdata()) + rt_img = img.__class__.from_filename(fname, mmap=False) + assert_almost_equal(img.get_fdata(), rt_img.get_fdata()) + rt_img = img.__class__.from_filename(fname, mmap='c') + assert_almost_equal(img.get_fdata(), rt_img.get_fdata()) + rt_img = img.__class__.from_filename(fname, mmap='r') + assert_almost_equal(img.get_fdata(), rt_img.get_fdata()) + # r+ is specifically not valid for images + with pytest.raises(ValueError): + img.__class__.from_filename(fname, mmap='r+') + with pytest.raises(ValueError): + img.__class__.from_filename(fname, mmap='invalid') + del rt_img # to allow windows to delete the directory class HeaderShapeMixin(object): @@ -431,8 +474,8 @@ def validate_header_shape(self, imaker, params): shape = hdr.get_data_shape() new_shape = (shape[0] + 1,) + 
shape[1:] hdr.set_data_shape(new_shape) - assert_true(img.header is hdr) - assert_equal(img.header.get_data_shape(), new_shape) + assert img.header is hdr + assert img.header.get_data_shape() == new_shape class AffineMixin(object): @@ -446,23 +489,83 @@ def validate_affine(self, imaker, params): # Check affine API img = imaker() assert_almost_equal(img.affine, params['affine'], 6) - assert_equal(img.affine.dtype, np.float64) + assert img.affine.dtype == np.float64 img.affine[0, 0] = 1.5 - assert_equal(img.affine[0, 0], 1.5) + assert img.affine[0, 0] == 1.5 # Read only - assert_raises(AttributeError, setattr, img, 'affine', np.eye(4)) + with pytest.raises(AttributeError): + img.affine = np.eye(4) def validate_affine_deprecated(self, imaker, params): # Check deprecated affine API img = imaker() - with clear_and_catch_warnings() as w: - warnings.simplefilter('always', DeprecationWarning) + with pytest.deprecated_call(): assert_almost_equal(img.get_affine(), params['affine'], 6) - assert_equal(len(w), 1) - assert_equal(img.get_affine().dtype, np.float64) + assert img.get_affine().dtype == np.float64 aff = img.get_affine() aff[0, 0] = 1.5 - assert_true(aff is img.get_affine()) + assert aff is img.get_affine() + + +class SerializeMixin(object): + def validate_to_bytes(self, imaker, params): + img = imaker() + serialized = img.to_bytes() + with InTemporaryDirectory(): + fname = 'img' + self.standard_extension + img.to_filename(fname) + with open(fname, 'rb') as fobj: + file_contents = fobj.read() + assert serialized == file_contents + + def validate_from_bytes(self, imaker, params): + img = imaker() + klass = getattr(self, 'klass', img.__class__) + with InTemporaryDirectory(): + fname = 'img' + self.standard_extension + img.to_filename(fname) + + all_images = list(getattr(self, 'example_images', [])) + [{'fname': fname}] + for img_params in all_images: + img_a = klass.from_filename(img_params['fname']) + with open(img_params['fname'], 'rb') as fobj: + img_b = klass.from_bytes(fobj.read()) + + assert self._header_eq(img_a.header, img_b.header) + assert np.array_equal(img_a.get_fdata(), img_b.get_fdata()) + del img_a + del img_b + + def validate_to_from_bytes(self, imaker, params): + img = imaker() + klass = getattr(self, 'klass', img.__class__) + with InTemporaryDirectory(): + fname = 'img' + self.standard_extension + img.to_filename(fname) + + all_images = list(getattr(self, 'example_images', [])) + [{'fname': fname}] + for img_params in all_images: + img_a = klass.from_filename(img_params['fname']) + bytes_a = img_a.to_bytes() + + img_b = klass.from_bytes(bytes_a) + + assert img_b.to_bytes() == bytes_a + assert self._header_eq(img_a.header, img_b.header) + assert np.array_equal(img_a.get_fdata(), img_b.get_fdata()) + del img_a + del img_b + + @staticmethod + def _header_eq(header_a, header_b): + """ Header equality check that can be overridden by a subclass of this test + + This allows us to retain the same tests above when testing an image that uses an + abstract class as a header, namely when testing the FileBasedImage API, which + raises a NotImplementedError for __eq__ + """ + return header_a == header_b + class LoadImageAPI(GenericImageAPI, @@ -485,10 +588,10 @@ def obj_params(self): def validate_path_maybe_image(self, imaker, params): for img_params in self.example_images: test, sniff = self.klass.path_maybe_image(img_params['fname']) - assert_true(isinstance(test, bool)) + assert isinstance(test, bool) if sniff is not None: assert isinstance(sniff[0], bytes) - assert isinstance(sniff[1], 
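`SerializeMixin` validates the in-memory serialization API; a short example of the round trip it describes, assuming the image fits comfortably in memory:

```python
import numpy as np
import nibabel as nib

img = nib.Nifti1Image(np.zeros((2, 3, 4), dtype=np.float32), np.eye(4))
blob = img.to_bytes()                        # the same bytes a .nii file would hold
img2 = nib.Nifti1Image.from_bytes(blob)      # reload without touching the filesystem
assert np.array_equal(img.get_fdata(), img2.get_fdata())
```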
string_types) + assert isinstance(sniff[1], str) class MakeImageAPI(LoadImageAPI): @@ -585,7 +688,7 @@ class TestNifti1PairAPI(TestSpm99AnalyzeAPI): can_save = True -class TestNifti1API(TestNifti1PairAPI): +class TestNifti1API(TestNifti1PairAPI, SerializeMixin): klass = image_maker = Nifti1Image standard_extension = '.nii' @@ -608,7 +711,7 @@ class TestMinc2API(TestMinc1API): def __init__(self): if not have_h5py: - raise SkipTest('Need h5py for these tests') + raise unittest.SkipTest('Need h5py for these tests') klass = image_maker = Minc2Image loader = minc2.load @@ -632,7 +735,7 @@ def loader(self, fname): # standard_extension = '.v' -class TestMGHAPI(ImageHeaderAPI): +class TestMGHAPI(ImageHeaderAPI, SerializeMixin): klass = image_maker = MGHImage example_shapes = ((2, 3, 4), (2, 3, 4, 5)) # MGH can only do >= 3D has_scaling = True @@ -640,6 +743,12 @@ class TestMGHAPI(ImageHeaderAPI): standard_extension = '.mgh' +class TestGiftiAPI(LoadImageAPI, SerializeMixin): + klass = image_maker = GiftiImage + can_save = True + standard_extension = '.gii' + + class TestAFNIAPI(LoadImageAPI): loader = brikhead.load klass = image_maker = brikhead.AFNIImage diff --git a/nibabel/tests/test_image_load_save.py b/nibabel/tests/test_image_load_save.py index f7318945e7..429144108c 100644 --- a/nibabel/tests/test_image_load_save.py +++ b/nibabel/tests/test_image_load_save.py @@ -7,12 +7,12 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## ''' Tests for loader function ''' -from __future__ import division, print_function, absolute_import from io import BytesIO import shutil from os.path import dirname, join as pjoin from tempfile import mkdtemp +import pathlib import numpy as np @@ -30,7 +30,7 @@ from ..spatialimages import SpatialImage from numpy.testing import assert_array_equal, assert_array_almost_equal -from nose.tools import assert_true, assert_equal, assert_raises +import pytest _, have_scipy, _ = optional_package('scipy') # No scipy=>no SPM-format writing DATA_PATH = pjoin(dirname(__file__), 'data') @@ -39,11 +39,7 @@ def round_trip(img): # round trip a nifti single - sio = BytesIO() - img.file_map['image'].fileobj = sio - img.to_file_map() - img2 = Nifti1Image.from_file_map(img.file_map) - return img2 + return Nifti1Image.from_bytes(img.to_bytes()) def test_conversion_spatialimages(): @@ -62,7 +58,7 @@ def test_conversion_spatialimages(): if not w_class.makeable: continue img2 = w_class.from_image(img) - assert_array_equal(img2.get_data(), data) + assert_array_equal(img2.get_fdata(), data) assert_array_equal(img2.affine, affine) @@ -72,38 +68,44 @@ def test_save_load_endian(): data = np.arange(np.prod(shape), dtype='f4').reshape(shape) # Native endian image img = Nifti1Image(data, affine) - assert_equal(img.header.endianness, native_code) + assert img.header.endianness == native_code img2 = round_trip(img) - assert_equal(img2.header.endianness, native_code) - assert_array_equal(img2.get_data(), data) + assert img2.header.endianness == native_code + assert_array_equal(img2.get_fdata(), data) + assert_array_equal(np.asanyarray(img2.dataobj), data) # byte swapped endian image bs_hdr = img.header.as_byteswapped() bs_img = Nifti1Image(data, affine, bs_hdr) - assert_equal(bs_img.header.endianness, swapped_code) + assert bs_img.header.endianness == swapped_code # of course the data is the same because it's not written to disk - assert_array_equal(bs_img.get_data(), data) + assert_array_equal(bs_img.get_fdata(), data) + assert_array_equal(np.asanyarray(bs_img.dataobj), 
data) # Check converting to another image cbs_img = AnalyzeImage.from_image(bs_img) # this will make the header native by doing the header conversion cbs_hdr = cbs_img.header - assert_equal(cbs_hdr.endianness, native_code) + assert cbs_hdr.endianness == native_code # and the byte order follows it back into another image cbs_img2 = Nifti1Image.from_image(cbs_img) cbs_hdr2 = cbs_img2.header - assert_equal(cbs_hdr2.endianness, native_code) + assert cbs_hdr2.endianness == native_code # Try byteswapped round trip bs_img2 = round_trip(bs_img) - bs_data2 = bs_img2.get_data() + bs_data2 = np.asanyarray(bs_img2.dataobj) + bs_fdata2 = bs_img2.get_fdata() # now the data dtype was swapped endian, so the read data is too - assert_equal(bs_data2.dtype.byteorder, swapped_code) - assert_equal(bs_img2.header.endianness, swapped_code) + assert bs_data2.dtype.byteorder == swapped_code + assert bs_img2.header.endianness == swapped_code assert_array_equal(bs_data2, data) + # but get_fdata uses native endian + assert bs_fdata2.dtype.byteorder != swapped_code + assert_array_equal(bs_fdata2, data) # Now mix up byteswapped data and non-byteswapped header mixed_img = Nifti1Image(bs_data2, affine) - assert_equal(mixed_img.header.endianness, native_code) + assert mixed_img.header.endianness == native_code m_img2 = round_trip(mixed_img) - assert_equal(m_img2.header.endianness, native_code) - assert_array_equal(m_img2.get_data(), data) + assert m_img2.header.endianness == native_code + assert_array_equal(m_img2.get_fdata(), data) def test_save_load(): @@ -119,8 +121,8 @@ def test_save_load(): sifn = 'another_image.img' ni1.save(img, nifn) re_img = nils.load(nifn) - assert_true(isinstance(re_img, ni1.Nifti1Image)) - assert_array_equal(re_img.get_data(), data) + assert isinstance(re_img, ni1.Nifti1Image) + assert_array_equal(re_img.get_fdata(), data) assert_array_equal(re_img.affine, affine) # These and subsequent del statements are to prevent confusing # windows errors when trying to open files or delete the @@ -129,21 +131,20 @@ def test_save_load(): if have_scipy: # skip we we cannot read .mat files spm2.save(img, sifn) re_img2 = nils.load(sifn) - assert_true(isinstance(re_img2, spm2.Spm2AnalyzeImage)) - assert_array_equal(re_img2.get_data(), data) + assert isinstance(re_img2, spm2.Spm2AnalyzeImage) + assert_array_equal(re_img2.get_fdata(), data) assert_array_equal(re_img2.affine, affine) del re_img2 spm99.save(img, sifn) re_img3 = nils.load(sifn) - assert_true(isinstance(re_img3, - spm99.Spm99AnalyzeImage)) - assert_array_equal(re_img3.get_data(), data) + assert isinstance(re_img3, spm99.Spm99AnalyzeImage) + assert_array_equal(re_img3.get_fdata(), data) assert_array_equal(re_img3.affine, affine) ni1.save(re_img3, nifn) del re_img3 re_img = nils.load(nifn) - assert_true(isinstance(re_img, ni1.Nifti1Image)) - assert_array_equal(re_img.get_data(), data) + assert isinstance(re_img, ni1.Nifti1Image) + assert_array_equal(re_img.get_fdata(), data) assert_array_equal(re_img.affine, affine) del re_img @@ -157,13 +158,13 @@ def test_two_to_one(): affine[:3, 3] = [3, 2, 1] # single file format img = ni1.Nifti1Image(data, affine) - assert_equal(img.header['magic'], b'n+1') + assert img.header['magic'] == b'n+1' str_io = BytesIO() img.file_map['image'].fileobj = str_io # check that the single format vox offset stays at zero img.to_file_map() - assert_equal(img.header['magic'], b'n+1') - assert_equal(img.header['vox_offset'], 0) + assert img.header['magic'] == b'n+1' + assert img.header['vox_offset'] == 0 # make a new pair image, 
with the single image header pimg = ni1.Nifti1Pair(data, affine, img.header) isio = BytesIO() @@ -172,32 +173,32 @@ def test_two_to_one(): pimg.file_map['header'].fileobj = hsio pimg.to_file_map() # the offset stays at zero (but is 352 on disk) - assert_equal(pimg.header['magic'], b'ni1') - assert_equal(pimg.header['vox_offset'], 0) - assert_array_equal(pimg.get_data(), data) + assert pimg.header['magic'] == b'ni1' + assert pimg.header['vox_offset'] == 0 + assert_array_equal(pimg.get_fdata(), data) # same for from_image, going from single image to pair format ana_img = ana.AnalyzeImage.from_image(img) - assert_equal(ana_img.header['vox_offset'], 0) + assert ana_img.header['vox_offset'] == 0 # back to the single image, save it again to a stringio str_io = BytesIO() img.file_map['image'].fileobj = str_io img.to_file_map() - assert_equal(img.header['vox_offset'], 0) + assert img.header['vox_offset'] == 0 aimg = ana.AnalyzeImage.from_image(img) - assert_equal(aimg.header['vox_offset'], 0) + assert aimg.header['vox_offset'] == 0 aimg = spm99.Spm99AnalyzeImage.from_image(img) - assert_equal(aimg.header['vox_offset'], 0) + assert aimg.header['vox_offset'] == 0 aimg = spm2.Spm2AnalyzeImage.from_image(img) - assert_equal(aimg.header['vox_offset'], 0) + assert aimg.header['vox_offset'] == 0 nfimg = ni1.Nifti1Pair.from_image(img) - assert_equal(nfimg.header['vox_offset'], 0) + assert nfimg.header['vox_offset'] == 0 # now set the vox offset directly hdr = nfimg.header hdr['vox_offset'] = 16 - assert_equal(nfimg.header['vox_offset'], 16) + assert nfimg.header['vox_offset'] == 16 # check it gets properly set by the nifti single image nfimg = ni1.Nifti1Image.from_image(img) - assert_equal(nfimg.header['vox_offset'], 0) + assert nfimg.header['vox_offset'] == 0 def test_negative_load_save(): @@ -212,7 +213,7 @@ def test_negative_load_save(): img.to_file_map() str_io.seek(0) re_img = Nifti1Image.from_file_map(img.file_map) - assert_array_almost_equal(re_img.get_data(), data, 4) + assert_array_almost_equal(re_img.get_fdata(), data, 4) def test_filename_save(): @@ -254,13 +255,14 @@ def test_filename_save(): try: pth = mkdtemp() fname = pjoin(pth, 'image' + out_ext) - nils.save(img, fname) - rt_img = nils.load(fname) - assert_array_almost_equal(rt_img.get_data(), data) - assert_true(type(rt_img) is loadklass) - # delete image to allow file close. Otherwise windows - # raises an error when trying to delete the directory - del rt_img + for path in (fname, pathlib.Path(fname)): + nils.save(img, path) + rt_img = nils.load(path) + assert_array_almost_equal(rt_img.get_fdata(), data) + assert type(rt_img) is loadklass + # delete image to allow file close. 
Otherwise windows + # raises an error when trying to delete the directory + del rt_img finally: shutil.rmtree(pth) @@ -271,57 +273,41 @@ def test_analyze_detection(): def wat(hdr): return nils.which_analyze_type(hdr.binaryblock) n1_hdr = Nifti1Header(b'\0' * 348, check=False) - assert_equal(wat(n1_hdr), None) + assert wat(n1_hdr) is None n1_hdr['sizeof_hdr'] = 540 - assert_equal(wat(n1_hdr), 'nifti2') - assert_equal(wat(n1_hdr.as_byteswapped()), 'nifti2') + assert wat(n1_hdr) == 'nifti2' + assert wat(n1_hdr.as_byteswapped()) == 'nifti2' n1_hdr['sizeof_hdr'] = 348 - assert_equal(wat(n1_hdr), 'analyze') - assert_equal(wat(n1_hdr.as_byteswapped()), 'analyze') + assert wat(n1_hdr) == 'analyze' + assert wat(n1_hdr.as_byteswapped()) == 'analyze' n1_hdr['magic'] = b'n+1' - assert_equal(wat(n1_hdr), 'nifti1') - assert_equal(wat(n1_hdr.as_byteswapped()), 'nifti1') + assert wat(n1_hdr) == 'nifti1' + assert wat(n1_hdr.as_byteswapped()) == 'nifti1' n1_hdr['magic'] = b'ni1' - assert_equal(wat(n1_hdr), 'nifti1') - assert_equal(wat(n1_hdr.as_byteswapped()), 'nifti1') + assert wat(n1_hdr) == 'nifti1' + assert wat(n1_hdr.as_byteswapped()) == 'nifti1' # Doesn't matter what magic is if it's not a nifti1 magic n1_hdr['magic'] = b'ni2' - assert_equal(wat(n1_hdr), 'analyze') + assert wat(n1_hdr) == 'analyze' n1_hdr['sizeof_hdr'] = 0 n1_hdr['magic'] = b'' - assert_equal(wat(n1_hdr), None) + assert wat(n1_hdr) is None n1_hdr['magic'] = 'n+1' - assert_equal(wat(n1_hdr), 'nifti1') + assert wat(n1_hdr) == 'nifti1' n1_hdr['magic'] = 'ni1' - assert_equal(wat(n1_hdr), 'nifti1') + assert wat(n1_hdr) == 'nifti1' def test_guessed_image_type(): # Test whether we can guess the image type from example files - assert_equal(nils.guessed_image_type( - pjoin(DATA_PATH, 'example4d.nii.gz')), - Nifti1Image) - assert_equal(nils.guessed_image_type( - pjoin(DATA_PATH, 'nifti1.hdr')), - Nifti1Pair) - assert_equal(nils.guessed_image_type( - pjoin(DATA_PATH, 'example_nifti2.nii.gz')), - Nifti2Image) - assert_equal(nils.guessed_image_type( - pjoin(DATA_PATH, 'nifti2.hdr')), - Nifti2Pair) - assert_equal(nils.guessed_image_type( - pjoin(DATA_PATH, 'tiny.mnc')), - Minc1Image) - assert_equal(nils.guessed_image_type( - pjoin(DATA_PATH, 'small.mnc')), - Minc2Image) - assert_equal(nils.guessed_image_type( - pjoin(DATA_PATH, 'test.mgz')), - MGHImage) - assert_equal(nils.guessed_image_type( - pjoin(DATA_PATH, 'analyze.hdr')), - Spm2AnalyzeImage) + assert nils.guessed_image_type(pjoin(DATA_PATH, 'example4d.nii.gz')) == Nifti1Image + assert nils.guessed_image_type(pjoin(DATA_PATH, 'nifti1.hdr')) == Nifti1Pair + assert nils.guessed_image_type(pjoin(DATA_PATH, 'example_nifti2.nii.gz')) == Nifti2Image + assert nils.guessed_image_type(pjoin(DATA_PATH, 'nifti2.hdr')) == Nifti2Pair + assert nils.guessed_image_type(pjoin(DATA_PATH, 'tiny.mnc')) == Minc1Image + assert nils.guessed_image_type(pjoin(DATA_PATH, 'small.mnc')) == Minc2Image + assert nils.guessed_image_type(pjoin(DATA_PATH, 'test.mgz')) == MGHImage + assert nils.guessed_image_type(pjoin(DATA_PATH, 'analyze.hdr')) == Spm2AnalyzeImage def test_fail_save(): @@ -330,6 +316,6 @@ def test_fail_save(): affine = np.eye(4, dtype=np.float32) img = SpatialImage(dataobj, affine) # Fails because float16 is not supported. 
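`test_filename_save` above now loops over both `str` and `pathlib.Path` filenames; a small illustration of the `os.PathLike` support being tested (file name chosen arbitrarily, written to the current directory):

```python
import pathlib
import numpy as np
import nibabel as nib

img = nib.Nifti1Image(np.zeros((2, 3, 4), dtype=np.float32), np.eye(4))
path = pathlib.Path('example_image.nii')     # arbitrary file name
nib.save(img, path)                          # PathLike accepted as well as str
assert np.array_equal(nib.load(path).get_fdata(), img.get_fdata())
```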
- with assert_raises(AttributeError): + with pytest.raises(AttributeError): nils.save(img, 'foo.nii.gz') del img diff --git a/nibabel/tests/test_image_types.py b/nibabel/tests/test_image_types.py index e72ad6bbbc..632e23224d 100644 --- a/nibabel/tests/test_image_types.py +++ b/nibabel/tests/test_image_types.py @@ -7,7 +7,6 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## ''' Tests for is_image / may_contain_header functions ''' -from __future__ import division, print_function, absolute_import import copy from os.path import dirname, basename, join as pjoin @@ -21,7 +20,6 @@ Spm2AnalyzeImage, Spm99AnalyzeImage, MGHImage, all_image_classes) -from nose.tools import assert_true DATA_PATH = pjoin(dirname(__file__), 'data') @@ -65,7 +63,7 @@ def check_img(img_path, img_klass, sniff_mode, sniff, expect_success, 'sizeof_hdr', 0) current_sizeof_hdr = 0 if new_sniff is None else \ len(new_sniff[0]) - assert_true(current_sizeof_hdr >= expected_sizeof_hdr, new_msg) + assert current_sizeof_hdr >= expected_sizeof_hdr, new_msg # Check that the image type was recognized. new_msg = '%s (%s) image is%s a %s image.' % ( @@ -73,7 +71,7 @@ def check_img(img_path, img_klass, sniff_mode, sniff, expect_success, msg, '' if is_img else ' not', img_klass.__name__) - assert_true(is_img, new_msg) + assert is_img, new_msg if sniff_mode == 'vanilla': return new_sniff diff --git a/nibabel/tests/test_imageclasses.py b/nibabel/tests/test_imageclasses.py index 3c3c437136..193cf38cb9 100644 --- a/nibabel/tests/test_imageclasses.py +++ b/nibabel/tests/test_imageclasses.py @@ -6,25 +6,21 @@ import numpy as np -from nibabel.optpkg import optional_package - import nibabel as nib from nibabel.analyze import AnalyzeImage from nibabel.nifti1 import Nifti1Image from nibabel.nifti2 import Nifti2Image +from .._h5py_compat import have_h5py from nibabel import imageclasses from nibabel.imageclasses import spatial_axes_first, class_map, ext_map -from nose.tools import (assert_true, assert_false, assert_equal) from nibabel.testing import clear_and_catch_warnings DATA_DIR = pjoin(dirname(__file__), 'data') -have_h5py = optional_package('h5py')[1] - MINC_3DS = ('minc1_1_scale.mnc',) MINC_4DS = ('minc1_4d.mnc',) if have_h5py: @@ -40,26 +36,26 @@ def test_spatial_axes_first(): for img_class in (AnalyzeImage, Nifti1Image, Nifti2Image): data = np.zeros(shape) img = img_class(data, affine) - assert_true(spatial_axes_first(img)) + assert spatial_axes_first(img) # True for MINC images < 4D for fname in MINC_3DS: img = nib.load(pjoin(DATA_DIR, fname)) - assert_true(len(img.shape) == 3) - assert_true(spatial_axes_first(img)) + assert len(img.shape) == 3 + assert spatial_axes_first(img) # False for MINC images < 4D for fname in MINC_4DS: img = nib.load(pjoin(DATA_DIR, fname)) - assert_true(len(img.shape) == 4) - assert_false(spatial_axes_first(img)) + assert len(img.shape) == 4 + assert not spatial_axes_first(img) def test_deprecations(): with clear_and_catch_warnings(modules=[imageclasses]) as w: warnings.filterwarnings('always', category=DeprecationWarning) nifti_single = class_map['nifti_single'] - assert_equal(nifti_single['class'], Nifti1Image) - assert_equal(len(w), 1) + assert nifti_single['class'] == Nifti1Image + assert len(w) == 1 nifti_ext = ext_map['.nii'] - assert_equal(nifti_ext, 'nifti_single') - assert_equal(len(w), 2) + assert nifti_ext == 'nifti_single' + assert len(w) == 2 diff --git a/nibabel/tests/test_imageglobals.py b/nibabel/tests/test_imageglobals.py index f730a4db01..42cbe6fdce 100644 --- 
a/nibabel/tests/test_imageglobals.py +++ b/nibabel/tests/test_imageglobals.py @@ -8,10 +8,6 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """ Tests for imageglobals module """ - -from nose.tools import (assert_true, assert_false, assert_raises, - assert_equal, assert_not_equal) - from .. import imageglobals as igs @@ -19,5 +15,5 @@ def test_errorlevel(): orig_level = igs.error_level for level in (10, 20, 30): with igs.ErrorLevel(level): - assert_equal(igs.error_level, level) - assert_equal(igs.error_level, orig_level) + assert igs.error_level == level + assert igs.error_level == orig_level diff --git a/nibabel/tests/test_info.py b/nibabel/tests/test_info.py deleted file mode 100644 index d16c7f76b8..0000000000 --- a/nibabel/tests/test_info.py +++ /dev/null @@ -1,47 +0,0 @@ -""" Testing info module -""" - -import nibabel as nib -from nibabel import info -from nibabel.info import cmp_pkg_version - -from nose.tools import (assert_raises, assert_equal) - - -def test_version(): - # Test info version is the same as our own version - assert_equal(info.__version__, nib.__version__) - - -def test_cmp_pkg_version(): - # Test version comparator - assert_equal(cmp_pkg_version(info.__version__), 0) - assert_equal(cmp_pkg_version('0.0'), -1) - assert_equal(cmp_pkg_version('1000.1000.1'), 1) - assert_equal(cmp_pkg_version(info.__version__, info.__version__), 0) - for test_ver, pkg_ver, exp_out in (('1.0', '1.0', 0), - ('1.0.0', '1.0', 0), - ('1.0', '1.0.0', 0), - ('1.1', '1.1', 0), - ('1.2', '1.1', 1), - ('1.1', '1.2', -1), - ('1.1.1', '1.1.1', 0), - ('1.1.2', '1.1.1', 1), - ('1.1.1', '1.1.2', -1), - ('1.1', '1.1dev', 1), - ('1.1dev', '1.1', -1), - ('1.2.1', '1.2.1rc1', 1), - ('1.2.1rc1', '1.2.1', -1), - ('1.2.1rc1', '1.2.1rc', 1), - ('1.2.1rc', '1.2.1rc1', -1), - ('1.2.1rc1', '1.2.1rc', 1), - ('1.2.1rc', '1.2.1rc1', -1), - ('1.2.1b', '1.2.1a', 1), - ('1.2.1a', '1.2.1b', -1), - ): - assert_equal(cmp_pkg_version(test_ver, pkg_ver), exp_out) - assert_raises(ValueError, cmp_pkg_version, 'foo.2') - assert_raises(ValueError, cmp_pkg_version, 'foo.2', '1.0') - assert_raises(ValueError, cmp_pkg_version, '1.0', 'foo.2') - assert_raises(ValueError, cmp_pkg_version, '1') - assert_raises(ValueError, cmp_pkg_version, 'foo') diff --git a/nibabel/tests/test_init.py b/nibabel/tests/test_init.py new file mode 100644 index 0000000000..97f440497e --- /dev/null +++ b/nibabel/tests/test_init.py @@ -0,0 +1,59 @@ +import nibabel as nib +from pkg_resources import resource_filename +import pytest +from unittest import mock + +@pytest.mark.parametrize("verbose, v_args", [(-2, ["-qq"]), + (-1, ["-q"]), + (0, []), + (1, ["-v"]), + (2, ["-vv"])]) +@pytest.mark.parametrize("doctests", (True, False)) +@pytest.mark.parametrize("coverage", (True, False)) +def test_nibabel_test(verbose, v_args, doctests, coverage): + expected_args = v_args + ["--doctest-modules", "--cov", "nibabel", "--pyargs", "nibabel"] + if not doctests: + expected_args.remove("--doctest-modules") + if not coverage: + expected_args[-4:-2] = [] + + with mock.patch("pytest.main") as pytest_main: + nib.test(verbose=verbose, doctests=doctests, coverage=coverage) + + args, kwargs = pytest_main.call_args + assert args == () + assert kwargs == {"args": expected_args} + + +def test_nibabel_test_errors(): + with pytest.raises(NotImplementedError): + nib.test(label="fast") + with pytest.raises(NotImplementedError): + nib.test(raise_warnings=[]) + with pytest.raises(NotImplementedError): + nib.test(timer=True) + with 
pytest.raises(ValueError): + nib.test(verbose="-v") + + +def test_nibabel_bench(): + expected_args = ["-c", "--pyargs", "nibabel"] + + try: + expected_args.insert(1, resource_filename("nibabel", "benchmarks/pytest.benchmark.ini")) + except: + raise unittest.SkipTest("Not installed") + + with mock.patch("pytest.main") as pytest_main: + nib.bench(verbose=0) + + args, kwargs = pytest_main.call_args + assert args == () + assert kwargs == {"args": expected_args} + + with mock.patch("pytest.main") as pytest_main: + nib.bench(verbose=0, extra_argv=[]) + + args, kwargs = pytest_main.call_args + assert args == () + assert kwargs == {"args": expected_args} diff --git a/nibabel/tests/test_keywordonly.py b/nibabel/tests/test_keywordonly.py index 0ef63d9b13..26e21ce02d 100644 --- a/nibabel/tests/test_keywordonly.py +++ b/nibabel/tests/test_keywordonly.py @@ -2,8 +2,7 @@ from ..keywordonly import kw_only_func, kw_only_meth -from nose.tools import assert_equal -from nose.tools import assert_raises +import pytest def test_kw_only_func(): @@ -11,23 +10,28 @@ def test_kw_only_func(): def func(an_arg): "My docstring" return an_arg - assert_equal(func(1), 1) - assert_raises(TypeError, func, 1, 2) + assert func(1) == 1 + with pytest.raises(TypeError): + func(1, 2) dec_func = kw_only_func(1)(func) - assert_equal(dec_func(1), 1) - assert_raises(TypeError, dec_func, 1, 2) - assert_raises(TypeError, dec_func, 1, akeyarg=3) - assert_equal(dec_func.__doc__, 'My docstring') + assert dec_func(1) == 1 + with pytest.raises(TypeError): + dec_func(1, 2) + with pytest.raises(TypeError): + dec_func(1, akeyarg=3) + assert dec_func.__doc__ == 'My docstring' @kw_only_func(1) def kw_func(an_arg, a_kwarg='thing'): "Another docstring" return an_arg, a_kwarg - assert_equal(kw_func(1), (1, 'thing')) - assert_raises(TypeError, kw_func, 1, 2) - assert_equal(kw_func(1, a_kwarg=2), (1, 2)) - assert_raises(TypeError, kw_func, 1, akeyarg=3) - assert_equal(kw_func.__doc__, 'Another docstring') + assert kw_func(1) == (1, 'thing') + with pytest.raises(TypeError): + kw_func(1, 2) + assert kw_func(1, a_kwarg=2) == (1, 2) + with pytest.raises(TypeError): + kw_func(1, akeyarg=3) + assert kw_func.__doc__ == 'Another docstring' class C(object): @@ -36,8 +40,10 @@ def kw_meth(self, an_arg, a_kwarg='thing'): "Method docstring" return an_arg, a_kwarg c = C() - assert_equal(c.kw_meth(1), (1, 'thing')) - assert_raises(TypeError, c.kw_meth, 1, 2) - assert_equal(c.kw_meth(1, a_kwarg=2), (1, 2)) - assert_raises(TypeError, c.kw_meth, 1, akeyarg=3) - assert_equal(c.kw_meth.__doc__, 'Method docstring') + assert c.kw_meth(1) == (1, 'thing') + with pytest.raises(TypeError): + c.kw_meth(1, 2) + assert c.kw_meth(1, a_kwarg=2) == (1, 2) + with pytest.raises(TypeError): + c.kw_meth(1, akeyarg=3) + assert c.kw_meth.__doc__ == 'Method docstring' diff --git a/nibabel/tests/test_loadsave.py b/nibabel/tests/test_loadsave.py index 676c09c121..71f0435f1a 100644 --- a/nibabel/tests/test_loadsave.py +++ b/nibabel/tests/test_loadsave.py @@ -1,9 +1,9 @@ """ Testing loadsave module """ -from __future__ import print_function from os.path import dirname, join as pjoin import shutil +import pathlib import numpy as np @@ -20,50 +20,58 @@ from numpy.testing import (assert_almost_equal, assert_array_equal) -from nose.tools import (assert_true, assert_false, assert_raises, - assert_equal, assert_not_equal) -from ..py3k import FileNotFoundError +import pytest data_path = pjoin(dirname(__file__), 'data') def test_read_img_data(): - for fname in ('example4d.nii.gz', - 
'example_nifti2.nii.gz', - 'minc1_1_scale.mnc', - 'minc1_4d.mnc', - 'test.mgz', - 'tiny.mnc' - ): - fpath = pjoin(data_path, fname) + fnames_test = [ + 'example4d.nii.gz', + 'example_nifti2.nii.gz', + 'minc1_1_scale.mnc', + 'minc1_4d.mnc', + 'test.mgz', + 'tiny.mnc' + ] + fnames_test += [pathlib.Path(p) for p in fnames_test] + for fname in fnames_test: + # os.path.join doesnt work between str / os.PathLike in py3.5 + fpath = pjoin(data_path, str(fname)) + if isinstance(fname, pathlib.Path): + fpath = pathlib.Path(fpath) img = load(fpath) - data = img.get_data() + data = img.get_fdata() data2 = read_img_data(img) assert_array_equal(data, data2) # These examples have null scaling - assert prefer=unscaled is the same dao = img.dataobj if hasattr(dao, 'slope') and hasattr(img.header, 'raw_data_from_fileobj'): - assert_equal((dao.slope, dao.inter), (1, 0)) + assert (dao.slope, dao.inter) == (1, 0) assert_array_equal(read_img_data(img, prefer='unscaled'), data) # Assert all caps filename works as well with TemporaryDirectory() as tmpdir: - up_fpath = pjoin(tmpdir, fname.upper()) - shutil.copyfile(fpath, up_fpath) + up_fpath = pjoin(tmpdir, str(fname).upper()) + if isinstance(fname, pathlib.Path): + up_fpath = pathlib.Path(up_fpath) + # shutil doesnt work with os.PathLike in py3.5 + shutil.copyfile(str(fpath), str(up_fpath)) img = load(up_fpath) assert_array_equal(img.dataobj, data) del img def test_file_not_found(): - assert_raises(FileNotFoundError, load, 'does_not_exist.nii.gz') + with pytest.raises(FileNotFoundError): + load('does_not_exist.nii.gz') def test_load_empty_image(): with InTemporaryDirectory(): open('empty.nii', 'w').close() - with assert_raises(ImageFileError) as err: + with pytest.raises(ImageFileError) as err: load('empty.nii') - assert_true(err.exception.args[0].startswith('Empty file: ')) + assert str(err.value).startswith('Empty file: ') def test_read_img_data_nifti(): @@ -78,18 +86,20 @@ def test_read_img_data_nifti(): img = img_class(data, np.eye(4)) img.set_data_dtype(out_dtype) # No filemap => error - assert_raises(ImageFileError, read_img_data, img) + with pytest.raises(ImageFileError): + read_img_data(img) # Make a filemap froot = 'an_image_{0}'.format(i) img.file_map = img.filespec_to_file_map(froot) # Trying to read from this filemap will generate an error because # we are going to read from files that do not exist - assert_raises(IOError, read_img_data, img) + with pytest.raises(IOError): + read_img_data(img) img.to_file_map() # Load - now the scaling and offset correctly applied img_fname = img.file_map['image'].filename img_back = load(img_fname) - data_back = img_back.get_data() + data_back = img_back.get_fdata() assert_array_equal(data_back, read_img_data(img_back)) # This is the same as if we loaded the image and header separately hdr_fname = (img.file_map['header'].filename @@ -119,8 +129,8 @@ def test_read_img_data_nifti(): else: new_inter = 0 # scaled scaling comes from new parameters in header - assert_true(np.allclose(actual_unscaled * 2.1 + new_inter, - read_img_data(img_back))) + assert np.allclose(actual_unscaled * 2.1 + new_inter, + read_img_data(img_back)) # Unscaled array didn't change assert_array_equal(actual_unscaled, read_img_data(img_back, prefer='unscaled')) @@ -133,7 +143,7 @@ def test_read_img_data_nifti(): with open(img_fname, 'ab') as fobj: fobj.write(b'\x00\x00') img_back = load(img_fname) - data_back = img_back.get_data() + data_back = img_back.get_fdata() assert_array_equal(data_back, read_img_data(img_back)) 
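The replacement of `get_data()` with `get_fdata()` or `np.asanyarray(img.dataobj)` in these hunks reflects the distinction sketched below: `dataobj` keeps the stored dtype, while `get_fdata()` always returns floating point data.

```python
import numpy as np
import nibabel as nib

img = nib.Nifti1Image(np.arange(24, dtype=np.uint8).reshape(2, 3, 4), np.eye(4))
raw = np.asanyarray(img.dataobj)   # stored values, still uint8
floats = img.get_fdata()           # always floating point (float64 by default)
assert raw.dtype == np.uint8
assert floats.dtype == np.float64
assert np.array_equal(raw, floats)
```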
img_back.header.set_data_offset(1026) # Check we pick up new offset diff --git a/nibabel/tests/test_minc1.py b/nibabel/tests/test_minc1.py index cb59d921eb..a908ee6ad9 100644 --- a/nibabel/tests/test_minc1.py +++ b/nibabel/tests/test_minc1.py @@ -6,7 +6,6 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -from __future__ import division, print_function, absolute_import from os.path import join as pjoin @@ -24,14 +23,14 @@ from .. import minc1 from ..minc1 import Minc1File, Minc1Image, MincHeader -from nose.tools import (assert_true, assert_equal, assert_false, assert_raises) -from numpy.testing import assert_array_equal from ..tmpdirs import InTemporaryDirectory -from ..testing import data_path +from ..deprecator import ExpiredDeprecationError +from ..testing import assert_data_similar, data_path, clear_and_catch_warnings +from numpy.testing import assert_array_equal +import pytest from . import test_spatialimages as tsi from .test_fileslice import slicer_samples -from .test_helpers import assert_data_similar EG_FNAME = pjoin(data_path, 'tiny.mnc') @@ -106,43 +105,16 @@ def test_old_namespace(): # Check warnings raised arr = np.arange(24).reshape((2, 3, 4)) aff = np.diag([2, 3, 4, 1]) - with warnings.catch_warnings(record=True) as warns: - # Top level import. - # This import does not trigger an import of the minc.py module, because - # it's the proxy object. - from .. import minc - assert_equal(warns, []) - # If there was a previous import it will be module, otherwise it will be - # a proxy - previous_import = isinstance(minc, types.ModuleType) - if not previous_import: - assert_true(isinstance(minc, ModuleProxy)) - old_minc1image = minc.Minc1Image # just to check it works - # There may or may not be a warning raised on accessing the proxy, - # depending on whether the minc.py module is already imported in this - # test run. - if not previous_import: - assert_equal(warns.pop(0).category, FutureWarning) - from .. import Minc1Image, MincImage - assert_equal(warns, []) - # The import from old module is the same as that from new - assert_true(old_minc1image is Minc1Image) - # But the old named import, imported from new, is not the same - assert_false(Minc1Image is MincImage) - assert_equal(warns, []) - # Create object using old name - mimg = MincImage(arr, aff) - assert_array_equal(mimg.get_data(), arr) - # Call to create object created warning - assert_equal(warns.pop(0).category, FutureWarning) - # Another old name - from ..minc1 import MincFile, Minc1File - assert_false(MincFile is Minc1File) - assert_equal(warns, []) + + from .. 
import Minc1Image, MincImage + assert Minc1Image is not MincImage + with pytest.raises(ExpiredDeprecationError): + MincImage(arr, aff) + # Another old name + from ..minc1 import MincFile, Minc1File + assert MincFile is not Minc1File + with pytest.raises(ExpiredDeprecationError): mf = MincFile(netcdf_file(EG_FNAME)) - assert_equal(mf.get_data_shape(), (10, 20, 20)) - # Call to create object created warning - assert_equal(warns.pop(0).category, FutureWarning) class _TestMincFile(object): @@ -156,12 +128,12 @@ def test_mincfile(self): for tp in self.test_files: mnc_obj = self.opener(tp['fname'], 'r') mnc = self.file_class(mnc_obj) - assert_equal(mnc.get_data_dtype().type, tp['dtype']) - assert_equal(mnc.get_data_shape(), tp['shape']) - assert_equal(mnc.get_zooms(), tp['zooms']) + assert mnc.get_data_dtype().type == tp['dtype'] + assert mnc.get_data_shape() == tp['shape'] + assert mnc.get_zooms() == tp['zooms'] assert_array_equal(mnc.get_affine(), tp['affine']) data = mnc.get_scaled_data() - assert_equal(data.shape, tp['shape']) + assert data.shape == tp['shape'] def test_mincfile_slicing(self): # Test slicing and scaling of mincfile data @@ -184,22 +156,22 @@ def test_load(self): # Check highest level load of minc works for tp in self.test_files: img = load(tp['fname']) - data = img.get_data() - assert_equal(data.shape, tp['shape']) + data = img.get_fdata() + assert data.shape == tp['shape'] # min, max, mean values from read in SPM2 / minctools assert_data_similar(data, tp) # check if mnc can be converted to nifti ni_img = Nifti1Image.from_image(img) assert_array_equal(ni_img.affine, tp['affine']) - assert_array_equal(ni_img.get_data(), data) + assert_array_equal(ni_img.get_fdata(), data) def test_array_proxy_slicing(self): # Test slicing of array proxy for tp in self.test_files: img = load(tp['fname']) - arr = img.get_data() + arr = img.get_fdata() prox = img.dataobj - assert_true(prox.is_proxy) + assert prox.is_proxy for sliceobj in slicer_samples(img.shape): assert_array_equal(arr[sliceobj], prox[sliceobj]) @@ -219,7 +191,7 @@ def test_compressed(self): fobj.write(content) fobj.close() img = self.module.load(fname) - data = img.get_data() + data = img.get_fdata() assert_data_similar(data, tp) del img @@ -229,8 +201,10 @@ def test_header_data_io(): bio = BytesIO() hdr = MincHeader() arr = np.arange(24).reshape((2, 3, 4)) - assert_raises(NotImplementedError, hdr.data_to_fileobj, arr, bio) - assert_raises(NotImplementedError, hdr.data_from_fileobj, bio) + with pytest.raises(NotImplementedError): + hdr.data_to_fileobj(arr, bio) + with pytest.raises(NotImplementedError): + hdr.data_from_fileobj(bio) class TestMinc1Image(tsi.TestSpatialImage): @@ -244,7 +218,7 @@ def test_data_to_from_fileobj(self): img = self.module.load(fpath) bio = BytesIO() arr = np.arange(24).reshape((2, 3, 4)) - assert_raises(NotImplementedError, - img.header.data_to_fileobj, arr, bio) - assert_raises(NotImplementedError, - img.header.data_from_fileobj, bio) + with pytest.raises(NotImplementedError): + img.header.data_to_fileobj(arr, bio) + with pytest.raises(NotImplementedError): + img.header.data_from_fileobj(bio) diff --git a/nibabel/tests/test_minc2.py b/nibabel/tests/test_minc2.py index c4cb9341ca..2c2f5c6e51 100644 --- a/nibabel/tests/test_minc2.py +++ b/nibabel/tests/test_minc2.py @@ -6,20 +6,14 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -from __future__ import division, print_function, absolute_import from os.path import join as pjoin import numpy as np -from ..optpkg import optional_package - -h5py, have_h5py, setup_module = optional_package('h5py') - from .. import minc2 from ..minc2 import Minc2File, Minc2Image - -from nose.tools import (assert_true, assert_equal, assert_false, assert_raises) +from .._h5py_compat import h5py, have_h5py, setup_module from ..testing import data_path diff --git a/nibabel/tests/test_minc2_data.py b/nibabel/tests/test_minc2_data.py index 1ec4999a43..6d5a4b0e35 100644 --- a/nibabel/tests/test_minc2_data.py +++ b/nibabel/tests/test_minc2_data.py @@ -8,21 +8,17 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """ Test we can correctly import example MINC2_PATH files """ -from __future__ import print_function, absolute_import import os from os.path import join as pjoin import numpy as np -from nibabel.optpkg import optional_package - -h5py, have_h5py, setup_module = optional_package('h5py') +from .._h5py_compat import h5py, have_h5py, setup_module from .nibabel_data import get_nibabel_data, needs_nibabel_data from .. import load as top_load, Nifti1Image -from nose.tools import assert_equal from numpy.testing import (assert_array_equal, assert_almost_equal) MINC2_PATH = pjoin(get_nibabel_data(), 'nitest-minc2') @@ -61,14 +57,14 @@ class TestEPIFrame(object): def test_load(self): # Check highest level load of minc works img = self.opener(self.example_params['fname']) - assert_equal(img.shape, self.example_params['shape']) + assert img.shape == self.example_params['shape'] assert_almost_equal(img.header.get_zooms(), self.example_params['zooms'], 5) assert_almost_equal(img.affine, self.example_params['affine'], 4) - assert_equal(img.get_data_dtype().type, self.example_params['type']) + assert img.get_data_dtype().type == self.example_params['type'] # Check correspondence of data and recorded shape - data = img.get_data() - assert_equal(data.shape, self.example_params['shape']) + data = img.get_fdata() + assert data.shape == self.example_params['shape'] # min, max, mean values from read in SPM2 assert_almost_equal(data.min(), self.example_params['min'], 4) assert_almost_equal(data.max(), self.example_params['max'], 4) @@ -77,7 +73,7 @@ def test_load(self): ni_img = Nifti1Image.from_image(img) assert_almost_equal(ni_img.get_affine(), self.example_params['affine'], 2) - assert_array_equal(ni_img.get_data(), data) + assert_array_equal(ni_img.get_fdata(), data) class TestB0(TestEPIFrame): diff --git a/nibabel/tests/test_mriutils.py b/nibabel/tests/test_mriutils.py index 6978d9c253..8c6b198c95 100644 --- a/nibabel/tests/test_mriutils.py +++ b/nibabel/tests/test_mriutils.py @@ -8,15 +8,10 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """ Testing mriutils module """ -from __future__ import division -from numpy.testing import (assert_almost_equal, - assert_array_equal) - -from nose.tools import (assert_true, assert_false, assert_raises, - assert_equal, assert_not_equal) - +from numpy.testing import assert_almost_equal +import pytest from ..mriutils import calculate_dwell_time, MRIError @@ -29,5 +24,7 @@ def test_calculate_dwell_time(): 3.3 / (42.576 * 3.4 * 3 * 3)) # Echo train length of 1 is valid, but returns 0 dwell time assert_almost_equal(calculate_dwell_time(3.3, 1, 3), 0) - assert_raises(MRIError, calculate_dwell_time, 3.3, 0, 3.0) - 
assert_raises(MRIError, calculate_dwell_time, 3.3, 2, -0.1) + with pytest.raises(MRIError): + calculate_dwell_time(3.3, 0, 3.0) + with pytest.raises(MRIError): + calculate_dwell_time(3.3, 2, -0.1) diff --git a/nibabel/tests/test_nibabel_data.py b/nibabel/tests/test_nibabel_data.py index f804f7499f..86e94f5c34 100644 --- a/nibabel/tests/test_nibabel_data.py +++ b/nibabel/tests/test_nibabel_data.py @@ -6,7 +6,6 @@ from . import nibabel_data as nibd -from nose.tools import assert_equal MY_DIR = dirname(__file__) @@ -23,10 +22,10 @@ def test_get_nibabel_data(): # Test getting directory local_data = realpath(pjoin(MY_DIR, '..', '..', 'nibabel-data')) if isdir(local_data): - assert_equal(nibd.get_nibabel_data(), local_data) + assert nibd.get_nibabel_data() == local_data else: - assert_equal(nibd.get_nibabel_data(), '') + assert nibd.get_nibabel_data() == '' nibd.environ['NIBABEL_DATA_DIR'] = 'not_a_path' - assert_equal(nibd.get_nibabel_data(), '') + assert nibd.get_nibabel_data() == '' nibd.environ['NIBABEL_DATA_DIR'] = MY_DIR - assert_equal(nibd.get_nibabel_data(), MY_DIR) + assert nibd.get_nibabel_data() == MY_DIR diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index 78f876ec7d..9b4747bd5d 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -7,13 +7,10 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## ''' Tests for nifti reading package ''' -from __future__ import division, print_function, absolute_import import os import warnings import struct -import six - import numpy as np from nibabel import nifti1 as nifti1 @@ -28,22 +25,24 @@ from nibabel.spatialimages import HeaderDataError from nibabel.tmpdirs import InTemporaryDirectory from ..freesurfer import load as mghload +from ..orientations import aff2axcodes from .test_arraywriters import rt_err_estimate, IUINT_TYPES -from .test_helpers import bytesio_filemap, bytesio_round_trip +from .test_orientations import ALL_ORNTS from .nibabel_data import get_nibabel_data, needs_nibabel_data from numpy.testing import (assert_array_equal, assert_array_almost_equal, assert_almost_equal) -from nose.tools import (assert_true, assert_false, assert_equal, - assert_raises) from ..testing import ( clear_and_catch_warnings, data_path, runif_extra_has, suppress_warnings, + bytesio_filemap, + bytesio_round_trip ) +import pytest from . import test_analyze as tana from . 
import test_spm99analyze as tspm @@ -51,7 +50,8 @@ header_file = os.path.join(data_path, 'nifti1.hdr') image_file = os.path.join(data_path, 'example4d.nii.gz') -from nibabel.pydicom_compat import pydicom, dicom_test +from ..pydicom_compat import pydicom +from ..nicom.tests import dicom_test # Example transformation matrix @@ -83,15 +83,15 @@ class TestNifti1PairHeader(tana.TestAnalyzeHeader, tspm.HeaderScalingMixin): def test_empty(self): tana.TestAnalyzeHeader.test_empty(self) hdr = self.header_class() - assert_equal(hdr['magic'], hdr.pair_magic) - assert_equal(hdr['scl_slope'], 1) - assert_equal(hdr['vox_offset'], 0) + assert hdr['magic'] == hdr.pair_magic + assert hdr['scl_slope'] == 1 + assert hdr['vox_offset'] == 0 def test_from_eg_file(self): hdr = self.header_class.from_fileobj(open(self.example_file, 'rb')) - assert_equal(hdr.endianness, '<') - assert_equal(hdr['magic'], hdr.pair_magic) - assert_equal(hdr['sizeof_hdr'], self.sizeof_hdr) + assert hdr.endianness == '<' + assert hdr['magic'] == hdr.pair_magic + assert hdr['sizeof_hdr'] == self.sizeof_hdr def test_data_scaling(self): # Test scaling in header @@ -110,7 +110,7 @@ def test_data_scaling(self): hdr.set_data_dtype(np.int8) hdr.set_slope_inter(1, 0) hdr.data_to_fileobj(data, S, rescale=True) - assert_false(np.allclose(hdr.get_slope_inter(), (1, 0))) + assert not np.allclose(hdr.get_slope_inter(), (1, 0)) rdata = hdr.data_from_fileobj(S) assert_array_almost_equal(data, rdata) # Without scaling does rounding, doesn't alter scaling @@ -134,13 +134,13 @@ def test_big_scaling(self): data = np.array([finf['min'], finf['max']], dtype=dtt)[:, None, None] hdr.data_to_fileobj(data, sio) data_back = hdr.data_from_fileobj(sio) - assert_true(np.allclose(data, data_back)) + assert np.allclose(data, data_back) def test_slope_inter(self): hdr = self.header_class() nan, inf, minf = np.nan, np.inf, -np.inf HDE = HeaderDataError - assert_equal(hdr.get_slope_inter(), (1.0, 0.0)) + assert hdr.get_slope_inter() == (1.0, 0.0) for in_tup, exp_err, out_tup, raw_values in ( # Null scalings ((None, None), None, (None, None), (nan, nan)), @@ -178,20 +178,21 @@ def test_slope_inter(self): ((2, 1), None, (2, 1), (2, 1))): hdr = self.header_class() if not exp_err is None: - assert_raises(exp_err, hdr.set_slope_inter, *in_tup) + with pytest.raises(exp_err): + hdr.set_slope_inter(*in_tup) in_list = [v if not v is None else np.nan for v in in_tup] hdr['scl_slope'], hdr['scl_inter'] = in_list else: hdr.set_slope_inter(*in_tup) if isinstance(out_tup, Exception): - assert_raises(out_tup, hdr.get_slope_inter) + with pytest.raises(out_tup): + hdr.get_slope_inter() else: - assert_equal(hdr.get_slope_inter(), out_tup) + assert hdr.get_slope_inter() == out_tup # Check set survives through checking hdr = self.header_class.from_header(hdr, check=True) - assert_equal(hdr.get_slope_inter(), out_tup) - assert_array_equal([hdr['scl_slope'], hdr['scl_inter']], - raw_values) + assert hdr.get_slope_inter() == out_tup + assert_array_equal([hdr['scl_slope'], hdr['scl_inter']], raw_values) def test_nifti_qfac_checks(self): # Test qfac is 1 or -1 @@ -204,10 +205,8 @@ def test_nifti_qfac_checks(self): # 0 is not hdr['pixdim'][0] = 0 fhdr, message, raiser = self.log_chk(hdr, 20) - assert_equal(fhdr['pixdim'][0], 1) - assert_equal(message, - 'pixdim[0] (qfac) should be 1 ' - '(default) or -1; setting qfac to 1') + assert fhdr['pixdim'][0] == 1 + assert message == 'pixdim[0] (qfac) should be 1 (default) or -1; setting qfac to 1' def test_nifti_qsform_checks(self): # qform, sform 
checks @@ -216,15 +215,30 @@ def test_nifti_qsform_checks(self): hdr = HC() hdr['qform_code'] = -1 fhdr, message, raiser = self.log_chk(hdr, 30) - assert_equal(fhdr['qform_code'], 0) - assert_equal(message, - 'qform_code -1 not valid; setting to 0') + assert fhdr['qform_code'] == 0 + assert message == 'qform_code -1 not valid; setting to 0' hdr = HC() hdr['sform_code'] = -1 fhdr, message, raiser = self.log_chk(hdr, 30) - assert_equal(fhdr['sform_code'], 0) - assert_equal(message, - 'sform_code -1 not valid; setting to 0') + assert fhdr['sform_code'] == 0 + assert message == 'sform_code -1 not valid; setting to 0' + + def test_nifti_xform_codes(self): + # Verify that all xform codes can be set in both qform and sform + hdr = self.header_class() + affine = np.eye(4) + for code in nifti1.xform_codes.keys(): + hdr.set_qform(affine, code) + assert hdr['qform_code'] == nifti1.xform_codes[code] + hdr.set_sform(affine, code) + assert hdr['sform_code'] == nifti1.xform_codes[code] + + # Raise KeyError on unknown code + for bad_code in (-1, 6, 10): + with pytest.raises(KeyError): + hdr.set_qform(affine, bad_code) + with pytest.raises(KeyError): + hdr.set_sform(affine, bad_code) def test_magic_offset_checks(self): # magic and offset @@ -232,10 +246,10 @@ def test_magic_offset_checks(self): hdr = HC() hdr['magic'] = 'ooh' fhdr, message, raiser = self.log_chk(hdr, 45) - assert_equal(fhdr['magic'], b'ooh') - assert_equal(message, - 'magic string "ooh" is not valid; ' - 'leaving as is, but future errors are likely') + assert fhdr['magic'] == b'ooh' + assert (message == + 'magic string "ooh" is not valid; ' + 'leaving as is, but future errors are likely') # For pairs, any offset is OK, but should be divisible by 16 # Singles need offset of at least 352 (nifti1) or 540 (nifti2) bytes, # with the divide by 16 rule @@ -249,20 +263,20 @@ def test_magic_offset_checks(self): self.assert_no_log_err(hdr) hdr['vox_offset'] = bad_spm fhdr, message, raiser = self.log_chk(hdr, 30) - assert_equal(fhdr['vox_offset'], bad_spm) - assert_equal(message, - 'vox offset (={0:g}) not divisible by 16, ' - 'not SPM compatible; leaving at current ' - 'value'.format(bad_spm)) + assert fhdr['vox_offset'] == bad_spm + assert (message == + 'vox offset (={0:g}) not divisible by 16, ' + 'not SPM compatible; leaving at current ' + 'value'.format(bad_spm)) # Check minimum offset (if offset set) hdr['magic'] = hdr.single_magic hdr['vox_offset'] = 10 fhdr, message, raiser = self.log_chk(hdr, 40) - assert_equal(fhdr['vox_offset'], hdr.single_vox_offset) - assert_equal(message, - 'vox offset 10 too low for single ' - 'file nifti1; setting to minimum value ' - 'of ' + str(hdr.single_vox_offset)) + assert fhdr['vox_offset'] == hdr.single_vox_offset + assert (message == + 'vox offset 10 too low for single ' + 'file nifti1; setting to minimum value ' + 'of ' + str(hdr.single_vox_offset)) def test_freesurfer_large_vector_hack(self): # For large vector images, Freesurfer appears to set dim[1] to -1 and @@ -271,14 +285,14 @@ def test_freesurfer_large_vector_hack(self): # The standard case hdr = HC() hdr.set_data_shape((2, 3, 4)) - assert_equal(hdr.get_data_shape(), (2, 3, 4)) - assert_equal(hdr['glmin'], 0) + assert hdr.get_data_shape() == (2, 3, 4) + assert hdr['glmin'] == 0 # Just left of the freesurfer case dim_type = hdr.template_dtype['dim'].base glmin = hdr.template_dtype['glmin'].base too_big = int(np.iinfo(dim_type).max) + 1 hdr.set_data_shape((too_big - 1, 1, 1)) - assert_equal(hdr.get_data_shape(), (too_big - 1, 1, 1)) + assert 
hdr.get_data_shape() == (too_big - 1, 1, 1) # The freesurfer case full_shape = (too_big, 1, 1, 1, 1, 1, 1) for dim in range(3, 8): @@ -286,39 +300,41 @@ def test_freesurfer_large_vector_hack(self): expected_dim = np.array([dim, -1, 1, 1, 1, 1, 1, 1]) with suppress_warnings(): hdr.set_data_shape(full_shape[:dim]) - assert_equal(hdr.get_data_shape(), full_shape[:dim]) + assert hdr.get_data_shape() == full_shape[:dim] assert_array_equal(hdr['dim'], expected_dim) - assert_equal(hdr['glmin'], too_big) + assert hdr['glmin'] == too_big # Allow the fourth dimension to vary with suppress_warnings(): hdr.set_data_shape((too_big, 1, 1, 4)) - assert_equal(hdr.get_data_shape(), (too_big, 1, 1, 4)) + assert hdr.get_data_shape() == (too_big, 1, 1, 4) assert_array_equal(hdr['dim'][:5], np.array([4, -1, 1, 1, 4])) # This only works when the first 3 dimensions are -1, 1, 1 - assert_raises(HeaderDataError, hdr.set_data_shape, (too_big,)) - assert_raises(HeaderDataError, hdr.set_data_shape, (too_big, 1)) - assert_raises(HeaderDataError, hdr.set_data_shape, (too_big, 1, 2)) - assert_raises(HeaderDataError, hdr.set_data_shape, (too_big, 2, 1)) - assert_raises(HeaderDataError, hdr.set_data_shape, (1, too_big)) - assert_raises(HeaderDataError, hdr.set_data_shape, (1, too_big, 1)) - assert_raises(HeaderDataError, hdr.set_data_shape, (1, 1, too_big)) - assert_raises(HeaderDataError, hdr.set_data_shape, (1, 1, 1, too_big)) + pytest.raises(HeaderDataError, hdr.set_data_shape, (too_big,)) + pytest.raises(HeaderDataError, hdr.set_data_shape, (too_big, 1)) + pytest.raises(HeaderDataError, hdr.set_data_shape, (too_big, 1, 2)) + pytest.raises(HeaderDataError, hdr.set_data_shape, (too_big, 2, 1)) + pytest.raises(HeaderDataError, hdr.set_data_shape, (1, too_big)) + pytest.raises(HeaderDataError, hdr.set_data_shape, (1, too_big, 1)) + pytest.raises(HeaderDataError, hdr.set_data_shape, (1, 1, too_big)) + pytest.raises(HeaderDataError, hdr.set_data_shape, (1, 1, 1, too_big)) # Outside range of glmin raises error far_too_big = int(np.iinfo(glmin).max) + 1 with suppress_warnings(): hdr.set_data_shape((far_too_big - 1, 1, 1)) - assert_equal(hdr.get_data_shape(), (far_too_big - 1, 1, 1)) - assert_raises(HeaderDataError, hdr.set_data_shape, (far_too_big, 1, 1)) + assert hdr.get_data_shape() == (far_too_big - 1, 1, 1) + with pytest.raises(HeaderDataError): + hdr.set_data_shape((far_too_big, 1, 1)) # glmin of zero raises error (implausible vector length) hdr.set_data_shape((-1, 1, 1)) hdr['glmin'] = 0 - assert_raises(HeaderDataError, hdr.get_data_shape) + with pytest.raises(HeaderDataError): + hdr.get_data_shape() # Lists or tuples or arrays will work for setting shape for shape in ((too_big - 1, 1, 1), (too_big, 1, 1)): for constructor in (list, tuple, np.array): with suppress_warnings(): hdr.set_data_shape(constructor(shape)) - assert_equal(hdr.get_data_shape(), shape) + assert hdr.get_data_shape() == shape @needs_nibabel_data('nitest-freesurfer') def test_freesurfer_ico7_hack(self): @@ -329,34 +345,34 @@ def test_freesurfer_ico7_hack(self): for dim in range(3, 8): expected_dim = np.array([dim, 27307, 1, 6, 1, 1, 1, 1]) hdr.set_data_shape(full_shape[:dim]) - assert_equal(hdr.get_data_shape(), full_shape[:dim]) + assert hdr.get_data_shape() == full_shape[:dim] assert_array_equal(hdr._structarr['dim'], expected_dim) # Only works on dimensions >= 3 - assert_raises(HeaderDataError, hdr.set_data_shape, full_shape[:1]) - assert_raises(HeaderDataError, hdr.set_data_shape, full_shape[:2]) + pytest.raises(HeaderDataError, 
hdr.set_data_shape, full_shape[:1]) + pytest.raises(HeaderDataError, hdr.set_data_shape, full_shape[:2]) # Bad shapes - assert_raises(HeaderDataError, hdr.set_data_shape, (163842, 2, 1)) - assert_raises(HeaderDataError, hdr.set_data_shape, (163842, 1, 2)) - assert_raises(HeaderDataError, hdr.set_data_shape, (1, 163842, 1)) - assert_raises(HeaderDataError, hdr.set_data_shape, (1, 1, 163842)) - assert_raises(HeaderDataError, hdr.set_data_shape, (1, 1, 1, 163842)) + pytest.raises(HeaderDataError, hdr.set_data_shape, (163842, 2, 1)) + pytest.raises(HeaderDataError, hdr.set_data_shape, (163842, 1, 2)) + pytest.raises(HeaderDataError, hdr.set_data_shape, (1, 163842, 1)) + pytest.raises(HeaderDataError, hdr.set_data_shape, (1, 1, 163842)) + pytest.raises(HeaderDataError, hdr.set_data_shape, (1, 1, 1, 163842)) # Test consistency of data in .mgh and mri_convert produced .nii nitest_path = os.path.join(get_nibabel_data(), 'nitest-freesurfer') mgh = mghload(os.path.join(nitest_path, 'fsaverage', 'surf', 'lh.orig.avg.area.mgh')) nii = load(os.path.join(nitest_path, 'derivative', 'fsaverage', 'surf', 'lh.orig.avg.area.nii')) - assert_equal(mgh.shape, nii.shape) - assert_array_equal(mgh.get_data(), nii.get_data()) + assert mgh.shape == nii.shape + assert_array_equal(mgh.get_fdata(), nii.get_fdata()) assert_array_equal(nii.header._structarr['dim'][1:4], np.array([27307, 1, 6])) # Test writing produces consistent nii files with InTemporaryDirectory(): nii.to_filename('test.nii') nii2 = load('test.nii') - assert_equal(nii.shape, nii2.shape) - assert_array_equal(nii.get_data(), nii2.get_data()) - assert_array_equal(nii.get_affine(), nii2.get_affine()) + assert nii.shape == nii2.shape + assert_array_equal(nii.get_fdata(), nii2.get_fdata()) + assert_array_equal(nii.affine, nii2.affine) def test_qform_sform(self): HC = self.header_class @@ -365,8 +381,8 @@ def test_qform_sform(self): empty_sform = np.zeros((4, 4)) empty_sform[-1, -1] = 1 assert_array_equal(hdr.get_sform(), empty_sform) - assert_equal(hdr.get_qform(coded=True), (None, 0)) - assert_equal(hdr.get_sform(coded=True), (None, 0)) + assert hdr.get_qform(coded=True) == (None, 0) + assert hdr.get_sform(coded=True) == (None, 0) # Affines with no shears nice_aff = np.diag([2, 3, 4, 1]) another_aff = np.diag([3, 4, 5, 1]) @@ -374,39 +390,39 @@ def test_qform_sform(self): nasty_aff = from_matvec(np.arange(9).reshape((3, 3)), [9, 10, 11]) nasty_aff[0, 0] = 1 # Make full rank fixed_aff = unshear_44(nasty_aff) - assert_false(np.allclose(fixed_aff, nasty_aff)) + assert not np.allclose(fixed_aff, nasty_aff) for in_meth, out_meth in ((hdr.set_qform, hdr.get_qform), (hdr.set_sform, hdr.get_sform)): in_meth(nice_aff, 2) aff, code = out_meth(coded=True) assert_array_equal(aff, nice_aff) - assert_equal(code, 2) + assert code == 2 assert_array_equal(out_meth(), nice_aff) # non coded # Affine may be passed if code == 0, and will get set into header, # but the returned affine with 'coded=True' will be None. 
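As an aside for readers following this hunk: the coded get/set semantics being exercised here can be illustrated with a minimal standalone sketch (an illustration using a stock `Nifti1Header`, not part of this change set) — passing an affine with a non-zero code round-trips both values, while an explicit code of 0 stores the affine but makes the coded getter report `(None, 0)`:

```python
import numpy as np
from nibabel import Nifti1Header

hdr = Nifti1Header()
nice_aff = np.diag([2, 3, 4, 1])

# Non-zero code: both the affine and the code round-trip.
hdr.set_qform(nice_aff, code=2)              # 2 == 'aligned'
aff, code = hdr.get_qform(coded=True)
assert code == 2 and np.allclose(aff, nice_aff)

# Code 0: the affine is still stored, but the coded getter reports (None, 0).
hdr.set_qform(np.diag([3, 4, 5, 1]), code=0)
assert hdr.get_qform(coded=True) == (None, 0)
assert np.allclose(hdr.get_qform(), np.diag([3, 4, 5, 1]))
```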
in_meth(another_aff, 0) - assert_equal(out_meth(coded=True), (None, 0)) # coded -> None + assert out_meth(coded=True) == (None, 0) # coded -> None assert_array_almost_equal(out_meth(), another_aff) # else -> input # Default qform code when previous == 0 is 2 in_meth(nice_aff) aff, code = out_meth(coded=True) - assert_equal(code, 2) + assert code == 2 # Unless code was non-zero before in_meth(nice_aff, 1) in_meth(nice_aff) aff, code = out_meth(coded=True) - assert_equal(code, 1) + assert code == 1 # Can set code without modifying affine, by passing affine=None assert_array_equal(aff, nice_aff) # affine same as before in_meth(None, 3) aff, code = out_meth(coded=True) assert_array_equal(aff, nice_aff) # affine same as before - assert_equal(code, 3) + assert code == 3 # affine is None on its own, or with code==0, resets code to 0 in_meth(None, 0) - assert_equal(out_meth(coded=True), (None, 0)) + assert out_meth(coded=True) == (None, 0) in_meth(None) - assert_equal(out_meth(coded=True), (None, 0)) + assert out_meth(coded=True) == (None, 0) # List works as input in_meth(nice_aff.tolist()) assert_array_equal(out_meth(), nice_aff) @@ -415,17 +431,18 @@ def test_qform_sform(self): hdr.set_qform(nasty_aff, 1) assert_array_almost_equal(hdr.get_qform(), fixed_aff) # Unless allow_shears is False - assert_raises(HeaderDataError, hdr.set_qform, nasty_aff, 1, False) + with pytest.raises(HeaderDataError): + hdr.set_qform(nasty_aff, 1, False) # Reset sform, give qform a code, to test sform hdr.set_sform(None) hdr.set_qform(nice_aff, 1) # Check sform unchanged by setting qform - assert_equal(hdr.get_sform(coded=True), (None, 0)) + assert hdr.get_sform(coded=True) == (None, 0) # Setting does change the sform ouput hdr.set_sform(nasty_aff, 1) aff, code = hdr.get_sform(coded=True) assert_array_equal(aff, nasty_aff) - assert_equal(code, 1) + assert code == 1 def test_datatypes(self): hdr = self.header_class() @@ -434,9 +451,7 @@ def test_datatypes(self): if dt == np.void: continue hdr.set_data_dtype(code) - (assert_equal, - hdr.get_data_dtype(), - data_type_codes.dtype[code]) + assert hdr.get_data_dtype() == data_type_codes.dtype[code] # Check that checks also see new datatypes hdr.set_data_dtype(np.complex128) hdr.check_fix() @@ -446,11 +461,11 @@ def test_quaternion(self): hdr['quatern_b'] = 0 hdr['quatern_c'] = 0 hdr['quatern_d'] = 0 - assert_true(np.allclose(hdr.get_qform_quaternion(), [1.0, 0, 0, 0])) + assert np.allclose(hdr.get_qform_quaternion(), [1.0, 0, 0, 0]) hdr['quatern_b'] = 1 hdr['quatern_c'] = 0 hdr['quatern_d'] = 0 - assert_true(np.allclose(hdr.get_qform_quaternion(), [0, 1, 0, 0])) + assert np.allclose(hdr.get_qform_quaternion(), [0, 1, 0, 0]) # Check threshold set correctly for float32 hdr['quatern_b'] = 1 + np.finfo(self.quat_dtype).eps assert_array_almost_equal(hdr.get_qform_quaternion(), [0, 1, 0, 0]) @@ -460,35 +475,36 @@ def test_qform(self): ehdr = self.header_class() ehdr.set_qform(A) qA = ehdr.get_qform() - assert_true, np.allclose(A, qA, atol=1e-5) - assert_true, np.allclose(Z, ehdr['pixdim'][1:4]) + assert np.allclose(A, qA, atol=1e-5) + assert np.allclose(Z, ehdr['pixdim'][1:4]) xfas = nifti1.xform_codes - assert_true, ehdr['qform_code'] == xfas['aligned'] + assert ehdr['qform_code'] == xfas['aligned'] ehdr.set_qform(A, 'scanner') - assert_true, ehdr['qform_code'] == xfas['scanner'] + assert ehdr['qform_code'] == xfas['scanner'] ehdr.set_qform(A, xfas['aligned']) - assert_true, ehdr['qform_code'] == xfas['aligned'] + assert ehdr['qform_code'] == xfas['aligned'] # Test pixdims[1,2,3] 
are checked for negatives for dims in ((-1, 1, 1), (1, -1, 1), (1, 1, -1)): ehdr['pixdim'][1:4] = dims - assert_raises(HeaderDataError, ehdr.get_qform) + with pytest.raises(HeaderDataError): + ehdr.get_qform() def test_sform(self): # Test roundtrip case ehdr = self.header_class() ehdr.set_sform(A) sA = ehdr.get_sform() - assert_true, np.allclose(A, sA, atol=1e-5) + assert np.allclose(A, sA, atol=1e-5) xfas = nifti1.xform_codes - assert_true, ehdr['sform_code'] == xfas['aligned'] + assert ehdr['sform_code'] == xfas['aligned'] ehdr.set_sform(A, 'scanner') - assert_true, ehdr['sform_code'] == xfas['scanner'] + assert ehdr['sform_code'] == xfas['scanner'] ehdr.set_sform(A, xfas['aligned']) - assert_true, ehdr['sform_code'] == xfas['aligned'] + assert ehdr['sform_code'] == xfas['aligned'] def test_dim_info(self): ehdr = self.header_class() - assert_true(ehdr.get_dim_info() == (None, None, None)) + assert ehdr.get_dim_info() == (None, None, None) for info in ((0, 2, 1), (None, None, None), (0, 2, None), @@ -497,18 +513,21 @@ def test_dim_info(self): (None, None, 1), ): ehdr.set_dim_info(*info) - assert_true(ehdr.get_dim_info() == info) + assert ehdr.get_dim_info() == info def test_slice_times(self): hdr = self.header_class() # error if slice dimension not specified - assert_raises(HeaderDataError, hdr.get_slice_times) + with pytest.raises(HeaderDataError): + hdr.get_slice_times() hdr.set_dim_info(slice=2) # error if slice dimension outside shape - assert_raises(HeaderDataError, hdr.get_slice_times) + with pytest.raises(HeaderDataError): + hdr.get_slice_times() hdr.set_data_shape((1, 1, 7)) # error if slice duration not set - assert_raises(HeaderDataError, hdr.get_slice_times) + with pytest.raises(HeaderDataError): + hdr.get_slice_times() hdr.set_slice_duration(0.1) # We need a function to print out the Nones and floating point # values in a predictable way, for the tests below. @@ -516,51 +535,56 @@ def test_slice_times(self): _print_me = lambda s: list(map(_stringer, s)) # The following examples are from the nifti1.h documentation. 
hdr['slice_code'] = slice_order_codes['sequential increasing'] - assert_equal(_print_me(hdr.get_slice_times()), - ['0.0', '0.1', '0.2', '0.3', '0.4', '0.5', '0.6']) + assert (_print_me(hdr.get_slice_times()) == + ['0.0', '0.1', '0.2', '0.3', '0.4', '0.5', '0.6']) hdr['slice_start'] = 1 hdr['slice_end'] = 5 - assert_equal(_print_me(hdr.get_slice_times()), - [None, '0.0', '0.1', '0.2', '0.3', '0.4', None]) + assert (_print_me(hdr.get_slice_times()) == + [None, '0.0', '0.1', '0.2', '0.3', '0.4', None]) hdr['slice_code'] = slice_order_codes['sequential decreasing'] - assert_equal(_print_me(hdr.get_slice_times()), - [None, '0.4', '0.3', '0.2', '0.1', '0.0', None]) + assert (_print_me(hdr.get_slice_times()) == + [None, '0.4', '0.3', '0.2', '0.1', '0.0', None]) hdr['slice_code'] = slice_order_codes['alternating increasing'] - assert_equal(_print_me(hdr.get_slice_times()), - [None, '0.0', '0.3', '0.1', '0.4', '0.2', None]) + assert (_print_me(hdr.get_slice_times()) == + [None, '0.0', '0.3', '0.1', '0.4', '0.2', None]) hdr['slice_code'] = slice_order_codes['alternating decreasing'] - assert_equal(_print_me(hdr.get_slice_times()), - [None, '0.2', '0.4', '0.1', '0.3', '0.0', None]) + assert (_print_me(hdr.get_slice_times()) == + [None, '0.2', '0.4', '0.1', '0.3', '0.0', None]) hdr['slice_code'] = slice_order_codes['alternating increasing 2'] - assert_equal(_print_me(hdr.get_slice_times()), - [None, '0.2', '0.0', '0.3', '0.1', '0.4', None]) + assert (_print_me(hdr.get_slice_times()) == + [None, '0.2', '0.0', '0.3', '0.1', '0.4', None]) hdr['slice_code'] = slice_order_codes['alternating decreasing 2'] - assert_equal(_print_me(hdr.get_slice_times()), - [None, '0.4', '0.1', '0.3', '0.0', '0.2', None]) + assert (_print_me(hdr.get_slice_times()) == + [None, '0.4', '0.1', '0.3', '0.0', '0.2', None]) # test set hdr = self.header_class() hdr.set_dim_info(slice=2) # need slice dim to correspond with shape times = [None, 0.2, 0.4, 0.1, 0.3, 0.0, None] - assert_raises(HeaderDataError, hdr.set_slice_times, times) + with pytest.raises(HeaderDataError): + hdr.set_slice_times(times) hdr.set_data_shape([1, 1, 7]) - assert_raises(HeaderDataError, hdr.set_slice_times, - times[:-1]) # wrong length - assert_raises(HeaderDataError, hdr.set_slice_times, - (None,) * len(times)) # all None + with pytest.raises(HeaderDataError): + # wrong length + hdr.set_slice_times(times[:-1]) + with pytest.raises(HeaderDataError): + # all None + hdr.set_slice_times((None,) * len(times)) n_mid_times = times[:] n_mid_times[3] = None - assert_raises(HeaderDataError, hdr.set_slice_times, - n_mid_times) # None in middle + with pytest.raises(HeaderDataError): + # None in middle + hdr.set_slice_times(n_mid_times) funny_times = times[:] funny_times[3] = 0.05 - assert_raises(HeaderDataError, hdr.set_slice_times, - funny_times) # can't get single slice duration + with pytest.raises(HeaderDataError): + # can't get single slice duration + hdr.set_slice_times(funny_times) hdr.set_slice_times(times) - assert_equal(hdr.get_value_label('slice_code'), + assert (hdr.get_value_label('slice_code') == 'alternating decreasing') - assert_equal(hdr['slice_start'], 1) - assert_equal(hdr['slice_end'], 5) + assert hdr['slice_start'] == 1 + assert hdr['slice_end'] == 5 assert_array_almost_equal(hdr['slice_duration'], 0.1) # Ambiguous case @@ -573,52 +597,54 @@ def test_slice_times(self): hdr2.set_slice_times([0.1, 0]) assert len(w) == 1 # but always must be choosing sequential one first - assert_equal(hdr2.get_value_label('slice_code'), 'sequential decreasing') + 
assert hdr2.get_value_label('slice_code') == 'sequential decreasing' # and the other direction hdr2.set_slice_times([0, 0.1]) - assert_equal(hdr2.get_value_label('slice_code'), 'sequential increasing') + assert hdr2.get_value_label('slice_code') == 'sequential increasing' def test_intents(self): ehdr = self.header_class() ehdr.set_intent('t test', (10,), name='some score') - assert_equal(ehdr.get_intent(), - ('t test', (10.0,), 'some score')) + assert ehdr.get_intent() == ('t test', (10.0,), 'some score') # unknown intent name or code - unknown name will fail even when # allow_unknown=True - assert_raises(KeyError, ehdr.set_intent, 'no intention') - assert_raises(KeyError, ehdr.set_intent, 'no intention', - allow_unknown=True) - assert_raises(KeyError, ehdr.set_intent, 32767) + with pytest.raises(KeyError): + ehdr.set_intent('no intention') + with pytest.raises(KeyError): + ehdr.set_intent('no intention', allow_unknown=True) + with pytest.raises(KeyError): + ehdr.set_intent(32767) # too many parameters - assert_raises(HeaderDataError, ehdr.set_intent, 't test', (10, 10)) + with pytest.raises(HeaderDataError): + ehdr.set_intent('t test', (10, 10)) # too few parameters - assert_raises(HeaderDataError, ehdr.set_intent, 'f test', (10,)) + with pytest.raises(HeaderDataError): + ehdr.set_intent('f test', (10,)) # check unset parameters are set to 0, and name to '' ehdr.set_intent('t test') - assert_equal((ehdr['intent_p1'], ehdr['intent_p2'], ehdr['intent_p3']), + assert ((ehdr['intent_p1'], ehdr['intent_p2'], ehdr['intent_p3']) == (0, 0, 0)) - assert_equal(ehdr['intent_name'], b'') + assert ehdr['intent_name'] == b'' ehdr.set_intent('t test', (10,)) - assert_equal((ehdr['intent_p2'], ehdr['intent_p3']), (0, 0)) + assert (ehdr['intent_p2'], ehdr['intent_p3']) == (0, 0) # store intent that is not in nifti1.intent_codes recoder ehdr.set_intent(9999, allow_unknown=True) - assert_equal(ehdr.get_intent(), ('unknown code 9999', (), '')) - assert_equal(ehdr.get_intent('code'), (9999, (), '')) + assert ehdr.get_intent() == ('unknown code 9999', (), '') + assert ehdr.get_intent('code') == (9999, (), '') ehdr.set_intent(9999, name='custom intent', allow_unknown=True) - assert_equal(ehdr.get_intent(), - ('unknown code 9999', (), 'custom intent')) - assert_equal(ehdr.get_intent('code'), (9999, (), 'custom intent')) + assert ehdr.get_intent() == ('unknown code 9999', (), 'custom intent') + assert ehdr.get_intent('code') == (9999, (), 'custom intent') # store unknown intent with parameters. 
set_intent will set the # parameters, but get_intent won't return them ehdr.set_intent(code=9999, params=(1, 2, 3), allow_unknown=True) - assert_equal(ehdr.get_intent(), ('unknown code 9999', (), '')) - assert_equal(ehdr.get_intent('code'), (9999, (), '')) + assert ehdr.get_intent() == ('unknown code 9999', (), '') + assert ehdr.get_intent('code') == (9999, (), '') # unknown intent requires either zero, or three, parameters - assert_raises(HeaderDataError, ehdr.set_intent, 999, (1,), - allow_unknown=True) - assert_raises(HeaderDataError, ehdr.set_intent, 999, (1,2), - allow_unknown=True) + with pytest.raises(HeaderDataError): + ehdr.set_intent(999, (1,), allow_unknown=True) + with pytest.raises(HeaderDataError): + ehdr.set_intent(999, (1,2), allow_unknown=True) def test_set_slice_times(self): hdr = self.header_class() @@ -626,69 +652,68 @@ def test_set_slice_times(self): hdr.set_data_shape([1, 1, 7]) hdr.set_slice_duration(0.1) times = [0] * 6 - assert_raises(HeaderDataError, hdr.set_slice_times, times) + pytest.raises(HeaderDataError, hdr.set_slice_times, times) times = [None] * 7 - assert_raises(HeaderDataError, hdr.set_slice_times, times) + pytest.raises(HeaderDataError, hdr.set_slice_times, times) times = [None, 0, 1, None, 3, 4, None] - assert_raises(HeaderDataError, hdr.set_slice_times, times) + pytest.raises(HeaderDataError, hdr.set_slice_times, times) times = [None, 0, 1, 2.1, 3, 4, None] - assert_raises(HeaderDataError, hdr.set_slice_times, times) + pytest.raises(HeaderDataError, hdr.set_slice_times, times) times = [None, 0, 4, 3, 2, 1, None] - assert_raises(HeaderDataError, hdr.set_slice_times, times) + pytest.raises(HeaderDataError, hdr.set_slice_times, times) times = [0, 1, 2, 3, 4, 5, 6] hdr.set_slice_times(times) - assert_equal(hdr['slice_code'], 1) - assert_equal(hdr['slice_start'], 0) - assert_equal(hdr['slice_end'], 6) - assert_equal(hdr['slice_duration'], 1.0) + assert hdr['slice_code'] == 1 + assert hdr['slice_start'] == 0 + assert hdr['slice_end'] == 6 + assert hdr['slice_duration'] == 1.0 times = [None, 0, 1, 2, 3, 4, None] hdr.set_slice_times(times) - assert_equal(hdr['slice_code'], 1) - assert_equal(hdr['slice_start'], 1) - assert_equal(hdr['slice_end'], 5) - assert_equal(hdr['slice_duration'], 1.0) + assert hdr['slice_code'] == 1 + assert hdr['slice_start'] == 1 + assert hdr['slice_end'] == 5 + assert hdr['slice_duration'] == 1.0 times = [None, 0.4, 0.3, 0.2, 0.1, 0, None] hdr.set_slice_times(times) - assert_true(np.allclose(hdr['slice_duration'], 0.1)) + assert np.allclose(hdr['slice_duration'], 0.1) times = [None, 4, 3, 2, 1, 0, None] hdr.set_slice_times(times) - assert_equal(hdr['slice_code'], 2) + assert hdr['slice_code'] == 2 times = [None, 0, 3, 1, 4, 2, None] hdr.set_slice_times(times) - assert_equal(hdr['slice_code'], 3) + assert hdr['slice_code'] == 3 times = [None, 2, 4, 1, 3, 0, None] hdr.set_slice_times(times) - assert_equal(hdr['slice_code'], 4) + assert hdr['slice_code'] == 4 times = [None, 2, 0, 3, 1, 4, None] hdr.set_slice_times(times) - assert_equal(hdr['slice_code'], 5) + assert hdr['slice_code'] == 5 times = [None, 4, 1, 3, 0, 2, None] hdr.set_slice_times(times) - assert_equal(hdr['slice_code'], 6) + assert hdr['slice_code'] == 6 def test_xyzt_units(self): hdr = self.header_class() - assert_equal(hdr.get_xyzt_units(), ('unknown', 'unknown')) + assert hdr.get_xyzt_units() == ('unknown', 'unknown') hdr.set_xyzt_units('mm', 'sec') - assert_equal(hdr.get_xyzt_units(), ('mm', 'sec')) + assert hdr.get_xyzt_units() == ('mm', 'sec') hdr.set_xyzt_units() - 
assert_equal(hdr.get_xyzt_units(), ('unknown', 'unknown')) + assert hdr.get_xyzt_units() == ('unknown', 'unknown') def test_recoded_fields(self): hdr = self.header_class() - assert_equal(hdr.get_value_label('qform_code'), 'unknown') + assert hdr.get_value_label('qform_code') == 'unknown' hdr['qform_code'] = 3 - assert_equal(hdr.get_value_label('qform_code'), 'talairach') - assert_equal(hdr.get_value_label('sform_code'), 'unknown') + assert hdr.get_value_label('qform_code') == 'talairach' + assert hdr.get_value_label('sform_code') == 'unknown' hdr['sform_code'] = 3 - assert_equal(hdr.get_value_label('sform_code'), 'talairach') - assert_equal(hdr.get_value_label('intent_code'), 'none') + assert hdr.get_value_label('sform_code') == 'talairach' + assert hdr.get_value_label('intent_code') == 'none' hdr.set_intent('t test', (10,), name='some score') - assert_equal(hdr.get_value_label('intent_code'), 't test') - assert_equal(hdr.get_value_label('slice_code'), 'unknown') + assert hdr.get_value_label('intent_code') == 't test' + assert hdr.get_value_label('slice_code') == 'unknown' hdr['slice_code'] = 4 # alternating decreasing - assert_equal(hdr.get_value_label('slice_code'), - 'alternating decreasing') + assert hdr.get_value_label('slice_code') == 'alternating decreasing' def unshear_44(affine): @@ -707,9 +732,9 @@ class TestNifti1SingleHeader(TestNifti1PairHeader): def test_empty(self): tana.TestAnalyzeHeader.test_empty(self) hdr = self.header_class() - assert_equal(hdr['magic'], hdr.single_magic) - assert_equal(hdr['scl_slope'], 1) - assert_equal(hdr['vox_offset'], 0) + assert hdr['magic'] == hdr.single_magic + assert hdr['scl_slope'] == 1 + assert hdr['vox_offset'] == 0 def test_binblock_is_file(self): # Override test that binary string is the same as the file on disk; in @@ -718,7 +743,7 @@ def test_binblock_is_file(self): hdr = self.header_class() str_io = BytesIO() hdr.write_to(str_io) - assert_equal(str_io.getvalue(), hdr.binaryblock + b'\x00' * 4) + assert str_io.getvalue() == hdr.binaryblock + b'\x00' * 4 def test_float128(self): hdr = self.header_class() @@ -726,9 +751,10 @@ def test_float128(self): ld_dt = np.dtype(np.longdouble) if have_binary128() or ld_dt == np.dtype(np.float64): hdr.set_data_dtype(np.longdouble) - assert_equal(hdr.get_data_dtype(), ld_dt) + assert hdr.get_data_dtype() == ld_dt else: - assert_raises(HeaderDataError, hdr.set_data_dtype, np.longdouble) + with pytest.raises(HeaderDataError): + hdr.set_data_dtype(np.longdouble) class TestNifti1Pair(tana.TestAnalyzeImage, tspm.ImageScalingMixin): @@ -778,13 +804,13 @@ def test_qform_cycle(self): # None affine img = img_klass(np.zeros((2, 3, 4)), None) hdr_back = self._qform_rt(img).header - assert_equal(hdr_back['qform_code'], 3) - assert_equal(hdr_back['sform_code'], 4) + assert hdr_back['qform_code'] == 3 + assert hdr_back['sform_code'] == 4 # Try non-None affine img = img_klass(np.zeros((2, 3, 4)), np.eye(4)) hdr_back = self._qform_rt(img).header - assert_equal(hdr_back['qform_code'], 3) - assert_equal(hdr_back['sform_code'], 4) + assert hdr_back['qform_code'] == 3 + assert hdr_back['sform_code'] == 4 # Modify affine in-place - does it hold? 
img.affine[0, 0] = 9 img.to_file_map() @@ -804,8 +830,8 @@ def test_header_update_affine(self): hdr.set_qform(aff, 2) hdr.set_sform(aff, 2) img.update_header() - assert_equal(hdr['sform_code'], 2) - assert_equal(hdr['qform_code'], 2) + assert hdr['sform_code'] == 2 + assert hdr['qform_code'] == 2 def test_set_qform(self): img = self.image_class(np.zeros((2, 3, 4)), @@ -821,12 +847,12 @@ def test_set_qform(self): # Set qform using new_affine img.set_qform(new_affine, 1) assert_array_almost_equal(img.get_qform(), new_affine) - assert_equal(hdr['qform_code'], 1) + assert hdr['qform_code'] == 1 # Image get is same as header get assert_array_almost_equal(img.get_qform(), new_affine) # Coded version of get gets same information qaff, code = img.get_qform(coded=True) - assert_equal(code, 1) + assert code == 1 assert_array_almost_equal(qaff, new_affine) # Image affine now reset to best affine (which is sform) assert_array_almost_equal(img.affine, hdr.get_best_affine()) @@ -838,7 +864,7 @@ def test_set_qform(self): assert_array_almost_equal(hdr.get_zooms(), [1.1, 1.1, 1.1]) img.set_qform(None) qaff, code = img.get_qform(coded=True) - assert_equal((qaff, code), (None, 0)) + assert (qaff, code) == (None, 0) assert_array_almost_equal(hdr.get_zooms(), [1.1, 1.1, 1.1]) # Best affine similarly assert_array_almost_equal(img.affine, hdr.get_best_affine()) @@ -846,14 +872,16 @@ def test_set_qform(self): img.set_sform(None) img.set_qform(new_affine, 1) qaff, code = img.get_qform(coded=True) - assert_equal(code, 1) + assert code == 1 assert_array_almost_equal(img.affine, new_affine) new_affine[0, 1] = 2 # If affine has has shear, should raise Error if strip_shears=False img.set_qform(new_affine, 2) - assert_raises(HeaderDataError, img.set_qform, new_affine, 2, False) + with pytest.raises(HeaderDataError): + img.set_qform(new_affine, 2, False) # Unexpected keyword raises error - assert_raises(TypeError, img.get_qform, strange=True) + with pytest.raises(TypeError): + img.get_qform(strange=True) # updating None affine, None header does not work, because None header # results in setting the sform to default img = self.image_class(np.zeros((2, 3, 4)), None) @@ -875,16 +903,16 @@ def test_set_sform(self): img.affine[:] = aff_affine assert_array_almost_equal(img.affine, aff_affine) # Sform, Qform codes are 'aligned', 'unknown' by default - assert_equal((hdr['sform_code'], hdr['qform_code']), (2, 0)) + assert (hdr['sform_code'], hdr['qform_code']) == (2, 0) # Set sform using new_affine when qform is 0 img.set_sform(new_affine, 1) - assert_equal(hdr['sform_code'], 1) + assert hdr['sform_code'] == 1 assert_array_almost_equal(hdr.get_sform(), new_affine) # Image get is same as header get assert_array_almost_equal(img.get_sform(), new_affine) # Coded version gives same result saff, code = img.get_sform(coded=True) - assert_equal(code, 1) + assert code == 1 assert_array_almost_equal(saff, new_affine) # Because we've reset the sform with update_affine, the affine changes assert_array_almost_equal(img.affine, hdr.get_best_affine()) @@ -901,22 +929,23 @@ def test_set_sform(self): img.set_qform(qform_affine, 1) img.set_sform(new_affine, 1) saff, code = img.get_sform(coded=True) - assert_equal(code, 1) + assert code == 1 assert_array_almost_equal(saff, new_affine) assert_array_almost_equal(img.affine, new_affine) # zooms follow qform assert_array_almost_equal(hdr.get_zooms(), [1.2, 1.2, 1.2]) # Clear sform using None, best_affine should fall back on qform img.set_sform(None) - assert_equal(hdr['sform_code'], 0) - 
assert_equal(hdr['qform_code'], 1) + assert hdr['sform_code'] == 0 + assert hdr['qform_code'] == 1 # Sform holds previous affine from last set assert_array_almost_equal(hdr.get_sform(), saff) # Image affine follows qform assert_array_almost_equal(img.affine, qform_affine) assert_array_almost_equal(hdr.get_best_affine(), img.affine) # Unexpected keyword raises error - assert_raises(TypeError, img.get_sform, strange=True) + with pytest.raises(TypeError): + img.get_sform(strange=True) # updating None affine should also work img = self.image_class(np.zeros((2, 3, 4)), None) new_affine = np.eye(4) @@ -926,16 +955,16 @@ def test_set_sform(self): def test_sqform_code_type(self): # make sure get_s/qform returns codes as integers img = self.image_class(np.zeros((2, 3, 4)), None) - assert isinstance(img.get_sform(coded=True)[1], six.integer_types) - assert isinstance(img.get_qform(coded=True)[1], six.integer_types) + assert isinstance(img.get_sform(coded=True)[1], int) + assert isinstance(img.get_qform(coded=True)[1], int) img.set_sform(None, 3) img.set_qform(None, 3) - assert isinstance(img.get_sform(coded=True)[1], six.integer_types) - assert isinstance(img.get_qform(coded=True)[1], six.integer_types) + assert isinstance(img.get_sform(coded=True)[1], int) + assert isinstance(img.get_qform(coded=True)[1], int) img.set_sform(None, 2.0) img.set_qform(None, 4.0) - assert isinstance(img.get_sform(coded=True)[1], six.integer_types) - assert isinstance(img.get_qform(coded=True)[1], six.integer_types) + assert isinstance(img.get_sform(coded=True)[1], int) + assert isinstance(img.get_qform(coded=True)[1], int) img.set_sform(None, img.get_sform(coded=True)[1]) img.set_qform(None, img.get_qform(coded=True)[1]) @@ -957,21 +986,21 @@ def test_load_save(self): data = np.arange(np.prod(shape), dtype=npt).reshape(shape) affine = np.diag([1, 2, 3, 1]) img = IC(data, affine) - assert_equal(img.header.get_data_offset(), 0) - assert_equal(img.shape, shape) + assert img.header.get_data_offset() == 0 + assert img.shape == shape img.set_data_dtype(npt) img2 = bytesio_round_trip(img) - assert_array_equal(img2.get_data(), data) + assert_array_equal(img2.get_fdata(), data) with InTemporaryDirectory() as tmpdir: for ext in ('', '.gz', '.bz2'): fname = os.path.join(tmpdir, 'test' + img_ext + ext) img.to_filename(fname) img3 = IC.load(fname) - assert_true(isinstance(img3, img.__class__)) - assert_array_equal(img3.get_data(), data) - assert_equal(img3.header, img.header) - assert_true(isinstance(img3.get_data(), - np.memmap if ext == '' else np.ndarray)) + assert isinstance(img3, img.__class__) + assert_array_equal(img3.get_fdata(), data) + assert img3.header == img.header + assert isinstance(np.asanyarray(img3.dataobj), + np.memmap if ext == '' else np.ndarray) # del to avoid windows errors of form 'The process cannot # access the file because it is being used' del img3 @@ -996,7 +1025,7 @@ def test_load_pixdims(self): assert_array_equal(img_hdr.get_zooms(), [2, 3, 4]) # Save to stringio re_simg = bytesio_round_trip(simg) - assert_array_equal(re_simg.get_data(), arr) + assert_array_equal(re_simg.get_fdata(), arr) # Check qform, sform, pixdims are the same rimg_hdr = re_simg.header assert_array_equal(rimg_hdr.get_qform(), qaff) @@ -1013,8 +1042,8 @@ def test_affines_init(self): # Default is sform set, qform not set img = IC(arr, aff) hdr = img.header - assert_equal(hdr['qform_code'], 0) - assert_equal(hdr['sform_code'], 2) + assert hdr['qform_code'] == 0 + assert hdr['sform_code'] == 2 assert_array_equal(hdr.get_zooms(), [2, 
3, 4]) # This is also true for affines with header passed qaff = np.diag([3, 4, 5, 1]) @@ -1025,16 +1054,16 @@ def test_affines_init(self): img = IC(arr, aff, hdr) new_hdr = img.header # Again affine is sort of anonymous space - assert_equal(new_hdr['qform_code'], 0) - assert_equal(new_hdr['sform_code'], 2) + assert new_hdr['qform_code'] == 0 + assert new_hdr['sform_code'] == 2 assert_array_equal(new_hdr.get_sform(), aff) assert_array_equal(new_hdr.get_zooms(), [2, 3, 4]) # But if no affine passed, codes and matrices stay the same img = IC(arr, None, hdr) new_hdr = img.header - assert_equal(new_hdr['qform_code'], 1) # scanner + assert new_hdr['qform_code'] == 1 # scanner assert_array_equal(new_hdr.get_qform(), qaff) - assert_equal(new_hdr['sform_code'], 3) # Still talairach + assert new_hdr['sform_code'] == 3 # Still talairach assert_array_equal(new_hdr.get_sform(), saff) # Pixdims as in the original header assert_array_equal(new_hdr.get_zooms(), [3, 4, 5]) @@ -1043,13 +1072,13 @@ def test_read_no_extensions(self): IC = self.image_class arr = np.arange(24).reshape((2, 3, 4)) img = IC(arr, np.eye(4)) - assert_equal(len(img.header.extensions), 0) + assert len(img.header.extensions) == 0 img_rt = bytesio_round_trip(img) - assert_equal(len(img_rt.header.extensions), 0) + assert len(img_rt.header.extensions) == 0 # Check simple round trip with large offset img.header.set_data_offset(1024) img_rt = bytesio_round_trip(img) - assert_equal(len(img_rt.header.extensions), 0) + assert len(img_rt.header.extensions) == 0 def _get_raw_scaling(self, hdr): return hdr['scl_slope'], hdr['scl_inter'] @@ -1080,34 +1109,35 @@ def test_offset_errors(self): IC = self.image_class arr = np.arange(24).reshape((2, 3, 4)) img = IC(arr, np.eye(4)) - assert_equal(img.header.get_data_offset(), 0) + assert img.header.get_data_offset() == 0 # Saving with zero offset is OK img_rt = bytesio_round_trip(img) - assert_equal(img_rt.header.get_data_offset(), 0) + assert img_rt.header.get_data_offset() == 0 # Saving with too low offset explicitly set gives error fm = bytesio_filemap(IC) img.header.set_data_offset(16) - assert_raises(HeaderDataError, img.to_file_map, fm) + with pytest.raises(HeaderDataError): + img.to_file_map(fm) def test_extension_basics(): raw = '123' ext = Nifti1Extension('comment', raw) - assert_true(ext.get_sizeondisk() == 16) - assert_true(ext.get_content() == raw) - assert_true(ext.get_code() == 6) + assert ext.get_sizeondisk() == 16 + assert ext.get_content() == raw + assert ext.get_code() == 6 # Test that extensions already aligned to 16 bytes are not padded ext = Nifti1Extension('comment', b'x' * 24) - assert_true(ext.get_sizeondisk() == 32) + assert ext.get_sizeondisk() == 32 def test_ext_eq(): ext = Nifti1Extension('comment', '123') - assert_true(ext == ext) - assert_false(ext != ext) + assert ext == ext + assert not ext != ext ext2 = Nifti1Extension('comment', '124') - assert_false(ext == ext2) - assert_true(ext != ext2) + assert ext != ext2 + assert not ext == ext2 def test_extension_codes(): @@ -1118,12 +1148,12 @@ def test_extension_codes(): def test_extension_list(): ext_c0 = Nifti1Extensions() ext_c1 = Nifti1Extensions() - assert_equal(ext_c0, ext_c1) + assert ext_c0 == ext_c1 ext = Nifti1Extension('comment', '123') ext_c1.append(ext) - assert_false(ext_c0 == ext_c1) + assert not ext_c0 == ext_c1 ext_c0.append(ext) - assert_true(ext_c0 == ext_c1) + assert ext_c0 == ext_c1 def test_extension_io(): @@ -1132,23 +1162,23 @@ def test_extension_io(): ext1.write_to(bio, False) bio.seek(0) ebacks = 
Nifti1Extensions.from_fileobj(bio, -1, False) - assert_equal(len(ebacks), 1) - assert_equal(ext1, ebacks[0]) + assert len(ebacks) == 1 + assert ext1 == ebacks[0] # Check the start is what we expect exp_dtype = np.dtype([('esize', 'i4'), ('ecode', 'i4')]) bio.seek(0) buff = np.ndarray(shape=(), dtype=exp_dtype, buffer=bio.read(16)) - assert_equal(buff['esize'], 32) - assert_equal(buff['ecode'], 6) + assert buff['esize'] == 32 + assert buff['ecode'] == 6 # Try another extension on top bio.seek(32) ext2 = Nifti1Extension(6, b'Comment') ext2.write_to(bio, False) bio.seek(0) ebacks = Nifti1Extensions.from_fileobj(bio, -1, False) - assert_equal(len(ebacks), 2) - assert_equal(ext1, ebacks[0]) - assert_equal(ext2, ebacks[1]) + assert len(ebacks) == 2 + assert ext1 == ebacks[0] + assert ext2 == ebacks[1] # Rewrite but deliberately setting esize wrongly bio.truncate(0) bio.seek(0) @@ -1164,11 +1194,11 @@ def test_extension_io(): bio.seek(0) with warnings.catch_warnings(record=True) as warns: ebacks = Nifti1Extensions.from_fileobj(bio, -1, False) - assert_equal(len(warns), 1) - assert_equal(warns[0].category, UserWarning) - assert_equal(len(ebacks), 2) - assert_equal(ext1, ebacks[0]) - assert_equal(ext2, ebacks[1]) + assert len(warns) == 1 + assert warns[0].category == UserWarning + assert len(ebacks) == 2 + assert ext1 == ebacks[0] + assert ext2 == ebacks[1] def test_nifti_extensions(): @@ -1176,25 +1206,25 @@ def test_nifti_extensions(): # basic checks of the available extensions hdr = nim.header exts_container = hdr.extensions - assert_equal(len(exts_container), 2) - assert_equal(exts_container.count('comment'), 2) - assert_equal(exts_container.count('afni'), 0) - assert_equal(exts_container.get_codes(), [6, 6]) - assert_equal((exts_container.get_sizeondisk()) % 16, 0) + assert len(exts_container) == 2 + assert exts_container.count('comment') == 2 + assert exts_container.count('afni') == 0 + assert exts_container.get_codes() == [6, 6] + assert (exts_container.get_sizeondisk()) % 16 == 0 # first extension should be short one - assert_equal(exts_container[0].get_content(), b'extcomment1') + assert exts_container[0].get_content() == b'extcomment1' # add one afniext = Nifti1Extension('afni', '') exts_container.append(afniext) - assert_true(exts_container.get_codes() == [6, 6, 4]) - assert_true(exts_container.count('comment') == 2) - assert_true(exts_container.count('afni') == 1) - assert_true((exts_container.get_sizeondisk()) % 16 == 0) + assert exts_container.get_codes() == [6, 6, 4] + assert exts_container.count('comment') == 2 + assert exts_container.count('afni') == 1 + assert (exts_container.get_sizeondisk()) % 16 == 0 # delete one del exts_container[1] - assert_true(exts_container.get_codes() == [6, 4]) - assert_true(exts_container.count('comment') == 1) - assert_true(exts_container.count('afni') == 1) + assert exts_container.get_codes() == [6, 4] + assert exts_container.count('comment') == 1 + assert exts_container.count('afni') == 1 @dicom_test @@ -1205,47 +1235,47 @@ def test_nifti_dicom_extension(): # create an empty dataset if no content provided (to write a new header) dcmext = Nifti1DicomExtension(2, b'') - assert_equal(dcmext.get_content().__class__, pydicom.dataset.Dataset) - assert_equal(len(dcmext.get_content().values()), 0) + assert dcmext.get_content().__class__ == pydicom.dataset.Dataset + assert len(dcmext.get_content().values()) == 0 # create an empty dataset if no content provided (to write a new header) dcmext = Nifti1DicomExtension(2, None) - 
assert_equal(dcmext.get_content().__class__, pydicom.dataset.Dataset) - assert_equal(len(dcmext.get_content().values()), 0) + assert dcmext.get_content().__class__ == pydicom.dataset.Dataset + assert len(dcmext.get_content().values()) == 0 # use a dataset if provided ds = pydicom.dataset.Dataset() ds.add_new((0x10, 0x20), 'LO', 'NiPy') dcmext = Nifti1DicomExtension(2, ds) - assert_equal(dcmext.get_content().__class__, pydicom.dataset.Dataset) - assert_equal(len(dcmext.get_content().values()), 1) - assert_equal(dcmext.get_content().PatientID, 'NiPy') + assert dcmext.get_content().__class__ == pydicom.dataset.Dataset + assert len(dcmext.get_content().values()) == 1 + assert dcmext.get_content().PatientID == 'NiPy' # create a single dicom tag (Patient ID, [0010,0020]) with Explicit VR / LE dcmbytes_explicit = struct.pack('2H2sH4s', 0x10, 0x20, @@ -1253,30 +1283,31 @@ def test_nifti_dicom_extension(): 'NiPy'.encode('utf-8')) hdr_be = Nifti1Header(endianness='>') # Big Endian Nifti1Header dcmext = Nifti1DicomExtension(2, dcmbytes_explicit_be, parent_hdr=hdr_be) - assert_equal(dcmext.__class__, Nifti1DicomExtension) - assert_equal(dcmext._guess_implicit_VR(), False) - assert_equal(dcmext.get_code(), 2) - assert_equal(dcmext.get_content().PatientID, 'NiPy') - assert_equal(dcmext.get_content()[0x10, 0x20].value, 'NiPy') - assert_equal(len(dcmext.get_content().values()), 1) - assert_equal(dcmext._mangle(dcmext.get_content()), dcmbytes_explicit_be) - assert_equal(dcmext.get_sizeondisk() % 16, 0) + assert dcmext.__class__ == Nifti1DicomExtension + assert dcmext._guess_implicit_VR() is False + assert dcmext.get_code() == 2 + assert dcmext.get_content().PatientID == 'NiPy' + assert dcmext.get_content()[0x10, 0x20].value == 'NiPy' + assert len(dcmext.get_content().values()) == 1 + assert dcmext._mangle(dcmext.get_content()) == dcmbytes_explicit_be + assert dcmext.get_sizeondisk() % 16 == 0 # Check that a dicom dataset is written w/ BE encoding when not created # using BE bytestring when given a BE nifti header dcmext = Nifti1DicomExtension(2, ds, parent_hdr=hdr_be) - assert_equal(dcmext._mangle(dcmext.get_content()), dcmbytes_explicit_be) + assert dcmext._mangle(dcmext.get_content()) == dcmbytes_explicit_be # dicom extension access from nifti extensions - assert_equal(exts_container.count('dicom'), 0) + assert exts_container.count('dicom') == 0 exts_container.append(dcmext) - assert_equal(exts_container.count('dicom'), 1) - assert_equal(exts_container.get_codes(), [6, 6, 2]) - assert_equal(dcmext._mangle(dcmext.get_content()), dcmbytes_explicit_be) - assert_equal(dcmext.get_sizeondisk() % 16, 0) + assert exts_container.count('dicom') == 1 + assert exts_container.get_codes() == [6, 6, 2] + assert dcmext._mangle(dcmext.get_content()) == dcmbytes_explicit_be + assert dcmext.get_sizeondisk() % 16 == 0 # creating an extension with bad content should raise - assert_raises(TypeError, Nifti1DicomExtension, 2, 0) + with pytest.raises(TypeError): + Nifti1DicomExtension(2, 0) class TestNifti1General(object): @@ -1295,38 +1326,38 @@ def test_loadsave_cycle(self): # ensure we have extensions hdr = nim.header exts_container = hdr.extensions - assert_true(len(exts_container) > 0) + assert len(exts_container) > 0 # write into the air ;-) lnim = bytesio_round_trip(nim) hdr = lnim.header lexts_container = hdr.extensions - assert_equal(exts_container, lexts_container) + assert exts_container == lexts_container # build int16 image data = np.ones((2, 3, 4, 5), dtype='int16') img = self.single_class(data, np.eye(4)) hdr = 
img.header - assert_equal(hdr.get_data_dtype(), np.int16) + assert hdr.get_data_dtype() == np.int16 # default should have no scaling assert_array_equal(hdr.get_slope_inter(), (None, None)) # set scaling hdr.set_slope_inter(2, 8) - assert_equal(hdr.get_slope_inter(), (2, 8)) + assert hdr.get_slope_inter() == (2, 8) # now build new image with updated header wnim = self.single_class(data, np.eye(4), header=hdr) - assert_equal(wnim.get_data_dtype(), np.int16) + assert wnim.get_data_dtype() == np.int16 # Header scaling reset to default by image creation - assert_equal(wnim.header.get_slope_inter(), (None, None)) + assert wnim.header.get_slope_inter() == (None, None) # But we can reset it again after image creation wnim.header.set_slope_inter(2, 8) - assert_equal(wnim.header.get_slope_inter(), (2, 8)) + assert wnim.header.get_slope_inter() == (2, 8) # write into the air again ;-) lnim = bytesio_round_trip(wnim) - assert_equal(lnim.get_data_dtype(), np.int16) + assert lnim.get_data_dtype() == np.int16 # Scaling applied - assert_array_equal(lnim.get_data(), data * 2. + 8.) + assert_array_equal(lnim.get_fdata(), data * 2. + 8.) # slope, inter reset by image creation, but saved in proxy - assert_equal(lnim.header.get_slope_inter(), (None, None)) - assert_equal((lnim.dataobj.slope, lnim.dataobj.inter), (2, 8)) + assert lnim.header.get_slope_inter() == (None, None) + assert (lnim.dataobj.slope, lnim.dataobj.inter) == (2, 8) def test_load(self): # test module level load. We try to load a nii and an .img and a .hdr @@ -1340,11 +1371,11 @@ def test_load(self): with InTemporaryDirectory(): for img in (simg, pimg): save(img, 'test.nii') - assert_array_equal(arr, load('test.nii').get_data()) + assert_array_equal(arr, load('test.nii').get_fdata()) save(simg, 'test.img') - assert_array_equal(arr, load('test.img').get_data()) + assert_array_equal(arr, load('test.img').get_fdata()) save(simg, 'test.hdr') - assert_array_equal(arr, load('test.hdr').get_data()) + assert_array_equal(arr, load('test.hdr').get_fdata()) def test_float_int_min_max(self): # Conversion between float and int @@ -1356,8 +1387,8 @@ def test_float_int_min_max(self): for out_dt in IUINT_TYPES: img = self.single_class(arr, aff) img_back = bytesio_round_trip(img) - arr_back_sc = img_back.get_data() - assert_true(np.allclose(arr, arr_back_sc)) + arr_back_sc = img_back.get_fdata() + assert np.allclose(arr, arr_back_sc) def test_float_int_spread(self): # Test rounding error for spread of values @@ -1370,7 +1401,7 @@ def test_float_int_spread(self): for out_dt in IUINT_TYPES: img = self.single_class(arr_t, aff) img_back = bytesio_round_trip(img) - arr_back_sc = img_back.get_data() + arr_back_sc = img_back.get_fdata() slope, inter = img_back.header.get_slope_inter() # Get estimate for error max_miss = rt_err_estimate(arr_t, arr_back_sc.dtype, slope, @@ -1378,7 +1409,7 @@ def test_float_int_spread(self): # Simulate allclose test with large atol diff = np.abs(arr_t - arr_back_sc) rdiff = diff / np.abs(arr_t) - assert_true(np.all((diff <= max_miss) | (rdiff <= 1e-5))) + assert np.all((diff <= max_miss) | (rdiff <= 1e-5)) def test_rt_bias(self): # Check for bias in round trip @@ -1393,7 +1424,7 @@ def test_rt_bias(self): for out_dt in IUINT_TYPES: img = self.single_class(arr_t, aff) img_back = bytesio_round_trip(img) - arr_back_sc = img_back.get_data() + arr_back_sc = img_back.get_fdata() slope, inter = img_back.header.get_slope_inter() bias = np.mean(arr_t - arr_back_sc) # Get estimate for error @@ -1401,7 +1432,37 @@ def test_rt_bias(self): inter) # 
Hokey use of max_miss as a std estimate bias_thresh = np.max([max_miss / np.sqrt(count), eps]) - assert_true(np.abs(bias) < bias_thresh) + assert np.abs(bias) < bias_thresh + + def test_reoriented_dim_info(self): + # Check that dim_info is reoriented correctly + arr = np.arange(24).reshape((2, 3, 4)) + # Start as RAS + aff = np.diag([2, 3, 4, 1]) + simg = self.single_class(arr, aff) + for freq, phas, slic in ((0, 1, 2), + (0, 2, 1), + (1, 0, 2), + (2, 0, 1), + (None, None, None), + (0, 2, None), + (0, None, None), + (None, 2, 1), + (None, None, 1), + ): + simg.header.set_dim_info(freq, phas, slic) + fdir = 'RAS'[freq] if freq is not None else None + pdir = 'RAS'[phas] if phas is not None else None + sdir = 'RAS'[slic] if slic is not None else None + for ornt in ALL_ORNTS: + rimg = simg.as_reoriented(np.array(ornt)) + axcode = aff2axcodes(rimg.affine) + dirs = ''.join(axcode).replace('P', 'A').replace('I', 'S').replace('L', 'R') + new_freq, new_phas, new_slic = rimg.header.get_dim_info() + new_fdir = dirs[new_freq] if new_freq is not None else None + new_pdir = dirs[new_phas] if new_phas is not None else None + new_sdir = dirs[new_slic] if new_slic is not None else None + assert (new_fdir, new_pdir, new_sdir) == (fdir, pdir, sdir) @runif_extra_has('slow') @@ -1413,8 +1474,8 @@ def test_large_nifti1(): with InTemporaryDirectory(): img.to_filename('test.nii.gz') del img - data = load('test.nii.gz').get_data() + data = load('test.nii.gz').get_fdata() # Check that the data are all ones - assert_equal(image_shape, data.shape) + assert image_shape == data.shape n_ones = np.sum((data == 1.)) - assert_equal(np.prod(image_shape), n_ones) + assert np.prod(image_shape) == n_ones diff --git a/nibabel/tests/test_nifti2.py b/nibabel/tests/test_nifti2.py index 8c7afd9ea4..ca6e7d8125 100644 --- a/nibabel/tests/test_nifti2.py +++ b/nibabel/tests/test_nifti2.py @@ -7,7 +7,6 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## ''' Tests for nifti2 reading package ''' -from __future__ import division, print_function, absolute_import import os import numpy as np @@ -20,7 +19,6 @@ from .test_nifti1 import (TestNifti1PairHeader, TestNifti1SingleHeader, TestNifti1Pair, TestNifti1Image, TestNifti1General) -from nose.tools import assert_equal from numpy.testing import assert_array_equal from ..testing import data_path @@ -51,16 +49,14 @@ def test_eol_check(self): hdr['eol_check'] = 0 fhdr, message, raiser = self.log_chk(hdr, 20) assert_array_equal(fhdr['eol_check'], good_eol) - assert_equal(message, - 'EOL check all 0; ' - 'setting EOL check to 13, 10, 26, 10') + assert message == 'EOL check all 0; setting EOL check to 13, 10, 26, 10' hdr['eol_check'] = (13, 10, 0, 10) fhdr, message, raiser = self.log_chk(hdr, 40) assert_array_equal(fhdr['eol_check'], good_eol) - assert_equal(message, - 'EOL check not 0 or 13, 10, 26, 10; ' - 'data may be corrupted by EOL conversion; ' - 'setting EOL check to 13, 10, 26, 10') + assert (message == + 'EOL check not 0 or 13, 10, 26, 10; ' + 'data may be corrupted by EOL conversion; ' + 'setting EOL check to 13, 10, 26, 10') class TestNifti2PairHeader(_Nifti2Mixin, TestNifti1PairHeader): @@ -111,6 +107,6 @@ def test_nifti12_conversion(): in_hdr.set_data_dtype(dtype_type) in_hdr.extensions[:] = [ext1, ext2] out_hdr = out_type.from_header(in_hdr) - assert_equal(out_hdr.get_data_shape(), shape) - assert_equal(out_hdr.get_data_dtype(), dtype_type) - assert_equal(in_hdr.extensions, out_hdr.extensions) + assert out_hdr.get_data_shape() == shape + assert 
out_hdr.get_data_dtype() == dtype_type + assert in_hdr.extensions == out_hdr.extensions diff --git a/nibabel/tests/test_openers.py b/nibabel/tests/test_openers.py index 6aeb66aaf7..85a8f4a0a7 100644 --- a/nibabel/tests/test_openers.py +++ b/nibabel/tests/test_openers.py @@ -13,14 +13,14 @@ from io import BytesIO, UnsupportedOperation from distutils.version import StrictVersion -from ..py3k import asstr, asbytes +from numpy.compat.py3k import asstr, asbytes from ..openers import Opener, ImageOpener, HAVE_INDEXED_GZIP, BZ2File from ..tmpdirs import InTemporaryDirectory from ..volumeutils import BinOpener -import mock -from nose.tools import (assert_true, assert_false, assert_equal, - assert_not_equal, assert_raises) +import unittest +from unittest import mock +import pytest from ..testing import error_warnings @@ -41,24 +41,24 @@ def read(self): def test_Opener(): # Test default mode is 'rb' fobj = Opener(__file__) - assert_equal(fobj.mode, 'rb') + assert fobj.mode == 'rb' fobj.close() # That it's a context manager with Opener(__file__) as fobj: - assert_equal(fobj.mode, 'rb') + assert fobj.mode == 'rb' # That we can set the mode with Opener(__file__, 'r') as fobj: - assert_equal(fobj.mode, 'r') + assert fobj.mode == 'r' # with keyword arguments with Opener(__file__, mode='r') as fobj: - assert_equal(fobj.mode, 'r') + assert fobj.mode == 'r' # fileobj returns fileobj passed through message = b"Wine? Wouldn't you?" for obj in (BytesIO(message), Lunk(message)): with Opener(obj) as fobj: - assert_equal(fobj.read(), message) + assert fobj.read() == message # Which does not close the object - assert_false(obj.closed) + assert not obj.closed # mode is gently ignored fobj = Opener(obj, mode='r') @@ -77,31 +77,34 @@ def test_Opener_various(): sobj): with Opener(input, 'wb') as fobj: fobj.write(message) - assert_equal(fobj.tell(), len(message)) + assert fobj.tell() == len(message) if input == sobj: input.seek(0) with Opener(input, 'rb') as fobj: message_back = fobj.read() - assert_equal(message, message_back) + assert message == message_back if input == sobj: # Fileno is unsupported for BytesIO - assert_raises(UnsupportedOperation, fobj.fileno) + with pytest.raises(UnsupportedOperation): + fobj.fileno() elif input.endswith('.bz2') and not bz2_fileno: - assert_raises(AttributeError, fobj.fileno) + with pytest.raises(AttributeError): + fobj.fileno() # indexed gzip is used by default, and drops file # handles by default, so we don't have a fileno. 
elif input.endswith('gz') and HAVE_INDEXED_GZIP and \ StrictVersion(igzip.__version__) >= StrictVersion('0.7.0'): - assert_raises(igzip.NoHandleError, fobj.fileno) + with pytest.raises(igzip.NoHandleError): + fobj.fileno() else: # Just check there is a fileno - assert_not_equal(fobj.fileno(), 0) + assert fobj.fileno() != 0 def test_BinOpener(): with error_warnings(): - assert_raises(DeprecationWarning, - BinOpener, 'test.txt', 'r') + with pytest.raises(DeprecationWarning): + BinOpener('test.txt', 'r') class MockIndexedGzipFile(GzipFile): @@ -159,22 +162,16 @@ def test_Opener_gzip_type(): assert isinstance(Opener(fname, **kwargs).fobj, expected) -class TestImageOpener: - - def setUp(self): - self.compress_ext_map = ImageOpener.compress_ext_map.copy() - - def teardown(self): - ImageOpener.compress_ext_map = self.compress_ext_map - +class TestImageOpener(unittest.TestCase): def test_vanilla(self): # Test that ImageOpener does add '.mgz' as gzipped file type with InTemporaryDirectory(): with ImageOpener('test.gz', 'w') as fobj: - assert_true(hasattr(fobj.fobj, 'compress')) + assert hasattr(fobj.fobj, 'compress') with ImageOpener('test.mgz', 'w') as fobj: - assert_true(hasattr(fobj.fobj, 'compress')) + assert hasattr(fobj.fobj, 'compress') + @mock.patch.dict('nibabel.openers.ImageOpener.compress_ext_map') def test_new_association(self): def file_opener(fileish, mode): return open(fileish, mode) @@ -182,16 +179,16 @@ def file_opener(fileish, mode): # Add the association n_associations = len(ImageOpener.compress_ext_map) ImageOpener.compress_ext_map['.foo'] = (file_opener, ('mode',)) - assert_equal(n_associations + 1, len(ImageOpener.compress_ext_map)) - assert_true('.foo' in ImageOpener.compress_ext_map) + assert n_associations + 1 == len(ImageOpener.compress_ext_map) + assert '.foo' in ImageOpener.compress_ext_map with InTemporaryDirectory(): with ImageOpener('test.foo', 'w'): pass - assert_true(os.path.exists('test.foo')) + assert os.path.exists('test.foo') # Check this doesn't add anything to parent - assert_false('.foo' in Opener.compress_ext_map) + assert '.foo' not in Opener.compress_ext_map def test_file_like_wrapper(): @@ -199,17 +196,17 @@ def test_file_like_wrapper(): message = b"History of the nude in" sobj = BytesIO() fobj = Opener(sobj) - assert_equal(fobj.tell(), 0) + assert fobj.tell() == 0 fobj.write(message) - assert_equal(fobj.tell(), len(message)) + assert fobj.tell() == len(message) fobj.seek(0) - assert_equal(fobj.tell(), 0) - assert_equal(fobj.read(6), message[:6]) - assert_false(fobj.closed) + assert fobj.tell() == 0 + assert fobj.read(6) == message[:6] + assert not fobj.closed fobj.close() - assert_true(fobj.closed) + assert fobj.closed # Added the fileobj name - assert_equal(fobj.name, None) + assert fobj.name is None def test_compressionlevel(): @@ -236,8 +233,8 @@ class MyOpener(Opener): with open(fname, 'rb') as fobj: my_selves_smaller = fobj.read() sizes[compresslevel] = len(my_selves_smaller) - assert_equal(sizes['default'], sizes[default_val]) - assert_true(sizes[1] > sizes[5]) + assert sizes['default'] == sizes[default_val] + assert sizes[1] > sizes[5] def test_compressed_ext_case(): @@ -256,23 +253,23 @@ class StrictOpener(Opener): with Opener(fname, 'wb') as fobj: fobj.write(contents) with Opener(fname, 'rb') as fobj: - assert_equal(fobj.read(), contents) + assert fobj.read() == contents os.unlink(fname) with StrictOpener(fname, 'wb') as fobj: fobj.write(contents) with StrictOpener(fname, 'rb') as fobj: - assert_equal(fobj.read(), contents) + assert fobj.read() 
== contents lext = ext.lower() if lext != ext: # extension should not be recognized -> file - assert_true(isinstance(fobj.fobj, file_class)) + assert isinstance(fobj.fobj, file_class) elif lext == 'gz': try: from ..openers import IndexedGzipFile except ImportError: IndexedGzipFile = GzipFile - assert_true(isinstance(fobj.fobj, (GzipFile, IndexedGzipFile))) + assert isinstance(fobj.fobj, (GzipFile, IndexedGzipFile)) else: - assert_true(isinstance(fobj.fobj, BZ2File)) + assert isinstance(fobj.fobj, BZ2File) def test_name(): @@ -287,22 +284,22 @@ def test_name(): lunk): exp_name = input if type(input) == type('') else None with Opener(input, 'wb') as fobj: - assert_equal(fobj.name, exp_name) + assert fobj.name == exp_name def test_set_extensions(): # Test that we can add extensions that are compressed with InTemporaryDirectory(): with Opener('test.gz', 'w') as fobj: - assert_true(hasattr(fobj.fobj, 'compress')) + assert hasattr(fobj.fobj, 'compress') with Opener('test.glrph', 'w') as fobj: - assert_false(hasattr(fobj.fobj, 'compress')) + assert not hasattr(fobj.fobj, 'compress') class MyOpener(Opener): compress_ext_map = Opener.compress_ext_map.copy() compress_ext_map['.glrph'] = Opener.gz_def with MyOpener('test.glrph', 'w') as fobj: - assert_true(hasattr(fobj.fobj, 'compress')) + assert hasattr(fobj.fobj, 'compress') def test_close_if_mine(): @@ -319,11 +316,11 @@ def test_close_if_mine(): # gzip objects have no 'closed' attribute has_closed = hasattr(fobj.fobj, 'closed') if has_closed: - assert_false(fobj.closed) + assert not fobj.closed fobj.close_if_mine() is_str = type(input) is type('') if has_closed: - assert_equal(fobj.closed, is_str) + assert fobj.closed == is_str def test_iter(): @@ -345,11 +342,12 @@ def test_iter(): fobj.write(asbytes(line + os.linesep)) with Opener(input, 'rb') as fobj: for back_line, line in zip(fobj, lines): - assert_equal(asstr(back_line).rstrip(), line) + assert asstr(back_line).rstrip() == line if not does_t: continue with Opener(input, 'rt') as fobj: for back_line, line in zip(fobj, lines): - assert_equal(back_line.rstrip(), line) + assert back_line.rstrip() == line lobj = Opener(Lunk('')) - assert_raises(TypeError, list, lobj) + with pytest.raises(TypeError): + list(lobj) diff --git a/nibabel/tests/test_optpkg.py b/nibabel/tests/test_optpkg.py index c0930e848a..925180ce6b 100644 --- a/nibabel/tests/test_optpkg.py +++ b/nibabel/tests/test_optpkg.py @@ -1,34 +1,34 @@ """ Testing optpkg module """ -import mock +from unittest import mock import types import sys +import builtins from distutils.version import LooseVersion -from nose import SkipTest -from nose.tools import (assert_true, assert_false, assert_raises, - assert_equal) +from unittest import SkipTest +import pytest - -from nibabel.py3k import builtins from nibabel.optpkg import optional_package from nibabel.tripwire import TripWire, TripWireError def assert_good(pkg_name, min_version=None): pkg, have_pkg, setup = optional_package(pkg_name, min_version=min_version) - assert_true(have_pkg) - assert_equal(sys.modules[pkg_name], pkg) - assert_equal(setup(), None) + assert have_pkg + assert sys.modules[pkg_name] == pkg + assert setup() is None def assert_bad(pkg_name, min_version=None): pkg, have_pkg, setup = optional_package(pkg_name, min_version=min_version) - assert_false(have_pkg) - assert_true(isinstance(pkg, TripWire)) - assert_raises(TripWireError, getattr, pkg, 'a_method') - assert_raises(SkipTest, setup) + assert not have_pkg + assert isinstance(pkg, TripWire) + with pytest.raises(TripWireError): 
+ pkg.a_method + with pytest.raises(SkipTest): + setup() def test_basic(): @@ -39,10 +39,10 @@ def test_basic(): # We never have package _not_a_package assert_bad('_not_a_package') - # setup_module imports nose, so make sure we don't disrupt that + # setup_module imports unittest, so make sure we don't disrupt that orig_import = builtins.__import__ def raise_Exception(*args, **kwargs): - if args[0] == 'nose': + if args[0] == 'unittest': return orig_import(*args, **kwargs) raise Exception( "non ImportError could be thrown by some malfunctioning module " @@ -54,7 +54,7 @@ def raise_Exception(*args, **kwargs): def test_versions(): fake_name = '_a_fake_package' fake_pkg = types.ModuleType(fake_name) - assert_false('fake_pkg' in sys.modules) + assert 'fake_pkg' not in sys.modules # Not inserted yet assert_bad(fake_name) try: @@ -77,7 +77,6 @@ def test_versions(): try: pkg.some_method except TripWireError as err: - assert_equal(str(err), - 'These functions need _a_fake_package version >= 3.0') + assert str(err) == 'These functions need _a_fake_package version >= 3.0' finally: del sys.modules[fake_name] diff --git a/nibabel/tests/test_orientations.py b/nibabel/tests/test_orientations.py index 58c5e5f9e2..a3ad215488 100644 --- a/nibabel/tests/test_orientations.py +++ b/nibabel/tests/test_orientations.py @@ -11,7 +11,7 @@ import numpy as np import warnings -from nose.tools import assert_true, assert_equal, assert_raises +import pytest from numpy.testing import assert_array_equal @@ -83,6 +83,18 @@ OUT_ORNTS = [np.array(ornt) for ornt in OUT_ORNTS] +_LABELS = ['RL', 'AP', 'SI'] +ALL_AXCODES = [(_LABELS[i0][j0], _LABELS[i1][j1], _LABELS[i2][j2]) + for i0 in range(3) for i1 in range(3) for i2 in range(3) + if i0 != i1 != i2 != i0 + for j0 in range(2) for j1 in range(2) for j2 in range(2)] + +ALL_ORNTS = [[[i0, j0], [i1, j1], [i2, j2]] + for i0 in range(3) for i1 in range(3) for i2 in range(3) + if i0 != i1 != i2 != i0 + for j0 in [1, -1] for j1 in [1, -1] for j2 in [1, -1]] + + def same_transform(taff, ornt, shape): # Applying transformations implied by `ornt` to a made-up array # ``arr`` of shape `shape`, results in ``t_arr``. 
When the point @@ -105,7 +117,7 @@ def same_transform(taff, ornt, shape): o2t_pts = np.dot(itaff[:3, :3], arr_pts) + itaff[:3, 3][:, None] assert np.allclose(np.round(o2t_pts), o2t_pts) # fancy index out the t_arr values - vals = t_arr[list(o2t_pts.astype('i'))] + vals = t_arr[tuple(o2t_pts.astype('i'))] return np.all(vals == arr.ravel()) @@ -116,15 +128,16 @@ def test_apply(): # Test 4D with an example orientation ornt = OUT_ORNTS[-1] t_arr = apply_orientation(a[:, :, :, None], ornt) - assert_equal(t_arr.ndim, 4) + assert t_arr.ndim == 4 # Orientation errors - assert_raises(OrientationError, - apply_orientation, - a[:, :, 1], ornt) - assert_raises(OrientationError, - apply_orientation, - a, - [[0, 1], [np.nan, np.nan], [2, 1]]) + with pytest.raises(OrientationError): + apply_orientation(a[:, :, 1], ornt) + with pytest.raises(OrientationError): + apply_orientation(a, [[0, 1], [np.nan, np.nan], [2, 1]]) + shape = np.array(a.shape) + for ornt in ALL_ORNTS: + t_arr = apply_orientation(a, ornt) + assert_array_equal(a.shape, np.array(t_arr.shape)[np.array(ornt)[:, 0]]) def test_flip_axis(): @@ -155,7 +168,7 @@ def test_io_orientation(): ornt = io_orientation(in_arr) assert_array_equal(ornt, out_ornt) taff = inv_ornt_aff(ornt, shape) - assert_true(same_transform(taff, ornt, shape)) + assert same_transform(taff, ornt, shape) for axno in range(3): arr = in_arr.copy() ex_ornt = out_ornt.copy() @@ -166,7 +179,7 @@ def test_io_orientation(): ornt = io_orientation(arr) assert_array_equal(ornt, ex_ornt) taff = inv_ornt_aff(ornt, shape) - assert_true(same_transform(taff, ornt, shape)) + assert same_transform(taff, ornt, shape) # Test nasty hang for zero columns rzs = np.c_[np.diag([2, 3, 4, 5]), np.zeros((4, 3))] arr = from_matvec(rzs, [15, 16, 17, 18]) @@ -236,51 +249,52 @@ def test_ornt_transform(): [[1, -1], [2, 1], [0, 1]] ) # Must have same shape - assert_raises(ValueError, - ornt_transform, - [[0, 1], [1, 1]], - [[0, 1], [1, 1], [2, 1]]) + with pytest.raises(ValueError): + ornt_transform([[0, 1], [1, 1]], [[0, 1], [1, 1], [2, 1]]) # Must be (N,2) in shape - assert_raises(ValueError, - ornt_transform, - [[0, 1, 1], [1, 1, 1]], - [[0, 1, 1], [1, 1, 1]]) + with pytest.raises(ValueError): + ornt_transform([[0, 1, 1], [1, 1, 1]], + [[0, 1, 1], [1, 1, 1]]) # Target axes must exist in source - assert_raises(ValueError, - ornt_transform, - [[0, 1], [1, 1], [1, 1]], - [[0, 1], [1, 1], [2, 1]]) + with pytest.raises(ValueError): + ornt_transform([[0, 1], [1, 1], [1, 1]], + [[0, 1], [1, 1], [2, 1]]) def test_ornt2axcodes(): # Recoding orientation to axis codes labels = (('left', 'right'), ('back', 'front'), ('down', 'up')) - assert_equal(ornt2axcodes([[0, 1], - [1, 1], - [2, 1]], labels), ('right', 'front', 'up')) - assert_equal(ornt2axcodes([[0, -1], - [1, -1], - [2, -1]], labels), ('left', 'back', 'down')) - assert_equal(ornt2axcodes([[2, -1], - [1, -1], - [0, -1]], labels), ('down', 'back', 'left')) - assert_equal(ornt2axcodes([[1, 1], - [2, -1], - [0, 1]], labels), ('front', 'down', 'right')) + assert ornt2axcodes([[0, 1], + [1, 1], + [2, 1]], labels) == ('right', 'front', 'up') + assert ornt2axcodes([[0, -1], + [1, -1], + [2, -1]], labels) == ('left', 'back', 'down') + assert ornt2axcodes([[2, -1], + [1, -1], + [0, -1]], labels) == ('down', 'back', 'left') + assert ornt2axcodes([[1, 1], + [2, -1], + [0, 1]], labels) == ('front', 'down', 'right') # default is RAS output directions - assert_equal(ornt2axcodes([[0, 1], - [1, 1], - [2, 1]]), ('R', 'A', 'S')) + assert ornt2axcodes([[0, 1], + [1, 1], + [2, 
1]]) == ('R', 'A', 'S') # dropped axes produce None - assert_equal(ornt2axcodes([[0, 1], - [np.nan, np.nan], - [2, 1]]), ('R', None, 'S')) + assert ornt2axcodes([[0, 1], + [np.nan, np.nan], + [2, 1]]) == ('R', None, 'S') # Non integer axes raises error - assert_raises(ValueError, ornt2axcodes, [[0.1, 1]]) + with pytest.raises(ValueError): + ornt2axcodes([[0.1, 1]]) # As do directions not in range - assert_raises(ValueError, ornt2axcodes, [[0, 0]]) + with pytest.raises(ValueError): + ornt2axcodes([[0, 0]]) + + for axcodes, ornt in zip(ALL_AXCODES, ALL_ORNTS): + assert ornt2axcodes(ornt) == axcodes def test_axcodes2ornt(): @@ -320,48 +334,50 @@ def test_axcodes2ornt(): # Missing axcodes raise an error assert_array_equal(axcodes2ornt('RAS'), default) - assert_raises(ValueError, axcodes2ornt, 'rAS') + with pytest.raises(ValueError): + axcodes2ornt('rAS') # None is OK as axis code assert_array_equal(axcodes2ornt(('R', None, 'S')), [[0, 1], [np.nan, np.nan], [2, 1]]) # Bad axis code with None also raises error. - assert_raises(ValueError, axcodes2ornt, ('R', None, 's')) + with pytest.raises(ValueError): + axcodes2ornt(('R', None, 's')) # Axis codes checked with custom labels labels = ('SD', 'BF', 'lh') assert_array_equal(axcodes2ornt('BlD', labels), [[1, -1], [2, -1], [0, 1]]) - assert_raises(ValueError, axcodes2ornt, 'blD', labels) + with pytest.raises(ValueError): + axcodes2ornt('blD', labels) # Duplicate labels - assert_raises(ValueError, axcodes2ornt, 'blD', ('SD', 'BF', 'lD')) - assert_raises(ValueError, axcodes2ornt, 'blD', ('SD', 'SF', 'lD')) + for labels in [('SD', 'BF', 'lD'),('SD', 'SF', 'lD')]: + with pytest.raises(ValueError): + axcodes2ornt('blD', labels) + + for axcodes, ornt in zip(ALL_AXCODES, ALL_ORNTS): + assert_array_equal(axcodes2ornt(axcodes), ornt) def test_aff2axcodes(): - assert_equal(aff2axcodes(np.eye(4)), tuple('RAS')) + assert aff2axcodes(np.eye(4)) == tuple('RAS') aff = [[0, 1, 0, 10], [-1, 0, 0, 20], [0, 0, 1, 30], [0, 0, 0, 1]] - assert_equal(aff2axcodes(aff, (('L', 'R'), ('B', 'F'), ('D', 'U'))), - ('B', 'R', 'U')) - assert_equal(aff2axcodes(aff, (('L', 'R'), ('B', 'F'), ('D', 'U'))), - ('B', 'R', 'U')) + assert aff2axcodes(aff, (('L', 'R'), ('B', 'F'), ('D', 'U'))) == ('B', 'R', 'U') + assert aff2axcodes(aff, (('L', 'R'), ('B', 'F'), ('D', 'U'))) == ('B', 'R', 'U') def test_inv_ornt_aff(): # Extra tests for inv_ornt_aff routines (also tested in # io_orientations test) - assert_raises(OrientationError, inv_ornt_aff, - [[0, 1], [1, -1], [np.nan, np.nan]], (3, 4, 5)) + with pytest.raises(OrientationError): + inv_ornt_aff([[0, 1], [1, -1], [np.nan, np.nan]], (3, 4, 5)) def test_orientation_affine_deprecation(): aff1 = inv_ornt_aff([[0, 1], [1, -1], [2, 1]], (3, 4, 5)) - with warnings.catch_warnings(record=True) as warns: - warnings.simplefilter('always') + with pytest.deprecated_call(): aff2 = orientation_affine([[0, 1], [1, -1], [2, 1]], (3, 4, 5)) - assert_equal(len(warns), 1) - assert_equal(warns[0].category, DeprecationWarning) assert_array_equal(aff1, aff2) diff --git a/nibabel/tests/test_parrec.py b/nibabel/tests/test_parrec.py index 917bc417c6..d39f2a097f 100644 --- a/nibabel/tests/test_parrec.py +++ b/nibabel/tests/test_parrec.py @@ -20,9 +20,7 @@ from numpy.testing import (assert_almost_equal, assert_array_equal) -from nose.tools import (assert_true, assert_false, assert_raises, - assert_equal) - +import pytest from ..testing import (clear_and_catch_warnings, suppress_warnings, assert_arr_dict_equal) @@ -177,15 +175,15 @@ def test_header(): v41_hdr = 
PARRECHeader.from_fileobj(fobj, strict_sort=strict_sort) for hdr in (v42_hdr, v41_hdr, v4_hdr): hdr = PARRECHeader(HDR_INFO, HDR_DEFS) - assert_equal(hdr.get_data_shape(), (64, 64, 9, 3)) - assert_equal(hdr.get_data_dtype(), np.dtype('= ver), \ + "nibabel.info.VERSION does not match latest tag information" + + +def test_cmp_pkg_version_0(): + # Test version comparator + assert cmp_pkg_version(nib.__version__) == 0 + assert cmp_pkg_version('0.0') == -1 + assert cmp_pkg_version('1000.1000.1') == 1 + assert cmp_pkg_version(nib.__version__, nib.__version__) == 0 + + # Check dev/RC sequence + seq = ('3.0.0dev', '3.0.0rc1', '3.0.0rc1.post.dev', '3.0.0rc2', '3.0.0rc2.post.dev', '3.0.0') + for stage1, stage2 in zip(seq[:-1], seq[1:]): + assert cmp_pkg_version(stage1, stage2) == -1 + assert cmp_pkg_version(stage2, stage1) == 1 + + +@pytest.mark.parametrize("test_ver, pkg_ver, exp_out", + [ + ('1.0', '1.0', 0), + ('1.0.0', '1.0', 0), + ('1.0', '1.0.0', 0), + ('1.1', '1.1', 0), + ('1.2', '1.1', 1), + ('1.1', '1.2', -1), + ('1.1.1', '1.1.1', 0), + ('1.1.2', '1.1.1', 1), + ('1.1.1', '1.1.2', -1), + ('1.1', '1.1dev', 1), + ('1.1dev', '1.1', -1), + ('1.2.1', '1.2.1rc1', 1), + ('1.2.1rc1', '1.2.1', -1), + ('1.2.1rc1', '1.2.1rc', 1), + ('1.2.1rc', '1.2.1rc1', -1), + ('1.2.1rc1', '1.2.1rc', 1), + ('1.2.1rc', '1.2.1rc1', -1), + ('1.2.1b', '1.2.1a', 1), + ('1.2.1a', '1.2.1b', -1), + ('1.2.0+1', '1.2', 1), + ('1.2', '1.2.0+1', -1), + ('1.2.1+1', '1.2.1', 1), + ('1.2.1', '1.2.1+1', -1), + ('1.2.1rc1+1', '1.2.1', -1), + ('1.2.1', '1.2.1rc1+1', 1), + ('1.2.1rc1+1', '1.2.1+1', -1), + ('1.2.1+1', '1.2.1rc1+1', 1), + ]) +def test_cmp_pkg_version_1(test_ver, pkg_ver, exp_out): + # Test version comparator + assert cmp_pkg_version(test_ver, pkg_ver) == exp_out + + +@pytest.mark.parametrize("args", [['foo.2'], ['foo.2', '1.0'], ['1.0', 'foo.2'], ['foo']]) +def test_cmp_pkg_version_error(args): + with pytest.raises(ValueError): + cmp_pkg_version(*args) diff --git a/nibabel/tests/test_processing.py b/nibabel/tests/test_processing.py index 34b30f14c8..1e9e94091e 100644 --- a/nibabel/tests/test_processing.py +++ b/nibabel/tests/test_processing.py @@ -8,7 +8,6 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """ Testing processing module """ -from __future__ import division, print_function from os.path import dirname, join as pjoin @@ -28,17 +27,14 @@ voxel_sizes) from nibabel.eulerangles import euler2mat -from numpy.testing import (assert_almost_equal, - assert_array_equal) -from ..testing import skipif - -from nose.tools import (assert_true, assert_false, assert_raises, - assert_equal, assert_not_equal) +from numpy.testing import assert_almost_equal, assert_array_equal +import unittest +import pytest from nibabel.tests.test_spaces import assert_all_in, get_outspace_params from nibabel.testing import assert_allclose_safely -needs_scipy = skipif(not have_scipy, 'These tests need scipy') +needs_scipy = unittest.skipUnless(have_scipy, 'These tests need scipy') DATA_DIR = pjoin(dirname(__file__), 'data') @@ -60,8 +56,8 @@ def test_sigma2fwhm(): # direct test fwhm2sigma and sigma2fwhm are inverses of each other fwhm = np.arange(1.0, 5.0, 0.1) sigma = np.arange(1.0, 5.0, 0.1) - assert_true(np.allclose(sigma2fwhm(fwhm2sigma(fwhm)), fwhm)) - assert_true(np.allclose(fwhm2sigma(sigma2fwhm(sigma)), sigma)) + assert np.allclose(sigma2fwhm(fwhm2sigma(fwhm)), fwhm) + assert np.allclose(fwhm2sigma(sigma2fwhm(sigma)), sigma) def test_adapt_affine(): @@ -159,24 +155,25 @@ def test_resample_from_to(): 
assert_almost_equal(out.dataobj, exp_out) # Out class out = resample_from_to(img, trans_img) - assert_equal(out.__class__, Nifti1Image) + assert out.__class__ == Nifti1Image # By default, type of from_img makes no difference n1_img = Nifti2Image(data, affine) out = resample_from_to(n1_img, trans_img) - assert_equal(out.__class__, Nifti1Image) + assert out.__class__ == Nifti1Image # Passed as keyword arg out = resample_from_to(img, trans_img, out_class=Nifti2Image) - assert_equal(out.__class__, Nifti2Image) + assert out.__class__ == Nifti2Image # If keyword arg is None, use type of from_img out = resample_from_to(n1_img, trans_img, out_class=None) - assert_equal(out.__class__, Nifti2Image) + assert out.__class__ == Nifti2Image # to_img type irrelevant in all cases n1_trans_img = Nifti2Image(data, trans_aff) out = resample_from_to(img, n1_trans_img, out_class=None) - assert_equal(out.__class__, Nifti1Image) + assert out.__class__ == Nifti1Image # From 2D to 3D, error, the fixed affine is not invertible img_2d = Nifti1Image(data[:, :, 0], affine) - assert_raises(AffineError, resample_from_to, img_2d, img) + with pytest.raises(AffineError): + resample_from_to(img_2d, img) # 3D to 2D, we don't need to invert the fixed matrix out = resample_from_to(img, img_2d) assert_array_equal(out.dataobj, data[:, :, 0]) @@ -190,8 +187,10 @@ def test_resample_from_to(): assert_almost_equal(data_4d, out.dataobj) assert_array_equal(img_4d.affine, out.affine) # Errors trying to match 3D to 4D - assert_raises(ValueError, resample_from_to, img_4d, img) - assert_raises(ValueError, resample_from_to, img, img_4d) + with pytest.raises(ValueError): + resample_from_to(img_4d, img) + with pytest.raises(ValueError): + resample_from_to(img, img_4d) @needs_scipy @@ -227,7 +226,8 @@ def test_resample_to_output(): assert_array_equal(img3.dataobj, data[0, 0][..., None, None]) # But 4D does not img_4d = Nifti1Image(data.reshape(2, 3, 2, 2), np.eye(4)) - assert_raises(ValueError, resample_to_output, img_4d) + with pytest.raises(ValueError): + resample_to_output(img_4d) # Run vox2vox_out tests, checking output shape, coordinate transform for in_shape, in_aff, vox, out_shape, out_aff in get_outspace_params(): # Allow for expansion of image shape from < 3D @@ -241,7 +241,7 @@ def test_resample_to_output(): img = Nifti1Image(np.ones(in_shape), in_aff) out_img = resample_to_output(img, vox) assert_all_in(in_shape, in_aff, out_img.shape, out_img.affine) - assert_equal(out_img.shape, out_shape) + assert out_img.shape == out_shape assert_almost_equal(out_img.affine, out_aff) # Check data is as expected with some transforms # Flip first axis @@ -262,7 +262,7 @@ def test_resample_to_output(): rot_3_img = Nifti1Image(data, rot_3) out_img = resample_to_output(rot_3_img) exp_shape = (4, 4, 4) - assert_equal(out_img.shape, exp_shape) + assert out_img.shape == exp_shape exp_aff = np.array([[1, 0, 0, -2 * np.cos(np.pi / 4)], [0, 1, 0, 0], [0, 0, 1, 0], @@ -287,17 +287,11 @@ def test_resample_to_output(): img_ni1 = Nifti2Image(data, np.eye(4)) img_ni2 = Nifti2Image(data, np.eye(4)) # Default is Nifti1Image - assert_equal( - resample_to_output(img_ni2).__class__, - Nifti1Image) + assert resample_to_output(img_ni2).__class__ == Nifti1Image # Can be overriden - assert_equal( - resample_to_output(img_ni1, out_class=Nifti2Image).__class__, - Nifti2Image) + assert resample_to_output(img_ni1, out_class=Nifti2Image).__class__ == Nifti2Image # None specifies out_class from input - assert_equal( - resample_to_output(img_ni2, out_class=None).__class__, - 
Nifti2Image) + assert resample_to_output(img_ni2, out_class=None).__class__ == Nifti2Image @needs_scipy @@ -316,7 +310,8 @@ def test_smooth_image(): exp_out = spnd.gaussian_filter(data, sd, mode='nearest') assert_array_equal(smooth_image(img, 8).dataobj, exp_out) assert_array_equal(smooth_image(img, [8, 8, 8]).dataobj, exp_out) - assert_raises(ValueError, smooth_image, img, [8, 8]) + with pytest.raises(ValueError): + smooth_image(img, [8, 8]) # Not isotropic mixed_sd = fwhm2sigma(np.true_divide([8, 7, 6], [4, 5, 6])) exp_out = spnd.gaussian_filter(data, mixed_sd, mode='nearest') @@ -326,14 +321,16 @@ def test_smooth_image(): exp_out = spnd.gaussian_filter(data[0], sd[:2], mode='nearest') assert_array_equal(smooth_image(img_2d, 8).dataobj, exp_out) assert_array_equal(smooth_image(img_2d, [8, 8]).dataobj, exp_out) - assert_raises(ValueError, smooth_image, img_2d, [8, 8, 8]) + with pytest.raises(ValueError): + smooth_image(img_2d, [8, 8, 8]) # Isotropic in 4D has zero for last dimension in scalar case data_4d = np.arange(24 * 5).reshape((2, 3, 4, 5)) img_4d = Nifti1Image(data_4d, aff) exp_out = spnd.gaussian_filter(data_4d, list(sd) + [0], mode='nearest') assert_array_equal(smooth_image(img_4d, 8).dataobj, exp_out) # But raises error for vector case - assert_raises(ValueError, smooth_image, img_4d, [8, 8, 8]) + with pytest.raises(ValueError): + smooth_image(img_4d, [8, 8, 8]) # mode, cval exp_out = spnd.gaussian_filter(data, sd, mode='constant') assert_array_equal(smooth_image(img, 8, mode='constant').dataobj, exp_out) @@ -344,17 +341,11 @@ def test_smooth_image(): img_ni1 = Nifti2Image(data, np.eye(4)) img_ni2 = Nifti2Image(data, np.eye(4)) # Default is Nifti1Image - assert_equal( - smooth_image(img_ni2, 0).__class__, - Nifti1Image) + assert smooth_image(img_ni2, 0).__class__ == Nifti1Image # Can be overriden - assert_equal( - smooth_image(img_ni1, 0, out_class=Nifti2Image).__class__, - Nifti2Image) + assert smooth_image(img_ni1, 0, out_class=Nifti2Image).__class__ == Nifti2Image # None specifies out_class from input - assert_equal( - smooth_image(img_ni2, 0, out_class=None).__class__, - Nifti2Image) + assert smooth_image(img_ni2, 0, out_class=None).__class__ == Nifti2Image @needs_scipy @@ -371,10 +362,12 @@ def test_spatial_axes_check(): out = resample_to_output(img, voxel_sizes(img.affine)) for fname in MINC_4DS: img = nib.load(pjoin(DATA_DIR, fname)) - assert_raises(ValueError, smooth_image, img, 0) - assert_raises(ValueError, resample_from_to, img, img, mode='nearest') - assert_raises(ValueError, - resample_to_output, img, voxel_sizes(img.affine)) + with pytest.raises(ValueError): + smooth_image(img, 0) + with pytest.raises(ValueError): + resample_from_to(img, img, mode='nearest') + with pytest.raises(ValueError): + resample_to_output(img, voxel_sizes(img.affine)) def assert_spm_resampling_close(from_img, our_resampled, spm_resampled): @@ -412,7 +405,7 @@ def test_against_spm_resample(): func = nib.load(pjoin(DATA_DIR, 'functional.nii')) some_rotations = euler2mat(0.1, 0.2, 0.3) extra_affine = from_matvec(some_rotations, [3, 4, 5]) - moved_anat = nib.Nifti1Image(anat.get_data().astype(float), + moved_anat = nib.Nifti1Image(anat.get_fdata(), extra_affine.dot(anat.affine), anat.header) one_func = nib.Nifti1Image(func.dataobj[..., 0], diff --git a/nibabel/tests/test_proxy_api.py b/nibabel/tests/test_proxy_api.py index 7280c5552d..cccd7b729f 100644 --- a/nibabel/tests/test_proxy_api.py +++ b/nibabel/tests/test_proxy_api.py @@ -27,7 +27,6 @@ These last are to allow the proxy to be re-used 
with different images. """ -from __future__ import division, print_function, absolute_import from os.path import join as pjoin import warnings @@ -36,7 +35,6 @@ import numpy as np -from six import string_types from ..volumeutils import apply_read_scaling from ..analyze import AnalyzeHeader from ..spm99analyze import Spm99AnalyzeHeader @@ -46,20 +44,19 @@ from .. import minc1 from ..externals.netcdf import netcdf_file from .. import minc2 -from ..optpkg import optional_package -h5py, have_h5py, _ = optional_package('h5py') +from .._h5py_compat import h5py, have_h5py from .. import ecat from .. import parrec +from ..casting import have_binary128 from ..arrayproxy import ArrayProxy, is_proxy -from nose import SkipTest -from nose.tools import (assert_true, assert_false, assert_raises, - assert_equal, assert_not_equal) +import unittest +import pytest +from numpy.testing import assert_almost_equal, assert_array_equal, assert_allclose -from numpy.testing import (assert_almost_equal, assert_array_equal) - -from ..testing import data_path as DATA_PATH, assert_dt_equal +from ..testing import data_path as DATA_PATH, assert_dt_equal, clear_and_catch_warnings +from ..deprecator import ExpiredDeprecationError from ..tmpdirs import InTemporaryDirectory @@ -106,24 +103,26 @@ def validate_shape(self, pmaker, params): prox, fio, hdr = pmaker() assert_array_equal(prox.shape, params['shape']) # Read only - assert_raises(AttributeError, setattr, prox, 'shape', params['shape']) + with pytest.raises(AttributeError): + prox.shape = params['shape'] def validate_ndim(self, pmaker, params): # Check shape prox, fio, hdr = pmaker() - assert_equal(prox.ndim, len(params['shape'])) + assert prox.ndim == len(params['shape']) # Read only - assert_raises(AttributeError, setattr, prox, - 'ndim', len(params['shape'])) + with pytest.raises(AttributeError): + prox.ndim = len(params['shape']) def validate_is_proxy(self, pmaker, params): # Check shape prox, fio, hdr = pmaker() - assert_true(prox.is_proxy) - assert_true(is_proxy(prox)) - assert_false(is_proxy(np.arange(10))) + assert prox.is_proxy + assert is_proxy(prox) + assert not is_proxy(np.arange(10)) # Read only - assert_raises(AttributeError, setattr, prox, 'is_proxy', False) + with pytest.raises(AttributeError): + prox.is_proxy = False def validate_asarray(self, pmaker, params): # Check proxy returns expected array from asarray @@ -132,7 +131,40 @@ def validate_asarray(self, pmaker, params): assert_array_equal(out, params['arr_out']) assert_dt_equal(out.dtype, params['dtype_out']) # Shape matches expected shape - assert_equal(out.shape, params['shape']) + assert out.shape == params['shape'] + + def validate_array_interface_with_dtype(self, pmaker, params): + # Check proxy returns expected array from asarray + prox, fio, hdr = pmaker() + orig = np.array(prox, dtype=None) + assert_array_equal(orig, params['arr_out']) + assert_dt_equal(orig.dtype, params['dtype_out']) + + context = None + if np.issubdtype(orig.dtype, np.complexfloating): + context = clear_and_catch_warnings() + context.__enter__() + warnings.simplefilter('ignore', np.ComplexWarning) + + for dtype in np.sctypes['float'] + np.sctypes['int'] + np.sctypes['uint']: + # Directly coerce with a dtype + direct = dtype(prox) + # Half-precision is imprecise. Obviously. It's a bad idea, but don't break + # the test over it. 
+ rtol = 1e-03 if dtype == np.float16 else 1e-05 + assert_allclose(direct, orig.astype(dtype), rtol=rtol, atol=1e-08) + assert_dt_equal(direct.dtype, np.dtype(dtype)) + assert direct.shape == params['shape'] + # All three methods should produce equivalent results + for arrmethod in (np.array, np.asarray, np.asanyarray): + out = arrmethod(prox, dtype=dtype) + assert_array_equal(out, direct) + assert_dt_equal(out.dtype, np.dtype(dtype)) + # Shape matches expected shape + assert out.shape == params['shape'] + + if context is not None: + context.__exit__() def validate_header_isolated(self, pmaker, params): # Confirm altering input header has no effect @@ -153,7 +185,7 @@ def validate_header_isolated(self, pmaker, params): def validate_fileobj_isolated(self, pmaker, params): # Check file position of read independent of file-like object prox, fio, hdr = pmaker() - if isinstance(fio, string_types): + if isinstance(fio, str): return assert_array_equal(prox, params['arr_out']) fio.read() # move to end of file @@ -179,6 +211,7 @@ class TestAnalyzeProxyAPI(_TestProxyAPI): shapes = ((2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)) has_slope = False has_inter = False + data_dtypes = (np.uint8, np.int16, np.int32, np.float32, np.complex64, np.float64) array_order = 'F' # Cannot set offset for Freesurfer settable_offset = True @@ -203,11 +236,12 @@ def obj_params(self): offsets = (self.header_class().get_data_offset(),) else: offsets = (0, 16) - slopes = (1., 2.) if self.has_slope else (1.,) - inters = (0., 10.) if self.has_inter else (0.,) - dtypes = (np.uint8, np.int16, np.float32) + # For non-integral parameters, cast to float32 value can be losslessly cast + # later, enabling exact checks, then back to float for consistency + slopes = (1., 2., float(np.float32(3.1416))) if self.has_slope else (1.,) + inters = (0., 10., float(np.float32(2.7183))) if self.has_inter else (0.,) for shape, dtype, offset, slope, inter in product(self.shapes, - dtypes, + self.data_dtypes, offsets, slopes, inters): @@ -249,7 +283,7 @@ def sio_func(): dtype=dtype, dtype_out=dtype_out, arr=arr.copy(), - arr_out=arr * slope + inter, + arr_out=arr.astype(dtype_out) * slope + inter, shape=shape, offset=offset, slope=slope, @@ -276,8 +310,8 @@ def validate_dtype(self, pmaker, params): # Read-only dtype attribute prox, fio, hdr = pmaker() assert_dt_equal(prox.dtype, params['dtype']) - assert_raises(AttributeError, - prox.__setattr__, 'dtype', np.dtype(prox.dtype)) + with pytest.raises(AttributeError): + prox.dtype = np.dtype(prox.dtype) def validate_slope_inter_offset(self, pmaker, params): # Check slope, inter, offset @@ -286,17 +320,13 @@ def validate_slope_inter_offset(self, pmaker, params): expected = params[attr_name] assert_array_equal(getattr(prox, attr_name), expected) # Read only - assert_raises(AttributeError, - setattr, prox, attr_name, expected) + with pytest.raises(AttributeError): + setattr(prox, attr_name, expected) def validate_deprecated_header(self, pmaker, params): prox, fio, hdr = pmaker() - with warnings.catch_warnings(record=True) as warns: - warnings.simplefilter("always") - # Header is a copy of original - assert_false(prox.header is hdr) - assert_equal(prox.header, hdr) - assert_equal(warns.pop(0).category, DeprecationWarning) + with pytest.raises(ExpiredDeprecationError): + prox.header class TestSpm99AnalyzeProxyAPI(TestAnalyzeProxyAPI): @@ -312,6 +342,10 @@ class TestSpm2AnalyzeProxyAPI(TestSpm99AnalyzeProxyAPI): class TestNifti1ProxyAPI(TestSpm99AnalyzeProxyAPI): header_class = Nifti1Header has_inter = True + 
data_dtypes = (np.uint8, np.int16, np.int32, np.float32, np.complex64, np.float64, + np.int8, np.uint16, np.uint32, np.int64, np.uint64, np.complex128) + if have_binary128(): + data_dtypes += (np.float128, np.complex256) class TestMGHAPI(TestAnalyzeProxyAPI): @@ -321,6 +355,7 @@ class TestMGHAPI(TestAnalyzeProxyAPI): has_inter = False settable_offset = False data_endian = '>' + data_dtypes = (np.uint8, np.int16, np.int32, np.float32) class TestMinc1API(_TestProxyAPI): @@ -380,7 +415,7 @@ class TestEcatAPI(_TestProxyAPI): def obj_params(self): eg_path = pjoin(DATA_PATH, self.eg_fname) img = ecat.load(eg_path) - arr_out = img.get_data() + arr_out = img.get_fdata() def eg_func(): img = ecat.load(eg_path) @@ -394,14 +429,14 @@ def eg_func(): arr_out=arr_out)) def validate_header_isolated(self, pmaker, params): - raise SkipTest('ECAT header does not support dtype get') + raise unittest.SkipTest('ECAT header does not support dtype get') class TestPARRECAPI(_TestProxyAPI): def _func_dict(self, rec_name): img = parrec.load(rec_name) - arr_out = img.get_data() + arr_out = img.get_fdata() def eg_func(): img = parrec.load(rec_name) diff --git a/nibabel/tests/test_quaternions.py b/nibabel/tests/test_quaternions.py index a3f2ba9546..cb24c7d0ce 100644 --- a/nibabel/tests/test_quaternions.py +++ b/nibabel/tests/test_quaternions.py @@ -11,11 +11,9 @@ import numpy as np from numpy import pi -from ..testing import slow -from nose.tools import assert_raises, assert_true, assert_false, \ - assert_equal +import pytest -from numpy.testing import assert_array_almost_equal, assert_array_equal +from numpy.testing import assert_array_almost_equal, assert_array_equal, dec from .. import quaternions as nq from .. import eulerangles as nea @@ -55,129 +53,135 @@ def test_fillpos(): # Takes np array xyz = np.zeros((3,)) w, x, y, z = nq.fillpositive(xyz) - yield assert_true, w == 1 + assert w == 1 # Or lists xyz = [0] * 3 w, x, y, z = nq.fillpositive(xyz) - yield assert_true, w == 1 + assert w == 1 # Errors with wrong number of values - yield assert_raises, ValueError, nq.fillpositive, [0, 0] - yield assert_raises, ValueError, nq.fillpositive, [0] * 4 + with pytest.raises(ValueError): + nq.fillpositive([0, 0]) + with pytest.raises(ValueError): + nq.fillpositive([0] * 4) # Errors with negative w2 - yield assert_raises, ValueError, nq.fillpositive, [1.0] * 3 + with pytest.raises(ValueError): + nq.fillpositive([1.0] * 3) # Test corner case where w is near zero wxyz = nq.fillpositive([1, 0, 0]) - yield assert_true, wxyz[0] == 0.0 + assert wxyz[0] == 0.0 def test_conjugate(): # Takes sequence cq = nq.conjugate((1, 0, 0, 0)) # Returns float type - yield assert_true, cq.dtype.kind == 'f' + assert cq.dtype.kind == 'f' def test_quat2mat(): # also tested in roundtrip case below M = nq.quat2mat([1, 0, 0, 0]) - yield assert_array_almost_equal, M, np.eye(3) + assert_array_almost_equal(M, np.eye(3)) M = nq.quat2mat([3, 0, 0, 0]) - yield assert_array_almost_equal, M, np.eye(3) + assert_array_almost_equal(M, np.eye(3)) M = nq.quat2mat([0, 1, 0, 0]) - yield assert_array_almost_equal, M, np.diag([1, -1, -1]) + assert_array_almost_equal(M, np.diag([1, -1, -1])) M = nq.quat2mat([0, 2, 0, 0]) - yield assert_array_almost_equal, M, np.diag([1, -1, -1]) + assert_array_almost_equal(M, np.diag([1, -1, -1])) M = nq.quat2mat([0, 0, 0, 0]) - yield assert_array_almost_equal, M, np.eye(3) + assert_array_almost_equal(M, np.eye(3)) -def test_inverse(): +def test_inverse_0(): # Takes sequence iq = nq.inverse((1, 0, 0, 0)) # Returns float type - yield
assert_true, iq.dtype.kind == 'f' - for M, q in eg_pairs: - iq = nq.inverse(q) - iqM = nq.quat2mat(iq) - iM = np.linalg.inv(M) - yield assert_true, np.allclose(iM, iqM) + assert iq.dtype.kind == 'f' + + +@pytest.mark.parametrize("M, q", eg_pairs) +def test_inverse_1(M, q): + iq = nq.inverse(q) + iqM = nq.quat2mat(iq) + iM = np.linalg.inv(M) + assert np.allclose(iM, iqM) def test_eye(): qi = nq.eye() - yield assert_true, qi.dtype.kind == 'f' - yield assert_true, np.all([1, 0, 0, 0] == qi) - yield assert_true, np.allclose(nq.quat2mat(qi), np.eye(3)) + assert qi.dtype.kind == 'f' + assert np.all([1, 0, 0, 0] == qi) + assert np.allclose(nq.quat2mat(qi), np.eye(3)) def test_norm(): qi = nq.eye() - yield assert_true, nq.norm(qi) == 1 - yield assert_true, nq.isunit(qi) + assert nq.norm(qi) == 1 + assert nq.isunit(qi) qi[1] = 0.2 - yield assert_true, not nq.isunit(qi) + assert not nq.isunit(qi) -@slow -def test_mult(): +@dec.slow +@pytest.mark.parametrize("M1, q1", eg_pairs[0::4]) +@pytest.mark.parametrize("M2, q2", eg_pairs[1::4]) +def test_mult(M1, q1, M2, q2): # Test that quaternion * same as matrix * - for M1, q1 in eg_pairs[0::4]: - for M2, q2 in eg_pairs[1::4]: - q21 = nq.mult(q2, q1) - yield assert_array_almost_equal, np.dot(M2, M1), nq.quat2mat(q21) + q21 = nq.mult(q2, q1) + assert_array_almost_equal(np.dot(M2, M1), nq.quat2mat(q21)) -def test_inverse(): - for M, q in eg_pairs: - iq = nq.inverse(q) - iqM = nq.quat2mat(iq) - iM = np.linalg.inv(M) - yield assert_true, np.allclose(iM, iqM) +@pytest.mark.parametrize("M, q", eg_pairs) +def test_inverse(M, q): + iq = nq.inverse(q) + iqM = nq.quat2mat(iq) + iM = np.linalg.inv(M) + assert np.allclose(iM, iqM) def test_eye(): qi = nq.eye() - yield assert_true, np.all([1, 0, 0, 0] == qi) - yield assert_true, np.allclose(nq.quat2mat(qi), np.eye(3)) + assert np.all([1, 0, 0, 0] == qi) + assert np.allclose(nq.quat2mat(qi), np.eye(3)) -def test_qrotate(): - for vec in np.eye(3): - for M, q in eg_pairs: - vdash = nq.rotate_vector(vec, q) - vM = np.dot(M, vec) - yield assert_array_almost_equal, vdash, vM +@pytest.mark.parametrize("vec", np.eye(3)) +@pytest.mark.parametrize("M, q", eg_pairs) +def test_qrotate(vec, M, q): + vdash = nq.rotate_vector(vec, q) + vM = np.dot(M, vec) + assert_array_almost_equal(vdash, vM) -def test_quaternion_reconstruction(): +@pytest.mark.parametrize("q", unit_quats) +def test_quaternion_reconstruction(q): # Test reconstruction of arbitrary unit quaternions - for q in unit_quats: - M = nq.quat2mat(q) - qt = nq.mat2quat(M) - # Accept positive or negative match - posm = np.allclose(q, qt) - negm = np.allclose(q, -qt) - yield assert_true, posm or negm + M = nq.quat2mat(q) + qt = nq.mat2quat(M) + # Accept positive or negative match + posm = np.allclose(q, qt) + negm = np.allclose(q, -qt) + assert (posm or negm) def test_angle_axis2quat(): q = nq.angle_axis2quat(0, [1, 0, 0]) - yield assert_array_equal, q, [1, 0, 0, 0] + assert_array_equal(q, [1, 0, 0, 0]) q = nq.angle_axis2quat(np.pi, [1, 0, 0]) - yield assert_array_almost_equal, q, [0, 1, 0, 0] + assert_array_almost_equal(q, [0, 1, 0, 0]) q = nq.angle_axis2quat(np.pi, [1, 0, 0], True) - yield assert_array_almost_equal, q, [0, 1, 0, 0] + assert_array_almost_equal(q, [0, 1, 0, 0]) q = nq.angle_axis2quat(np.pi, [2, 0, 0], False) - yield assert_array_almost_equal, q, [0, 1, 0, 0] + assert_array_almost_equal(q, [0, 1, 0, 0]) def test_angle_axis(): for M, q in eg_pairs: theta, vec = nq.quat2angle_axis(q) q2 = nq.angle_axis2quat(theta, vec) - yield nq.nearly_equivalent, q, q2 + assert
nq.nearly_equivalent(q, q2) aa_mat = nq.angle_axis2mat(theta, vec) - yield assert_array_almost_equal, aa_mat, M + assert_array_almost_equal(aa_mat, M) unit_vec = vec / np.sqrt(vec.dot(vec)) aa_mat2 = nq.angle_axis2mat(theta, unit_vec, is_normalized=True) - yield assert_array_almost_equal, aa_mat2, M + assert_array_almost_equal(aa_mat2, M) diff --git a/nibabel/tests/test_recoder.py b/nibabel/tests/test_recoder.py index e340936ff0..d6206df978 100644 --- a/nibabel/tests/test_recoder.py +++ b/nibabel/tests/test_recoder.py @@ -12,50 +12,75 @@ from ..volumeutils import Recoder, DtypeMapper, native_code, swapped_code -from nose.tools import assert_equal, assert_raises, assert_true, assert_false +import pytest -def test_recoder(): +def test_recoder_1(): # simplest case, no aliases codes = ((1,), (2,)) rc = Recoder(codes) - yield assert_equal, rc.code[1], 1 - yield assert_equal, rc.code[2], 2 - yield assert_raises, KeyError, rc.code.__getitem__, 3 + assert rc.code[1] == 1 + assert rc.code[2] == 2 + with pytest.raises(KeyError): + rc.code[3] + +def test_recoder_2(): # with explicit name for code + codes = ((1,), (2,)) rc = Recoder(codes, ['code1']) - yield assert_raises, AttributeError, rc.__getattribute__, 'code' - yield assert_equal, rc.code1[1], 1 - yield assert_equal, rc.code1[2], 2 + with pytest.raises(AttributeError): + rc.code + assert rc.code1[1] == 1 + assert rc.code1[2] == 2 + + +def test_recoder_3(): # code and label codes = ((1, 'one'), (2, 'two')) rc = Recoder(codes) # just with implicit alias - yield assert_equal, rc.code[1], 1 - yield assert_equal, rc.code[2], 2 - yield assert_raises, KeyError, rc.code.__getitem__, 3 - yield assert_equal, rc.code['one'], 1 - yield assert_equal, rc.code['two'], 2 - yield assert_raises, KeyError, rc.code.__getitem__, 'three' - yield assert_raises, AttributeError, rc.__getattribute__, 'label' - rc = Recoder(codes, ['code1', 'label']) # with explicit column names - yield assert_raises, AttributeError, rc.__getattribute__, 'code' - yield assert_equal, rc.code1[1], 1 - yield assert_equal, rc.code1['one'], 1 - yield assert_equal, rc.label[1], 'one' - yield assert_equal, rc.label['one'], 'one' + assert rc.code[1] == 1 + assert rc.code[2] == 2 + with pytest.raises(KeyError): + rc.code[3] + assert rc.code['one'] == 1 + assert rc.code['two'] == 2 + with pytest.raises(KeyError): + rc.code['three'] + with pytest.raises(AttributeError): + rc.label + +def test_recoder_4(): + # with explicit column names + codes = ((1, 'one'), (2, 'two')) + rc = Recoder(codes, ['code1', 'label']) + with pytest.raises(AttributeError): + rc.code + assert rc.code1[1] == 1 + assert rc.code1['one'] == 1 + assert rc.label[1] == 'one' + assert rc.label['one'] == 'one' + + +def test_recoder_5(): # code, label, aliases codes = ((1, 'one', '1', 'first'), (2, 'two')) rc = Recoder(codes) # just with implicit alias - yield assert_equal, rc.code[1], 1 - yield assert_equal, rc.code['one'], 1 - yield assert_equal, rc.code['first'], 1 - rc = Recoder(codes, ['code1', 'label']) # with explicit column names - yield assert_equal, rc.code1[1], 1 - yield assert_equal, rc.code1['first'], 1 - yield assert_equal, rc.label[1], 'one' - yield assert_equal, rc.label['first'], 'one' + assert rc.code[1] == 1 + assert rc.code['one'] == 1 + assert rc.code['first'] == 1 + + +def test_recoder_6(): + # with explicit column names + codes = ((1, 'one', '1', 'first'), (2, 'two')) + rc = Recoder(codes, ['code1', 'label']) + assert rc.code1[1] == 1 + assert rc.code1['first'] == 1 + assert rc.label[1] == 'one' + assert 
rc.label['first'] == 'one' # Don't allow funny names - yield assert_raises, KeyError, Recoder, codes, ['field1'] + with pytest.raises(KeyError): + Recoder(codes, ['field1']) def test_custom_dicter(): @@ -81,22 +106,23 @@ def values(self): # code, label, aliases codes = ((1, 'one', '1', 'first'), (2, 'two')) rc = Recoder(codes, map_maker=MyDict) - yield assert_equal, rc.code[1], 'spam' - yield assert_equal, rc.code['one'], 'spam' - yield assert_equal, rc.code['first'], 'spam' - yield assert_equal, rc.code['bizarre'], 'eggs' - yield assert_equal, rc.value_set(), set(['funny', 'list']) - yield assert_equal, list(rc.keys()), ['some', 'keys'] + assert rc.code[1] == 'spam' + assert rc.code['one'] == 'spam' + assert rc.code['first'] == 'spam' + assert rc.code['bizarre'] == 'eggs' + assert rc.value_set() == set(['funny', 'list']) + assert list(rc.keys()) == ['some', 'keys'] def test_add_codes(): codes = ((1, 'one', '1', 'first'), (2, 'two')) rc = Recoder(codes) - yield assert_equal, rc.code['two'], 2 - yield assert_raises, KeyError, rc.code.__getitem__, 'three' + assert rc.code['two'] == 2 + with pytest.raises(KeyError): + rc.code['three'] rc.add_codes(((3, 'three'), (1, 'number 1'))) - yield assert_equal, rc.code['three'], 3 - yield assert_equal, rc.code['number 1'], 1 + assert rc.code['three'] == 3 + assert rc.code['number 1'] == 1 def test_sugar(): @@ -104,31 +130,32 @@ def test_sugar(): codes = ((1, 'one', '1', 'first'), (2, 'two')) rc = Recoder(codes) # Field1 is synonym for first named dict - yield assert_equal, rc.code, rc.field1 + assert rc.code == rc.field1 rc = Recoder(codes, fields=('code1', 'label')) - yield assert_equal, rc.code1, rc.field1 + assert rc.code1 == rc.field1 # Direct key access identical to key access for first named - yield assert_equal, rc[1], rc.field1[1] - yield assert_equal, rc['two'], rc.field1['two'] + assert rc[1] == rc.field1[1] + assert rc['two'] == rc.field1['two'] # keys gets all keys - yield assert_equal, set(rc.keys()), set((1, 'one', '1', 'first', 2, 'two')) + assert set(rc.keys()) == set((1, 'one', '1', 'first', 2, 'two')) # value_set gets set of values from first column - yield assert_equal, rc.value_set(), set((1, 2)) + assert rc.value_set() == set((1, 2)) # or named column if given - yield assert_equal, rc.value_set('label'), set(('one', 'two')) + assert rc.value_set('label') == set(('one', 'two')) # "in" works for values in and outside the set - yield assert_true, 'one' in rc - yield assert_false, 'three' in rc + assert 'one' in rc + assert 'three' not in rc def test_dtmapper(): # dict-like that will lookup on dtypes, even if they don't hash properly d = DtypeMapper() - assert_raises(KeyError, d.__getitem__, 1) + with pytest.raises(KeyError): + d[1] d[1] = 'something' - assert_equal(d[1], 'something') - assert_equal(list(d.keys()), [1]) - assert_equal(list(d.values()), ['something']) + assert d[1] == 'something' + assert list(d.keys()) == [1] + assert list(d.values()) == ['something'] intp_dt = np.dtype('intp') if intp_dt == np.dtype('int32'): canonical_dt = np.dtype('int32') @@ -139,21 +166,23 @@ def test_dtmapper(): native_dt = canonical_dt.newbyteorder('=') explicit_dt = canonical_dt.newbyteorder(native_code) d[canonical_dt] = 'spam' - assert_equal(d[canonical_dt], 'spam') - assert_equal(d[native_dt], 'spam') - assert_equal(d[explicit_dt], 'spam') + assert d[canonical_dt] == 'spam' + assert d[native_dt] == 'spam' + assert d[explicit_dt] == 'spam' + # Test keys, values d = DtypeMapper() - assert_equal(list(d.keys()), []) - 
assert_equal(list(d.keys()), []) + assert list(d.keys()) == [] + assert list(d.keys()) == [] d[canonical_dt] = 'spam' - assert_equal(list(d.keys()), [canonical_dt]) - assert_equal(list(d.values()), ['spam']) + assert list(d.keys()) == [canonical_dt] + assert list(d.values()) == ['spam'] # With other byte order d = DtypeMapper() sw_dt = canonical_dt.newbyteorder(swapped_code) d[sw_dt] = 'spam' - assert_raises(KeyError, d.__getitem__, canonical_dt) - assert_equal(d[sw_dt], 'spam') + with pytest.raises(KeyError): + d[canonical_dt] + assert d[sw_dt] == 'spam' sw_intp_dt = intp_dt.newbyteorder(swapped_code) - assert_equal(d[sw_intp_dt], 'spam') + assert d[sw_intp_dt] == 'spam' diff --git a/nibabel/tests/test_removalschedule.py b/nibabel/tests/test_removalschedule.py new file mode 100644 index 0000000000..28144f3af4 --- /dev/null +++ b/nibabel/tests/test_removalschedule.py @@ -0,0 +1,85 @@ +from ..pkg_info import cmp_pkg_version +import unittest +from unittest import mock +import pytest + +MODULE_SCHEDULE = [ + ("5.0.0", ["nibabel.keywordonly"]), + ("4.0.0", ["nibabel.trackvis"]), + ("3.0.0", ["nibabel.minc", "nibabel.checkwarns"]), + # Verify that the test will be quiet if the schedule outlives the modules + ("1.0.0", ["nibabel.nosuchmod"]), +] + +OBJECT_SCHEDULE = [ + ("5.0.0", [("nibabel.pydicom_compat", "dicom_test")]), + ("3.0.0", [("nibabel.testing", "catch_warn_reset")]), + # Verify that the test will be quiet if the schedule outlives the modules + ("1.0.0", [("nibabel.nosuchmod", "anyobj"), ("nibabel.nifti1", "nosuchobj")]), +] + +ATTRIBUTE_SCHEDULE = [ + ("5.0.0", [("nibabel.dataobj_images", "DataobjImage", "get_data")]), + # Verify that the test will be quiet if the schedule outlives the modules + ("1.0.0", [("nibabel.nosuchmod", "anyobj", "anyattr"), + ("nibabel.nifti1", "nosuchobj", "anyattr"), + ("nibabel.nifti1", "Nifti1Image", "nosuchattr")]), +] + + +def _filter(schedule): + return [entry for ver, entries in schedule if cmp_pkg_version(ver) < 1 for entry in entries] + + +def test_module_removal(): + for module in _filter(MODULE_SCHEDULE): + with pytest.raises(ImportError): + __import__(module) + assert False, "Time to remove %s" % module + + +def test_object_removal(): + for module_name, obj in _filter(OBJECT_SCHEDULE): + try: + module = __import__(module_name) + except ImportError: + continue + assert not hasattr(module, obj), "Time to remove %s.%s" % (module_name, obj,) + + +def test_attribute_removal(): + for module_name, cls, attr in _filter(ATTRIBUTE_SCHEDULE): + try: + module = __import__(module_name) + except ImportError: + continue + try: + klass = getattr(module, cls) + except AttributeError: + continue + assert not hasattr(klass, attr), "Time to remove %s.%s.%s" % (module_name, cls, attr,) + + +# +# Test the tests, making sure that we will get errors when the time comes +# + +_sched = "nibabel.tests.test_removalschedule.{}_SCHEDULE".format + + +@mock.patch(_sched("MODULE"), [("3.0.0", ["nibabel.nifti1"])]) +def test_unremoved_module(): + with pytest.raises(AssertionError): + test_module_removal() + + +@mock.patch(_sched("OBJECT"), [("3.0.0", [("nibabel.nifti1", "Nifti1Image")])]) +def test_unremoved_object(): + with pytest.raises(AssertionError): + test_object_removal() + + +@mock.patch(_sched("ATTRIBUTE"), [("3.0.0", [("nibabel.nifti1", "Nifti1Image", "affine")])]) +def test_unremoved_attr(): + with pytest.raises(AssertionError): + test_attribute_removal() diff --git a/nibabel/tests/test_round_trip.py b/nibabel/tests/test_round_trip.py index d216a03cdd..79d785932d 
100644 --- a/nibabel/tests/test_round_trip.py +++ b/nibabel/tests/test_round_trip.py @@ -11,8 +11,6 @@ from ..arraywriters import ScalingError from ..casting import best_float, ulp, type_info -from nose.tools import assert_true - from numpy.testing import assert_array_equal DEBUG = True @@ -25,7 +23,7 @@ def round_trip(arr, out_dtype): img.to_file_map() back = Nifti1Image.from_file_map(img.file_map) # Recover array and calculated scaling from array proxy object - return back.get_data(), back.dataobj.slope, back.dataobj.inter + return back.get_fdata(), back.dataobj.slope, back.dataobj.inter def check_params(in_arr, in_type, out_type): @@ -193,4 +191,4 @@ def check_arr(test_id, V_in, in_type, out_type, scaling_type): slope, inter) # To help debugging failures with --pdb-failure np.nonzero(all_fails) - assert_true(this_test) + assert this_test diff --git a/nibabel/tests/test_rstutils.py b/nibabel/tests/test_rstutils.py index 9fd708ba64..4fb83d3170 100644 --- a/nibabel/tests/test_rstutils.py +++ b/nibabel/tests/test_rstutils.py @@ -1,59 +1,50 @@ """ Test printable table """ -from __future__ import division, print_function -import sys import numpy as np from ..rstutils import rst_table -from nose import SkipTest -from nose.tools import assert_equal, assert_raises +import pytest def test_rst_table(): # Tests for printable table function R, C = 3, 4 cell_values = np.arange(R * C).reshape((R, C)) - if (sys.version_info[:3] == (3, 2, 3) and np.__version__ == '1.6.1'): - raise SkipTest("Known (later fixed) bug in python3.2/numpy " - "treating np.int64 as str") - assert_equal(rst_table(cell_values), + assert (rst_table(cell_values) == """+--------+--------+--------+--------+--------+ | | col[0] | col[1] | col[2] | col[3] | +========+========+========+========+========+ | row[0] | 0.00 | 1.00 | 2.00 | 3.00 | | row[1] | 4.00 | 5.00 | 6.00 | 7.00 | | row[2] | 8.00 | 9.00 | 10.00 | 11.00 | -+--------+--------+--------+--------+--------+""" - ) - assert_equal(rst_table(cell_values, ['a', 'b', 'c']), ++--------+--------+--------+--------+--------+""") + assert (rst_table(cell_values, ['a', 'b', 'c']) == """+---+--------+--------+--------+--------+ | | col[0] | col[1] | col[2] | col[3] | +===+========+========+========+========+ | a | 0.00 | 1.00 | 2.00 | 3.00 | | b | 4.00 | 5.00 | 6.00 | 7.00 | | c | 8.00 | 9.00 | 10.00 | 11.00 | -+---+--------+--------+--------+--------+""" - ) - assert_raises(ValueError, - rst_table, cell_values, ['a', 'b']) - assert_raises(ValueError, - rst_table, cell_values, ['a', 'b', 'c', 'd']) - assert_equal(rst_table(cell_values, None, ['1', '2', '3', '4']), ++---+--------+--------+--------+--------+""") + with pytest.raises(ValueError): + rst_table(cell_values, ['a', 'b']) + with pytest.raises(ValueError): + rst_table(cell_values, ['a', 'b', 'c', 'd']) + assert (rst_table(cell_values, None, ['1', '2', '3', '4']) == """+--------+-------+-------+-------+-------+ | | 1 | 2 | 3 | 4 | +========+=======+=======+=======+=======+ | row[0] | 0.00 | 1.00 | 2.00 | 3.00 | | row[1] | 4.00 | 5.00 | 6.00 | 7.00 | | row[2] | 8.00 | 9.00 | 10.00 | 11.00 | -+--------+-------+-------+-------+-------+""" - ) - assert_raises(ValueError, - rst_table, cell_values, None, ['1', '2', '3']) - assert_raises(ValueError, - rst_table, cell_values, None, list('12345')) - assert_equal(rst_table(cell_values, title='A title'), ++--------+-------+-------+-------+-------+""") + with pytest.raises(ValueError): + rst_table(cell_values, None, ['1', '2', '3']) + with pytest.raises(ValueError): + rst_table(cell_values, 
None, list('12345')) + assert (rst_table(cell_values, title='A title') == """******* A title ******* @@ -64,29 +55,26 @@ def test_rst_table(): | row[0] | 0.00 | 1.00 | 2.00 | 3.00 | | row[1] | 4.00 | 5.00 | 6.00 | 7.00 | | row[2] | 8.00 | 9.00 | 10.00 | 11.00 | -+--------+--------+--------+--------+--------+""" - ) - assert_equal(rst_table(cell_values, val_fmt='{0}'), ++--------+--------+--------+--------+--------+""") + assert (rst_table(cell_values, val_fmt='{0}') == """+--------+--------+--------+--------+--------+ | | col[0] | col[1] | col[2] | col[3] | +========+========+========+========+========+ | row[0] | 0 | 1 | 2 | 3 | | row[1] | 4 | 5 | 6 | 7 | | row[2] | 8 | 9 | 10 | 11 | -+--------+--------+--------+--------+--------+""" - ) ++--------+--------+--------+--------+--------+""") # Doing a fancy cell format cell_values_back = np.arange(R * C)[::-1].reshape((R, C)) cell_3d = np.dstack((cell_values, cell_values_back)) - assert_equal(rst_table(cell_3d, val_fmt='{0[0]}-{0[1]}'), + assert (rst_table(cell_3d, val_fmt='{0[0]}-{0[1]}') == """+--------+--------+--------+--------+--------+ | | col[0] | col[1] | col[2] | col[3] | +========+========+========+========+========+ | row[0] | 0-11 | 1-10 | 2-9 | 3-8 | | row[1] | 4-7 | 5-6 | 6-5 | 7-4 | | row[2] | 8-3 | 9-2 | 10-1 | 11-0 | -+--------+--------+--------+--------+--------+""" - ) ++--------+--------+--------+--------+--------+""") # Test formatting characters formats = dict( down='!', @@ -94,7 +82,7 @@ def test_rst_table(): thick_long='~', cross='%', title_heading='#') - assert_equal(rst_table(cell_values, title='A title', format_chars=formats), + assert (rst_table(cell_values, title='A title', format_chars=formats) == """####### A title ####### @@ -105,10 +93,7 @@ def test_rst_table(): ! row[0] ! 0.00 ! 1.00 ! 2.00 ! 3.00 ! ! row[1] ! 4.00 ! 5.00 ! 6.00 ! 7.00 ! ! row[2] ! 8.00 ! 9.00 ! 10.00 ! 11.00 ! -%________%________%________%________%________%""" - ) +%________%________%________%________%________%""") formats['funny_value'] = '!' 
- assert_raises(ValueError, - rst_table, - cell_values, title='A title', format_chars=formats) - return + with pytest.raises(ValueError): + rst_table(cell_values, title='A title', format_chars=formats) diff --git a/nibabel/tests/test_scaling.py b/nibabel/tests/test_scaling.py index d318c9f810..f314e6b572 100644 --- a/nibabel/tests/test_scaling.py +++ b/nibabel/tests/test_scaling.py @@ -7,173 +7,105 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## ''' Test for scaling / rounding in volumeutils module ''' -from __future__ import division, print_function, absolute_import import numpy as np from io import BytesIO -from ..volumeutils import (calculate_scale, scale_min_max, finite_range, - apply_read_scaling, array_to_file, array_from_file) +from ..volumeutils import finite_range, apply_read_scaling, array_to_file, array_from_file from ..casting import type_info from ..testing import suppress_warnings -from numpy.testing import (assert_array_almost_equal, assert_array_equal) +from .test_volumeutils import _calculate_scale -from nose.tools import (assert_true, assert_equal, assert_raises, - assert_not_equal) +from numpy.testing import (assert_array_almost_equal, assert_array_equal) +import pytest # Debug print statements DEBUG = True -def test_scale_min_max(): - mx_dt = np.maximum_sctype(np.float) - for tp in np.sctypes['uint'] + np.sctypes['int']: - info = np.iinfo(tp) - # Need to pump up to max fp type to contain python longs - imin = np.array(info.min, dtype=mx_dt) - imax = np.array(info.max, dtype=mx_dt) - value_pairs = ( - (0, imax), - (imin, 0), - (imin, imax), - (1, 10), - (-1, -1), - (1, 1), - (-10, -1), - (-100, 10)) - for mn, mx in value_pairs: - # with intercept - scale, inter = scale_min_max(mn, mx, tp, True) - if mx - mn: - assert_array_almost_equal, (mx - inter) / scale, imax - assert_array_almost_equal, (mn - inter) / scale, imin - else: - assert_equal, (scale, inter), (1.0, mn) - # without intercept - if imin == 0 and mn < 0 and mx > 0: - (assert_raises, ValueError, - scale_min_max, mn, mx, tp, False) - continue - scale, inter = scale_min_max(mn, mx, tp, False) - assert_equal, inter, 0.0 - if mn == 0 and mx == 0: - assert_equal, scale, 1.0 - continue - sc_mn = mn / scale - sc_mx = mx / scale - assert_true, sc_mn >= imin - assert_true, sc_mx <= imax - if imin == 0: - if mx > 0: # numbers all +ve - assert_array_almost_equal, mx / scale, imax - else: # numbers all -ve - assert_array_almost_equal, mn / scale, imax - continue - if abs(mx) >= abs(mn): - assert_array_almost_equal, mx / scale, imax - else: - assert_array_almost_equal, mn / scale, imin - - -def test_finite_range(): +@pytest.mark.parametrize("in_arr, res", [ + ([[-1, 0, 1], [np.inf, np.nan, -np.inf]], (-1, 1)), + (np.array([[-1, 0, 1], [np.inf, np.nan, -np.inf]]), (-1, 1)), + ([[np.nan], [np.nan]], (np.inf, -np.inf)), # all nans slices + (np.zeros((3, 4, 5)) + np.nan, (np.inf, -np.inf)), + ([[-np.inf], [np.inf]], (np.inf, -np.inf)), # all infs slices + (np.zeros((3, 4, 5)) + np.inf, (np.inf, -np.inf)), + ([[np.nan, -1, 2], [-2, np.nan, 1]], (-2, 2)), + ([[np.nan, -np.inf, 2], [-2, np.nan, np.inf]], (-2, 2)), + ([[-np.inf, 2], [np.nan, 1]], (1, 2)), # good max case + ([[np.nan, -np.inf, 2], [-2, np.nan, np.inf]], (-2, 2)), + ([np.nan], (np.inf, -np.inf)), + ([np.inf], (np.inf, -np.inf)), + ([-np.inf], (np.inf, -np.inf)), + ([np.inf, 1], (1, 1)), # only look at finite values + ([-np.inf, 1], (1, 1)), + ([[], []], (np.inf, -np.inf)), # empty array + (np.array([[-3, 0, 1], [2, -1, 4]], 
dtype=np.int), (-3, 4)), + (np.array([[1, 0, 1], [2, 3, 4]], dtype=np.uint), (0, 4)), + ([0., 1, 2, 3], (0, 3)), + # Complex comparison works as if they are floats + ([[np.nan, -1 - 100j, 2], [-2, np.nan, 1 + 100j]], (-2, 2)), + ([[np.nan, -1, 2 - 100j], [-2 + 100j, np.nan, 1]], (-2 + 100j, 2 - 100j)), +]) +def test_finite_range(in_arr, res): # Finite range utility function - for in_arr, res in ( - ([[-1, 0, 1], [np.inf, np.nan, -np.inf]], (-1, 1)), - (np.array([[-1, 0, 1], [np.inf, np.nan, -np.inf]]), (-1, 1)), - ([[np.nan], [np.nan]], (np.inf, -np.inf)), # all nans slices - (np.zeros((3, 4, 5)) + np.nan, (np.inf, -np.inf)), - ([[-np.inf], [np.inf]], (np.inf, -np.inf)), # all infs slices - (np.zeros((3, 4, 5)) + np.inf, (np.inf, -np.inf)), - ([[np.nan, -1, 2], [-2, np.nan, 1]], (-2, 2)), - ([[np.nan, -np.inf, 2], [-2, np.nan, np.inf]], (-2, 2)), - ([[-np.inf, 2], [np.nan, 1]], (1, 2)), # good max case - ([[np.nan, -np.inf, 2], [-2, np.nan, np.inf]], (-2, 2)), - ([np.nan], (np.inf, -np.inf)), - ([np.inf], (np.inf, -np.inf)), - ([-np.inf], (np.inf, -np.inf)), - ([np.inf, 1], (1, 1)), # only look at finite values - ([-np.inf, 1], (1, 1)), - ([[], []], (np.inf, -np.inf)), # empty array - (np.array([[-3, 0, 1], [2, -1, 4]], dtype=np.int), (-3, 4)), - (np.array([[1, 0, 1], [2, 3, 4]], dtype=np.uint), (0, 4)), - ([0., 1, 2, 3], (0, 3)), - # Complex comparison works as if they are floats - ([[np.nan, -1 - 100j, 2], [-2, np.nan, 1 + 100j]], (-2, 2)), - ([[np.nan, -1, 2 - 100j], [-2 + 100j, np.nan, 1]], (-2 + 100j, 2 - 100j)), - ): - assert_equal(finite_range(in_arr), res) - assert_equal(finite_range(in_arr, False), res) - assert_equal(finite_range(in_arr, check_nan=False), res) - has_nan = np.any(np.isnan(in_arr)) - assert_equal(finite_range(in_arr, True), res + (has_nan,)) - assert_equal(finite_range(in_arr, check_nan=True), res + (has_nan,)) - in_arr = np.array(in_arr) - flat_arr = in_arr.ravel() - assert_equal(finite_range(flat_arr), res) - assert_equal(finite_range(flat_arr, True), res + (has_nan,)) - # Check float types work as complex - if in_arr.dtype.kind == 'f': - c_arr = in_arr.astype(np.complex) - assert_equal(finite_range(c_arr), res) - assert_equal(finite_range(c_arr, True), res + (has_nan,)) + assert finite_range(in_arr) == res + assert finite_range(in_arr, False) == res + assert finite_range(in_arr, check_nan=False) == res + has_nan = np.any(np.isnan(in_arr)) + assert finite_range(in_arr, True) == res + (has_nan,) + assert finite_range(in_arr, check_nan=True) == res + (has_nan,) + in_arr = np.array(in_arr) + flat_arr = in_arr.ravel() + assert finite_range(flat_arr) == res + assert finite_range(flat_arr, True) == res + (has_nan,) + # Check float types work as complex + if in_arr.dtype.kind == 'f': + c_arr = in_arr.astype(np.complex) + assert finite_range(c_arr) == res + assert finite_range(c_arr, True) == res + (has_nan,) + + +def test_finite_range_err(): # Test error cases a = np.array([[1., 0, 1], [2, 3, 4]]).view([('f1', 'f')]) - assert_raises(TypeError, finite_range, a) - - -def test_calculate_scale(): - # Test for special cases in scale calculation - npa = np.array - # Here the offset handles it - res = calculate_scale(npa([-2, -1], dtype=np.int8), np.uint8, True) - assert_equal(res, (1.0, -2.0, None, None)) - # Not having offset not a problem obviously - res = calculate_scale(npa([-2, -1], dtype=np.int8), np.uint8, 0) - assert_equal(res, (-1.0, 0.0, None, None)) - # Case where offset handles scaling - res = calculate_scale(npa([-1, 1], dtype=np.int8), np.uint8, 1) - 
assert_equal(res, (1.0, -1.0, None, None)) - # Can't work for no offset case - assert_raises(ValueError, - calculate_scale, npa([-1, 1], dtype=np.int8), np.uint8, 0) - # Offset trick can't work when max is out of range - res = calculate_scale(npa([-1, 255], dtype=np.int16), np.uint8, 1) - assert_not_equal(res, (1.0, -1.0, None, None)) + with pytest.raises(TypeError): + finite_range(a) -def test_a2f_mn_mx(): +@pytest.mark.parametrize("out_type", [np.int16, np.float32]) +def test_a2f_mn_mx(out_type): # Test array to file mn, mx handling str_io = BytesIO() - for out_type in (np.int16, np.float32): - arr = np.arange(6, dtype=out_type) - arr_orig = arr.copy() # safe backup for testing against - # Basic round trip to warm up - array_to_file(arr, str_io) - data_back = array_from_file(arr.shape, out_type, str_io) - assert_array_equal(arr, data_back) - # Clip low - array_to_file(arr, str_io, mn=2) - data_back = array_from_file(arr.shape, out_type, str_io) - # arr unchanged - assert_array_equal(arr, arr_orig) - # returned value clipped low - assert_array_equal(data_back, [2, 2, 2, 3, 4, 5]) - # Clip high - array_to_file(arr, str_io, mx=4) - data_back = array_from_file(arr.shape, out_type, str_io) - # arr unchanged - assert_array_equal(arr, arr_orig) - # returned value clipped high - assert_array_equal(data_back, [0, 1, 2, 3, 4, 4]) - # Clip both - array_to_file(arr, str_io, mn=2, mx=4) - data_back = array_from_file(arr.shape, out_type, str_io) - # arr unchanged - assert_array_equal(arr, arr_orig) - # returned value clipped high - assert_array_equal(data_back, [2, 2, 2, 3, 4, 4]) + arr = np.arange(6, dtype=out_type) + arr_orig = arr.copy() # safe backup for testing against + # Basic round trip to warm up + array_to_file(arr, str_io) + data_back = array_from_file(arr.shape, out_type, str_io) + assert_array_equal(arr, data_back) + # Clip low + array_to_file(arr, str_io, mn=2) + data_back = array_from_file(arr.shape, out_type, str_io) + # arr unchanged + assert_array_equal(arr, arr_orig) + # returned value clipped low + assert_array_equal(data_back, [2, 2, 2, 3, 4, 5]) + # Clip high + array_to_file(arr, str_io, mx=4) + data_back = array_from_file(arr.shape, out_type, str_io) + # arr unchanged + assert_array_equal(arr, arr_orig) + # returned value clipped high + assert_array_equal(data_back, [0, 1, 2, 3, 4, 4]) + # Clip both + array_to_file(arr, str_io, mn=2, mx=4) + data_back = array_from_file(arr.shape, out_type, str_io) + # arr unchanged + assert_array_equal(arr, arr_orig) + # returned value clipped high + assert_array_equal(data_back, [2, 2, 2, 3, 4, 4]) def test_a2f_nan2zero(): @@ -199,54 +131,51 @@ def test_a2f_nan2zero(): assert_array_equal(data_back, [np.array(np.nan).astype(np.int32), 99]) -def test_array_file_scales(): +@pytest.mark.parametrize("in_type, out_type", [ + (np.int16, np.int16), + (np.int16, np.int8), + (np.uint16, np.uint8), + (np.int32, np.int8), + (np.float32, np.uint8), + (np.float32, np.int16) +]) +def test_array_file_scales(in_type, out_type): # Test scaling works for max, min when going from larger to smaller type, # and from float to integer. 
bio = BytesIO() - for in_type, out_type, err in ((np.int16, np.int16, None), - (np.int16, np.int8, None), - (np.uint16, np.uint8, None), - (np.int32, np.int8, None), - (np.float32, np.uint8, None), - (np.float32, np.int16, None)): - out_dtype = np.dtype(out_type) - arr = np.zeros((3,), dtype=in_type) - info = type_info(in_type) - arr[0], arr[1] = info['min'], info['max'] - if not err is None: - assert_raises(err, calculate_scale, arr, out_dtype, True) - continue - slope, inter, mn, mx = calculate_scale(arr, out_dtype, True) - array_to_file(arr, bio, out_type, 0, inter, slope, mn, mx) - bio.seek(0) - arr2 = array_from_file(arr.shape, out_dtype, bio) - arr3 = apply_read_scaling(arr2, slope, inter) - # Max rounding error for integer type - max_miss = slope / 2. - assert_true(np.all(np.abs(arr - arr3) <= max_miss)) - bio.truncate(0) - bio.seek(0) - - -def test_scaling_in_abstract(): + out_dtype = np.dtype(out_type) + arr = np.zeros((3,), dtype=in_type) + info = type_info(in_type) + arr[0], arr[1] = info['min'], info['max'] + slope, inter, mn, mx = _calculate_scale(arr, out_dtype, True) + array_to_file(arr, bio, out_type, 0, inter, slope, mn, mx) + bio.seek(0) + arr2 = array_from_file(arr.shape, out_dtype, bio) + arr3 = apply_read_scaling(arr2, slope, inter) + # Max rounding error for integer type + max_miss = slope / 2. + assert np.all(np.abs(arr - arr3) <= max_miss) + + +@pytest.mark.parametrize("category0, category1, overflow",[ # Confirm that, for all ints and uints as input, and all possible outputs, # for any simple way of doing the calculation, the result is near enough - for category0, category1 in (('int', 'int'), - ('uint', 'int'), - ): - for in_type in np.sctypes[category0]: - for out_type in np.sctypes[category1]: - check_int_a2f(in_type, out_type) + ('int', 'int', False), + ('uint', 'int', False), # Converting floats to integer - for category0, category1 in (('float', 'int'), - ('float', 'uint'), - ('complex', 'int'), - ('complex', 'uint'), - ): - for in_type in np.sctypes[category0]: - for out_type in np.sctypes[category1]: - with suppress_warnings(): # overflow + ('float', 'int', True), + ('float', 'uint', True), + ('complex', 'int', True), + ('complex', 'uint', True), +]) +def test_scaling_in_abstract(category0, category1, overflow): + for in_type in np.sctypes[category0]: + for out_type in np.sctypes[category1]: + if overflow: + with suppress_warnings(): check_int_a2f(in_type, out_type) + else: + check_int_a2f(in_type, out_type) def check_int_a2f(in_type, out_type): @@ -267,7 +196,7 @@ def check_int_a2f(in_type, out_type): data[1] = this_max + 0j str_io = BytesIO() try: - scale, inter, mn, mx = calculate_scale(data, out_type, True) + scale, inter, mn, mx = _calculate_scale(data, out_type, True) except ValueError as e: if DEBUG: print(in_type, out_type, e) @@ -275,7 +204,7 @@ def check_int_a2f(in_type, out_type): array_to_file(data, str_io, out_type, 0, inter, scale, mn, mx) data_back = array_from_file(data.shape, out_type, str_io) data_back = apply_read_scaling(data_back, scale, inter) - assert_true(np.allclose(big_floater(data), big_floater(data_back))) + assert np.allclose(big_floater(data), big_floater(data_back)) # Try with analyze-size scale and inter scale32 = np.float32(scale) inter32 = np.float32(inter) @@ -286,5 +215,4 @@ def check_int_a2f(in_type, out_type): # Clip at extremes to remove inf info = type_info(in_type) out_min, out_max = info['min'], info['max'] - assert_true(np.allclose(big_floater(data), - big_floater(np.clip(data_back, out_min, out_max)))) + assert 
np.allclose(big_floater(data), big_floater(np.clip(data_back, out_min, out_max))) diff --git a/nibabel/tests/test_scripts.py b/nibabel/tests/test_scripts.py index 2c17c33fd1..d15403a881 100644 --- a/nibabel/tests/test_scripts.py +++ b/nibabel/tests/test_scripts.py @@ -4,7 +4,6 @@ Test running scripts """ -from __future__ import division, print_function, absolute_import import sys import os @@ -21,10 +20,9 @@ from ..loadsave import load from ..orientations import flip_axis, aff2axcodes, inv_ornt_aff -from nose.tools import assert_true, assert_false, assert_equal -from nose import SkipTest - -from numpy.testing import assert_almost_equal, assert_array_equal +import unittest +import pytest +from numpy.testing import assert_almost_equal from .scriptrunner import ScriptRunner from .nibabel_data import needs_nibabel_data @@ -32,7 +30,7 @@ from .test_parrec import (DTI_PAR_BVECS, DTI_PAR_BVALS, EXAMPLE_IMAGES as PARREC_EXAMPLES) from .test_parrec_data import BALLS, AFF_OFF -from .test_helpers import assert_data_similar +from ..testing import assert_data_similar def _proc_stdout(stdout): @@ -56,6 +54,14 @@ def script_test(func): DATA_PATH = abspath(pjoin(dirname(__file__), 'data')) +def load_small_file(): + try: + load(pjoin(DATA_PATH, 'small.mnc')) + return True + except: + return False + + def check_nib_ls_example4d(opts=[], hdrs_str="", other_str=""): # test nib-ls script fname = pjoin(DATA_PATH, 'example4d.nii.gz') @@ -64,7 +70,7 @@ def check_nib_ls_example4d(opts=[], hdrs_str="", other_str=""): % (hdrs_str, other_str)) cmd = ['nib-ls'] + opts + [fname] code, stdout, stderr = run_command(cmd) - assert_equal(fname, stdout[:len(fname)]) + assert fname == stdout[:len(fname)] assert_re_in(expected_re, stdout[len(fname):]) @@ -77,45 +83,45 @@ def check_nib_diff_examples(): "quatern_c", "quatern_d", "qoffset_x", "qoffset_y", "qoffset_z", "srow_x", "srow_y", "srow_z", "DATA(md5)", "DATA(diff 1:)"] for item in checked_fields: - assert_true(item in stdout) + assert item in stdout fnames2 = [pjoin(DATA_PATH, f) for f in ('example4d.nii.gz', 'example4d.nii.gz')] code, stdout, stderr = run_command(['nib-diff'] + fnames2, check_code=False) - assert_equal(stdout, "These files are identical.") + assert stdout == "These files are identical." fnames3 = [pjoin(DATA_PATH, f) for f in ('standard.nii.gz', 'example4d.nii.gz', 'example_nifti2.nii.gz')] code, stdout, stderr = run_command(['nib-diff'] + fnames3, check_code=False) for item in checked_fields: - assert_true(item in stdout) + assert item in stdout fnames4 = [pjoin(DATA_PATH, f) for f in ('standard.nii.gz', 'standard.nii.gz', 'standard.nii.gz')] code, stdout, stderr = run_command(['nib-diff'] + fnames4, check_code=False) - assert_equal(stdout, "These files are identical.") + assert stdout == "These files are identical." code, stdout, stderr = run_command(['nib-diff', '--dt', 'float64'] + fnames, check_code=False) for item in checked_fields: - assert_true(item in stdout) + assert item in stdout - -@script_test -def test_nib_ls(): - yield check_nib_ls_example4d - yield check_nib_ls_example4d, \ - ['-H', 'dim,bitpix'], " \[ 4 128 96 24 2 1 1 1\] 16" - yield check_nib_ls_example4d, ['-c'], "", " !1030 uniques. Use --all-counts" - yield check_nib_ls_example4d, ['-c', '--all-counts'], "", " 2:3 3:2 4:1 5:1.*" +@pytest.mark.parametrize("args", [ + [], + [['-H', 'dim,bitpix'], " \[ 4 128 96 24 2 1 1 1\] 16"], + [['-c'], "", " !1030 uniques. 
Use --all-counts"], + [['-c', '--all-counts'], "", " 2:3 3:2 4:1 5:1.*"], # both stats and counts - yield check_nib_ls_example4d, \ - ['-c', '-s', '--all-counts'], "", " \[229725\] \[2, 1.2e\+03\] 2:3 3:2 4:1 5:1.*" + [['-c', '-s', '--all-counts'], "", " \[229725\] \[2, 1.2e\+03\] 2:3 3:2 4:1 5:1.*"], # and must not error out if we allow for zeros - yield check_nib_ls_example4d, \ - ['-c', '-s', '-z', '--all-counts'], "", " \[589824\] \[0, 1.2e\+03\] 0:360099 2:3 3:2 4:1 5:1.*" + [['-c', '-s', '-z', '--all-counts'], "", " \[589824\] \[0, 1.2e\+03\] 0:360099 2:3 3:2 4:1 5:1.*"], +]) +@script_test +def test_nib_ls(args): + check_nib_ls_example4d(*args) +@unittest.skipUnless(load_small_file(), "Can't load the small.mnc file") @script_test def test_nib_ls_multiple(): # verify that correctly lists/formats for multiple files @@ -126,42 +132,35 @@ def test_nib_ls_multiple(): ] code, stdout, stderr = run_command(['nib-ls'] + fnames) stdout_lines = stdout.split('\n') - assert_equal(len(stdout_lines), 4) - try: - load(pjoin(DATA_PATH, 'small.mnc')) - except: - raise SkipTest("For the other tests should be able to load MINC files") + assert len(stdout_lines) == 4 # they should be indented correctly. Since all files are int type - ln = max(len(f) for f in fnames) i_str = ' i' if sys.byteorder == 'little' else ' -TINY)) - assert_true(np.all(out_grid < np.array(out_shape) + TINY)) + assert np.all(out_grid > -TINY) + assert np.all(out_grid < np.array(out_shape) + TINY) def get_outspace_params(): @@ -84,23 +80,27 @@ def test_vox2out_vox(): # Test world space bounding box # Test basic case, identity, no voxel sizes passed shape, aff = vox2out_vox(((2, 3, 4), np.eye(4))) - assert_array_equal(shape, (2, 3, 4)) - assert_array_equal(aff, np.eye(4)) + assert shape == (2, 3, 4) + assert (aff == np.eye(4)).all() for in_shape, in_aff, vox, out_shape, out_aff in get_outspace_params(): img = Nifti1Image(np.ones(in_shape), in_aff) for input in ((in_shape, in_aff), img): shape, aff = vox2out_vox(input, vox) assert_all_in(in_shape, in_aff, shape, aff) - assert_equal(shape, out_shape) + assert shape == out_shape assert_almost_equal(aff, out_aff) - assert_true(isinstance(shape, tuple)) - assert_true(isinstance(shape[0], int)) + assert isinstance(shape, tuple) + assert isinstance(shape[0], int) # Enforce number of axes - assert_raises(ValueError, vox2out_vox, ((2, 3, 4, 5), np.eye(4))) - assert_raises(ValueError, vox2out_vox, ((2, 3, 4, 5, 6), np.eye(4))) + with pytest.raises(ValueError): + vox2out_vox(((2, 3, 4, 5), np.eye(4))) + with pytest.raises(ValueError): + vox2out_vox(((2, 3, 4, 5, 6), np.eye(4))) # Voxel sizes must be positive - assert_raises(ValueError, vox2out_vox, ((2, 3, 4), np.eye(4), [-1, 1, 1])) - assert_raises(ValueError, vox2out_vox, ((2, 3, 4), np.eye(4), [1, 0, 1])) + with pytest.raises(ValueError): + vox2out_vox(((2, 3, 4), np.eye(4), [-1, 1, 1])) + with pytest.raises(ValueError): + vox2out_vox(((2, 3, 4), np.eye(4), [1, 0, 1])) def test_slice2volume(): @@ -112,7 +112,14 @@ def test_slice2volume(): for val in (0, 5, 10): exp_aff = np.array(def_aff) exp_aff[axis, -1] = val - assert_array_equal(slice2volume(val, axis), exp_aff) - assert_raises(ValueError, slice2volume, -1, 0) - assert_raises(ValueError, slice2volume, 0, -1) - assert_raises(ValueError, slice2volume, 0, 3) + assert (slice2volume(val, axis) == exp_aff).all() + + +@pytest.mark.parametrize("index, axis", [ + [-1, 0], + [0, -1], + [0, 3] +]) +def test_slice2volume_exception(index, axis): + with pytest.raises(ValueError): + slice2volume(index, 
axis) \ No newline at end of file diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py index b0f571023d..58b41d5822 100644 --- a/nibabel/tests/test_spatialimages.py +++ b/nibabel/tests/test_spatialimages.py @@ -15,57 +15,59 @@ import numpy as np from io import BytesIO -from ..spatialimages import (SpatialHeader, SpatialImage, HeaderDataError, - Header, ImageDataError) +from ..spatialimages import SpatialHeader, SpatialImage, HeaderDataError, Header from ..imageclasses import spatial_axes_first +import pytest from unittest import TestCase -from nose.tools import (assert_true, assert_false, assert_equal, - assert_not_equal, assert_raises) -from numpy.testing import assert_array_equal, assert_array_almost_equal +from numpy.testing import assert_array_almost_equal + +from ..testing import ( + bytesio_round_trip, + clear_and_catch_warnings, + suppress_warnings, + memmap_after_ufunc +) -from .test_helpers import bytesio_round_trip -from ..testing import (clear_and_catch_warnings, suppress_warnings, - memmap_after_ufunc) from ..tmpdirs import InTemporaryDirectory +from ..deprecator import ExpiredDeprecationError from .. import load as top_load - def test_header_init(): # test the basic header hdr = Header() - assert_equal(hdr.get_data_dtype(), np.dtype(np.float32)) - assert_equal(hdr.get_data_shape(), (0,)) - assert_equal(hdr.get_zooms(), (1.0,)) + assert hdr.get_data_dtype() == np.dtype(np.float32) + assert hdr.get_data_shape() == (0,) + assert hdr.get_zooms() == (1.0,) hdr = Header(np.float64) - assert_equal(hdr.get_data_dtype(), np.dtype(np.float64)) - assert_equal(hdr.get_data_shape(), (0,)) - assert_equal(hdr.get_zooms(), (1.0,)) + assert hdr.get_data_dtype() == np.dtype(np.float64) + assert hdr.get_data_shape() == (0,) + assert hdr.get_zooms() == (1.0,) hdr = Header(np.float64, shape=(1, 2, 3)) - assert_equal(hdr.get_data_dtype(), np.dtype(np.float64)) - assert_equal(hdr.get_data_shape(), (1, 2, 3)) - assert_equal(hdr.get_zooms(), (1.0, 1.0, 1.0)) + assert hdr.get_data_dtype() == np.dtype(np.float64) + assert hdr.get_data_shape() == (1, 2, 3) + assert hdr.get_zooms() == (1.0, 1.0, 1.0) hdr = Header(np.float64, shape=(1, 2, 3), zooms=None) - assert_equal(hdr.get_data_dtype(), np.dtype(np.float64)) - assert_equal(hdr.get_data_shape(), (1, 2, 3)) - assert_equal(hdr.get_zooms(), (1.0, 1.0, 1.0)) + assert hdr.get_data_dtype() == np.dtype(np.float64) + assert hdr.get_data_shape() == (1, 2, 3) + assert hdr.get_zooms() == (1.0, 1.0, 1.0) hdr = Header(np.float64, shape=(1, 2, 3), zooms=(3.0, 2.0, 1.0)) - assert_equal(hdr.get_data_dtype(), np.dtype(np.float64)) - assert_equal(hdr.get_data_shape(), (1, 2, 3)) - assert_equal(hdr.get_zooms(), (3.0, 2.0, 1.0)) + assert hdr.get_data_dtype() == np.dtype(np.float64) + assert hdr.get_data_shape() == (1, 2, 3) + assert hdr.get_zooms() == (3.0, 2.0, 1.0) def test_from_header(): # check from header class method. Note equality checks below, # equality methods used here too. 
empty = Header.from_header() - assert_equal(Header(), empty) + assert Header() == empty empty = Header.from_header(None) - assert_equal(Header(), empty) + assert Header() == empty hdr = Header(np.float64, shape=(1, 2, 3), zooms=(3.0, 2.0, 1.0)) copy = Header.from_header(hdr) - assert_equal(hdr, copy) - assert_false(hdr is copy) + assert hdr == copy + assert hdr is not copy class C(object): @@ -75,25 +77,25 @@ def get_data_shape(self): return (5, 4, 3) def get_zooms(self): return (10.0, 9.0, 8.0) converted = Header.from_header(C()) - assert_true(isinstance(converted, Header)) - assert_equal(converted.get_data_dtype(), np.dtype('u2')) - assert_equal(converted.get_data_shape(), (5, 4, 3)) - assert_equal(converted.get_zooms(), (10.0, 9.0, 8.0)) + assert isinstance(converted, Header) + assert converted.get_data_dtype() == np.dtype('u2') + assert converted.get_data_shape() == (5, 4, 3) + assert converted.get_zooms() == (10.0, 9.0, 8.0) def test_eq(): hdr = Header() other = Header() - assert_equal(hdr, other) + assert hdr == other other = Header('u2') - assert_not_equal(hdr, other) + assert hdr != other other = Header(shape=(1, 2, 3)) - assert_not_equal(hdr, other) + assert hdr != other hdr = Header(shape=(1, 2)) other = Header(shape=(1, 2)) - assert_equal(hdr, other) + assert hdr == other other = Header(shape=(1, 2), zooms=(2.0, 3.0)) - assert_not_equal(hdr, other) + assert hdr != other def test_copy(): @@ -101,51 +103,50 @@ def test_copy(): hdr = Header(np.float64, shape=(1, 2, 3), zooms=(3.0, 2.0, 1.0)) hdr_copy = hdr.copy() hdr.set_data_shape((4, 5, 6)) - assert_equal(hdr.get_data_shape(), (4, 5, 6)) - assert_equal(hdr_copy.get_data_shape(), (1, 2, 3)) + assert hdr.get_data_shape() == (4, 5, 6) + assert hdr_copy.get_data_shape() == (1, 2, 3) hdr.set_zooms((4, 5, 6)) - assert_equal(hdr.get_zooms(), (4, 5, 6)) - assert_equal(hdr_copy.get_zooms(), (3, 2, 1)) + assert hdr.get_zooms() == (4, 5, 6) + assert hdr_copy.get_zooms() == (3, 2, 1) hdr.set_data_dtype(np.uint8) - assert_equal(hdr.get_data_dtype(), np.dtype(np.uint8)) - assert_equal(hdr_copy.get_data_dtype(), np.dtype(np.float64)) + assert hdr.get_data_dtype() == np.dtype(np.uint8) + assert hdr_copy.get_data_dtype() == np.dtype(np.float64) def test_shape_zooms(): hdr = Header() hdr.set_data_shape((1, 2, 3)) - assert_equal(hdr.get_data_shape(), (1, 2, 3)) - assert_equal(hdr.get_zooms(), (1.0, 1.0, 1.0)) + assert hdr.get_data_shape() == (1, 2, 3) + assert hdr.get_zooms() == (1.0, 1.0, 1.0) hdr.set_zooms((4, 3, 2)) - assert_equal(hdr.get_zooms(), (4.0, 3.0, 2.0)) + assert hdr.get_zooms() == (4.0, 3.0, 2.0) hdr.set_data_shape((1, 2)) - assert_equal(hdr.get_data_shape(), (1, 2)) - assert_equal(hdr.get_zooms(), (4.0, 3.0)) + assert hdr.get_data_shape() == (1, 2) + assert hdr.get_zooms() == (4.0, 3.0) hdr.set_data_shape((1, 2, 3)) - assert_equal(hdr.get_data_shape(), (1, 2, 3)) - assert_equal(hdr.get_zooms(), (4.0, 3.0, 1.0)) + assert hdr.get_data_shape() == (1, 2, 3) + assert hdr.get_zooms() == (4.0, 3.0, 1.0) # null shape is (0,) hdr.set_data_shape(()) - assert_equal(hdr.get_data_shape(), (0,)) - assert_equal(hdr.get_zooms(), (1.0,)) + assert hdr.get_data_shape() == (0,) + assert hdr.get_zooms() == (1.0,) # zooms of wrong lengths raise error - assert_raises(HeaderDataError, hdr.set_zooms, (4.0, 3.0)) - assert_raises(HeaderDataError, - hdr.set_zooms, - (4.0, 3.0, 2.0, 1.0)) + with pytest.raises(HeaderDataError): + hdr.set_zooms((4.0, 3.0)) + with pytest.raises(HeaderDataError): + hdr.set_zooms((4.0, 3.0, 2.0, 1.0)) # as do negative zooms - 
assert_raises(HeaderDataError, - hdr.set_zooms, - (4.0, 3.0, -2.0)) + with pytest.raises(HeaderDataError): + hdr.set_zooms((4.0, 3.0, -2.0)) def test_data_dtype(): hdr = Header() - assert_equal(hdr.get_data_dtype(), np.dtype(np.float32)) + assert hdr.get_data_dtype() == np.dtype(np.float32) hdr.set_data_dtype(np.float64) - assert_equal(hdr.get_data_dtype(), np.dtype(np.float64)) + assert hdr.get_data_dtype() == np.dtype(np.float64) hdr.set_data_dtype('u2') - assert_equal(hdr.get_data_dtype(), np.dtype(np.uint16)) + assert hdr.get_data_dtype() == np.dtype(np.uint16) def test_affine(): @@ -161,8 +162,7 @@ def test_affine(): [0, 2, 0, -1], [0, 0, 1, -1], [0, 0, 0, 1]]) - assert_array_equal(hdr.get_base_affine(), - hdr.get_best_affine()) + assert np.array_equal(hdr.get_base_affine(), hdr.get_best_affine()) def test_read_data(): @@ -173,30 +173,27 @@ class CHeader(SpatialHeader): fobj = BytesIO() data = np.arange(6).reshape((1, 2, 3)) hdr.data_to_fileobj(data, fobj) - assert_equal(fobj.getvalue(), - data.astype(np.int32).tostring(order=order)) + assert fobj.getvalue() == data.astype(np.int32).tostring(order=order) # data_to_fileobj accepts kwarg 'rescale', but no effect in this case fobj.seek(0) hdr.data_to_fileobj(data, fobj, rescale=True) - assert_equal(fobj.getvalue(), - data.astype(np.int32).tostring(order=order)) + assert fobj.getvalue() == data.astype(np.int32).tostring(order=order) # data_to_fileobj can be a list fobj.seek(0) hdr.data_to_fileobj(data.tolist(), fobj, rescale=True) - assert_equal(fobj.getvalue(), - data.astype(np.int32).tostring(order=order)) + assert fobj.getvalue() == data.astype(np.int32).tostring(order=order) # Read data back again fobj.seek(0) data2 = hdr.data_from_fileobj(fobj) - assert_array_equal(data, data2) + assert (data == data2).all() class DataLike(object): # Minimal class implementing 'data' API shape = (3,) - def __array__(self): - return np.arange(3, dtype=np.int16) + def __array__(self, dtype='int16'): + return np.arange(3, dtype=dtype) class TestSpatialImage(TestCase): @@ -210,33 +207,33 @@ def test_isolation(self): arr = np.arange(24, dtype=np.int16).reshape((2, 3, 4)) aff = np.eye(4) img = img_klass(arr, aff) - assert_array_equal(img.affine, aff) + assert (img.affine == aff).all() aff[0, 0] = 99 - assert_false(np.all(img.affine == aff)) + assert not np.all(img.affine == aff) # header, created by image creation ihdr = img.header # Pass it back in img = img_klass(arr, aff, ihdr) # Check modifying header outside does not modify image ihdr.set_zooms((4, 5, 6)) - assert_not_equal(img.header, ihdr) + assert img.header != ihdr def test_float_affine(self): # Check affines get converted to float img_klass = self.image_class arr = np.arange(3, dtype=np.int16) img = img_klass(arr, np.eye(4, dtype=np.float32)) - assert_equal(img.affine.dtype, np.dtype(np.float64)) + assert img.affine.dtype == np.dtype(np.float64) img = img_klass(arr, np.eye(4, dtype=np.int16)) - assert_equal(img.affine.dtype, np.dtype(np.float64)) + assert img.affine.dtype == np.dtype(np.float64) def test_images(self): # Assumes all possible images support int16 # See https://github.com/nipy/nibabel/issues/58 arr = np.arange(24, dtype=np.int16).reshape((2, 3, 4)) img = self.image_class(arr, None) - assert_array_equal(img.get_data(), arr) - assert_equal(img.affine, None) + assert (img.get_fdata() == arr).all() + assert img.affine is None def test_default_header(self): # Check default header is as expected @@ -245,21 +242,21 @@ def test_default_header(self): hdr = 
self.image_class.header_class() hdr.set_data_shape(arr.shape) hdr.set_data_dtype(arr.dtype) - assert_equal(img.header, hdr) + assert img.header == hdr def test_data_api(self): # Test minimal api data object can initialize img = self.image_class(DataLike(), None) # Shape may be promoted to higher dimension, but may not reorder or # change size - assert_array_equal(img.get_data().flatten(), np.arange(3)) - assert_equal(img.get_shape()[:1], (3,)) - assert_equal(np.prod(img.get_shape()), 3) + assert (img.get_fdata().flatten() == np.arange(3)).all() + assert img.shape[:1] == (3,) + assert np.prod(img.shape) == 3 def check_dtypes(self, expected, actual): # Some images will want dtypes to be equal including endianness, # others may only require the same type - assert_equal(expected, actual) + assert expected == actual def test_data_default(self): # check that the default dtype comes from the data if the header @@ -284,10 +281,10 @@ def test_data_shape(self): img = img_klass(arr, np.eye(4)) # Shape may be promoted to higher dimension, but may not reorder or # change size - assert_equal(img.get_shape()[:1], (4,)) - assert_equal(np.prod(img.get_shape()), 4) + assert img.shape[:1] == (4,) + assert np.prod(img.shape) == 4 img = img_klass(np.zeros((2, 3, 4), dtype=np.float32), np.eye(4)) - assert_equal(img.shape, (2, 3, 4)) + assert img.shape == (2, 3, 4) def test_str(self): # Check something comes back from string representation @@ -296,28 +293,22 @@ def test_str(self): # See https://github.com/nipy/nibabel/issues/58 arr = np.arange(5, dtype=np.int16) img = img_klass(arr, np.eye(4)) - assert_true(len(str(img)) > 0) + assert len(str(img)) > 0 # Shape may be promoted to higher dimension, but may not reorder or # change size - assert_equal(img.shape[:1], (5,)) - assert_equal(np.prod(img.shape), 5) + assert img.shape[:1] == (5,) + assert np.prod(img.shape) == 5 img = img_klass(np.zeros((2, 3, 4), dtype=np.int16), np.eye(4)) - assert_true(len(str(img)) > 0) + assert len(str(img)) > 0 def test_get_shape(self): - # Check there is a get_shape method - # (it is deprecated) + # Check that get_shape raises an ExpiredDeprecationError img_klass = self.image_class # Assumes all possible images support int16 # See https://github.com/nipy/nibabel/issues/58 img = img_klass(np.arange(1, dtype=np.int16), np.eye(4)) - with suppress_warnings(): - # Shape may be promoted to higher dimension, but may not reorder or - # change size - assert_equal(img.get_shape()[:1], (1,)) - assert_equal(np.prod(img.get_shape()), 1) - img = img_klass(np.zeros((2, 3, 4), np.int16), np.eye(4)) - assert_equal(img.get_shape(), (2, 3, 4)) + with pytest.raises(ExpiredDeprecationError): + img.get_shape() def test_get_fdata(self): # Test array image and proxy image interface for floating point data @@ -325,55 +316,57 @@ def test_get_fdata(self): in_data_template = np.arange(24, dtype=np.int16).reshape((2, 3, 4)) in_data = in_data_template.copy() img = img_klass(in_data, None) - assert_true(in_data is img.dataobj) + assert in_data is img.dataobj # The get_fdata method changes the array to floating point type - assert_equal(img.get_fdata(dtype='f4').dtype, np.dtype(np.float32)) + assert img.get_fdata(dtype='f4').dtype == np.dtype(np.float32) fdata_32 = img.get_fdata(dtype=np.float32) - assert_equal(fdata_32.dtype, np.dtype(np.float32)) + assert fdata_32.dtype == np.dtype(np.float32) # Caching is specific to data dtype. 
If we reload with default data # type, the cache gets reset fdata_32[:] = 99 # Cache has been modified, we pick up the modifications, but only for # the cached data type - assert_array_equal(img.get_fdata(dtype='f4'), 99) + assert (img.get_fdata(dtype='f4') == 99).all() fdata_64 = img.get_fdata() - assert_equal(fdata_64.dtype, np.dtype(np.float64)) - assert_array_equal(fdata_64, in_data) + assert fdata_64.dtype == np.dtype(np.float64) + assert (fdata_64 == in_data).all() fdata_64[:] = 101 - assert_array_equal(img.get_fdata(dtype='f8'), 101) - assert_array_equal(img.get_fdata(), 101) + assert (img.get_fdata(dtype='f8') == 101).all() + assert (img.get_fdata() == 101).all() # Reloading with new data type blew away the float32 cache - assert_array_equal(img.get_fdata(dtype='f4'), in_data) + assert (img.get_fdata(dtype='f4') == in_data).all() img.uncache() # Now recaching, is float64 out_data = img.get_fdata() - assert_equal(out_data.dtype, np.dtype(np.float64)) + assert out_data.dtype == np.dtype(np.float64) # Input dtype needs to be floating point - assert_raises(ValueError, img.get_fdata, dtype=np.int16) - assert_raises(ValueError, img.get_fdata, dtype=np.int32) + with pytest.raises(ValueError): + img.get_fdata(dtype=np.int16) + with pytest.raises(ValueError): + img.get_fdata(dtype=np.int32) # The cache is filled out_data[:] = 42 - assert_true(img.get_fdata() is out_data) + assert img.get_fdata() is out_data img.uncache() - assert_false(img.get_fdata() is out_data) + assert img.get_fdata() is not out_data # The 42 has gone now. - assert_array_equal(img.get_fdata(), in_data_template) + assert (img.get_fdata() == in_data_template).all() # If we can save, we can create a proxy image if not self.can_save: return rt_img = bytesio_round_trip(img) - assert_false(in_data is rt_img.dataobj) - assert_array_equal(rt_img.dataobj, in_data) + assert in_data is not rt_img.dataobj + assert (rt_img.dataobj == in_data).all() out_data = rt_img.get_fdata() - assert_array_equal(out_data, in_data) - assert_false(rt_img.dataobj is out_data) - assert_equal(out_data.dtype, np.dtype(np.float64)) + assert (out_data == in_data).all() + assert rt_img.dataobj is not out_data + assert out_data.dtype == np.dtype(np.float64) # cache - assert_true(rt_img.get_fdata() is out_data) + assert rt_img.get_fdata() is out_data out_data[:] = 42 rt_img.uncache() - assert_false(rt_img.get_fdata() is out_data) - assert_array_equal(rt_img.get_fdata(), in_data) + assert rt_img.get_fdata() is not out_data + assert (rt_img.get_fdata() == in_data).all() def test_get_data(self): # Test array image and proxy image interface @@ -382,36 +375,41 @@ def test_get_data(self): in_data = in_data_template.copy() img = img_klass(in_data, None) # Can't slice into the image object: - with assert_raises(TypeError) as exception_manager: + with pytest.raises(TypeError) as exception_manager: img[0, 0, 0] # Make sure the right message gets raised: - assert_equal(str(exception_manager.exception), - "Cannot slice image objects; consider using " - "`img.slicer[slice]` to generate a sliced image (see " - "documentation for caveats) or slicing image array data " - "with `img.dataobj[slice]` or `img.get_data()[slice]`") - assert_true(in_data is img.dataobj) - out_data = img.get_data() - assert_true(in_data is out_data) + assert (str(exception_manager.value) == + "Cannot slice image objects; consider using " + "`img.slicer[slice]` to generate a sliced image (see " + "documentation for caveats) or slicing image array data " + "with `img.dataobj[slice]` or 
`img.get_fdata()[slice]`") + assert in_data is img.dataobj + with pytest.deprecated_call(): + out_data = img.get_data() + assert in_data is out_data # and that uncache has no effect img.uncache() - assert_true(in_data is out_data) - assert_array_equal(out_data, in_data_template) + assert in_data is out_data + assert (out_data == in_data_template).all() # If we can save, we can create a proxy image if not self.can_save: return rt_img = bytesio_round_trip(img) - assert_false(in_data is rt_img.dataobj) - assert_array_equal(rt_img.dataobj, in_data) - out_data = rt_img.get_data() - assert_array_equal(out_data, in_data) - assert_false(rt_img.dataobj is out_data) + assert in_data is not rt_img.dataobj + assert (rt_img.dataobj == in_data).all() + with pytest.deprecated_call(): + out_data = rt_img.get_data() + assert (out_data == in_data).all() + assert rt_img.dataobj is not out_data # cache - assert_true(rt_img.get_data() is out_data) + with pytest.deprecated_call(): + assert rt_img.get_data() is out_data out_data[:] = 42 rt_img.uncache() - assert_false(rt_img.get_data() is out_data) - assert_array_equal(rt_img.get_data(), in_data) + with pytest.deprecated_call(): + assert rt_img.get_data() is not out_data + with pytest.deprecated_call(): + assert (rt_img.get_data() == in_data).all() def test_slicer(self): img_klass = self.image_class @@ -424,11 +422,11 @@ def test_slicer(self): img = img_klass(in_data, base_affine.copy()) if not spatial_axes_first(img): - with assert_raises(ValueError): + with pytest.raises(ValueError): img.slicer continue - assert_true(hasattr(img.slicer, '__getitem__')) + assert hasattr(img.slicer, '__getitem__') # Note spatial zooms are always first 3, even when spatial_zooms = img.header.get_zooms()[:3] @@ -437,50 +435,48 @@ def test_slicer(self): sliceobj = [slice(None, None, 2)] * 3 + \ [slice(None)] * (len(dshape) - 3) downsampled_img = img.slicer[tuple(sliceobj)] - assert_array_equal(downsampled_img.header.get_zooms()[:3], - np.array(spatial_zooms) * 2) + assert (downsampled_img.header.get_zooms()[:3] == np.array(spatial_zooms) * 2).all() max4d = (hasattr(img.header, '_structarr') and 'dims' in img.header._structarr.dtype.fields and img.header._structarr['dims'].shape == (4,)) # Check newaxis and single-slice errors - with assert_raises(IndexError): + with pytest.raises(IndexError): img.slicer[None] - with assert_raises(IndexError): + with pytest.raises(IndexError): img.slicer[0] # Axes 1 and 2 are always spatial - with assert_raises(IndexError): + with pytest.raises(IndexError): img.slicer[:, None] - with assert_raises(IndexError): + with pytest.raises(IndexError): img.slicer[:, 0] - with assert_raises(IndexError): + with pytest.raises(IndexError): img.slicer[:, :, None] - with assert_raises(IndexError): + with pytest.raises(IndexError): img.slicer[:, :, 0] if len(img.shape) == 4: if max4d: - with assert_raises(ValueError): + with pytest.raises(ValueError): img.slicer[:, :, :, None] else: # Reorder non-spatial axes - assert_equal(img.slicer[:, :, :, None].shape, - img.shape[:3] + (1,) + img.shape[3:]) + assert (img.slicer[:, :, :, None].shape + == img.shape[:3] + (1,) + img.shape[3:]) # 4D to 3D using ellipsis or slices - assert_equal(img.slicer[..., 0].shape, img.shape[:-1]) - assert_equal(img.slicer[:, :, :, 0].shape, img.shape[:-1]) + assert img.slicer[..., 0].shape == img.shape[:-1] + assert img.slicer[:, :, :, 0].shape == img.shape[:-1] else: # 3D Analyze/NIfTI/MGH to 4D - assert_equal(img.slicer[:, :, :, None].shape, img.shape + (1,)) + assert img.slicer[:, :, :, 
None].shape == img.shape + (1,) if len(img.shape) == 3: # Slices exceed dimensions - with assert_raises(IndexError): + with pytest.raises(IndexError): img.slicer[:, :, :, :, None] elif max4d: - with assert_raises(ValueError): + with pytest.raises(ValueError): img.slicer[:, :, :, :, None] else: - assert_equal(img.slicer[:, :, :, :, None].shape, - img.shape + (1,)) + assert img.slicer[:, :, :, :, None].shape == img.shape + (1,) # Crop by one voxel in each dimension sliced_i = img.slicer[1:] @@ -489,35 +485,35 @@ def test_slicer(self): sliced_ijk = img.slicer[1:, 1:, 1:] # No scaling change - assert_array_equal(sliced_i.affine[:3, :3], img.affine[:3, :3]) - assert_array_equal(sliced_j.affine[:3, :3], img.affine[:3, :3]) - assert_array_equal(sliced_k.affine[:3, :3], img.affine[:3, :3]) - assert_array_equal(sliced_ijk.affine[:3, :3], img.affine[:3, :3]) + assert (sliced_i.affine[:3, :3] == img.affine[:3, :3]).all() + assert (sliced_j.affine[:3, :3] == img.affine[:3, :3]).all() + assert (sliced_k.affine[:3, :3] == img.affine[:3, :3]).all() + assert (sliced_ijk.affine[:3, :3] == img.affine[:3, :3]).all() # Translation - assert_array_equal(sliced_i.affine[:, 3], [1, 0, 0, 1]) - assert_array_equal(sliced_j.affine[:, 3], [0, 1, 0, 1]) - assert_array_equal(sliced_k.affine[:, 3], [0, 0, 1, 1]) - assert_array_equal(sliced_ijk.affine[:, 3], [1, 1, 1, 1]) + assert (sliced_i.affine[:, 3] == [1, 0, 0, 1]).all() + assert (sliced_j.affine[:, 3] == [0, 1, 0, 1]).all() + assert (sliced_k.affine[:, 3] == [0, 0, 1, 1]).all() + assert (sliced_ijk.affine[:, 3] == [1, 1, 1, 1]).all() # No change to affines with upper-bound slices - assert_array_equal(img.slicer[:1, :1, :1].affine, img.affine) + assert (img.slicer[:1, :1, :1].affine == img.affine).all() # Yell about step = 0 - with assert_raises(ValueError): + with pytest.raises(ValueError): img.slicer[:, ::0] - with assert_raises(ValueError): + with pytest.raises(ValueError): img.slicer.slice_affine((slice(None), slice(None, None, 0))) # Don't permit zero-length slices - with assert_raises(IndexError): + with pytest.raises(IndexError): img.slicer[:0] # No fancy indexing - with assert_raises(IndexError): + with pytest.raises(IndexError): img.slicer[[0]] - with assert_raises(IndexError): + with pytest.raises(IndexError): img.slicer[[-1]] - with assert_raises(IndexError): + with pytest.raises(IndexError): img.slicer[[0], [-1]] # Check data is consistent with slicing numpy arrays @@ -534,10 +530,14 @@ def test_slicer(self): pass else: sliced_data = in_data[sliceobj] - assert_array_equal(sliced_data, sliced_img.get_data()) - assert_array_equal(sliced_data, sliced_img.dataobj) - assert_array_equal(sliced_data, img.dataobj[sliceobj]) - assert_array_equal(sliced_data, img.get_data()[sliceobj]) + with pytest.deprecated_call(): + assert (sliced_data == sliced_img.get_data()).all() + assert (sliced_data == sliced_img.get_fdata()).all() + assert (sliced_data == sliced_img.dataobj).all() + assert (sliced_data == img.dataobj[sliceobj]).all() + with pytest.deprecated_call(): + assert (sliced_data == img.get_data()[sliceobj]).all() + assert (sliced_data == img.get_fdata()[sliceobj]).all() def test_api_deprecations(self): @@ -559,18 +559,14 @@ def from_file_map(self, file_map=None): bio = BytesIO() file_map = FakeImage.make_file_map({'image': bio}) - with clear_and_catch_warnings() as w: - warnings.simplefilter('always', DeprecationWarning) + with pytest.raises(ExpiredDeprecationError): img.to_files(file_map) - assert_equal(len(w), 1) + with pytest.raises(ExpiredDeprecationError): 
img.to_filespec('an_image') - assert_equal(len(w), 2) - img = FakeImage.from_files(file_map) - assert_equal(len(w), 3) - file_map = FakeImage.filespec_to_files('an_image') - assert_equal(list(file_map), ['image']) - assert_equal(file_map['image'].filename, 'an_image.foo') - assert_equal(len(w), 4) + with pytest.raises(ExpiredDeprecationError): + FakeImage.from_files(file_map) + with pytest.raises(ExpiredDeprecationError): + FakeImage.filespec_to_files('an_image') class MmapImageMixin(object): @@ -632,30 +628,29 @@ def test_load_mmap(self): if mmap is not None: kwargs['mmap'] = mmap back_img = func(param1, **kwargs) - back_data = back_img.get_data() + back_data = np.asanyarray(back_img.dataobj) if expected_mode is None: - assert_false(isinstance(back_data, np.memmap), - 'Should not be a %s' % img_klass.__name__) + assert not isinstance(back_data, np.memmap), 'Should not be a %s' % img_klass.__name__ else: - assert_true(isinstance(back_data, np.memmap), - 'Not a %s' % img_klass.__name__) + assert isinstance(back_data, np.memmap), 'Not a %s' % img_klass.__name__ if self.check_mmap_mode: - assert_equal(back_data.mode, expected_mode) + assert back_data.mode == expected_mode del back_img, back_data # Check that mmap is keyword-only - assert_raises(TypeError, func, param1, True) + with pytest.raises(TypeError): + func(param1, True) # Check invalid values raise error - assert_raises(ValueError, func, param1, mmap='rw') - assert_raises(ValueError, func, param1, mmap='r+') + with pytest.raises(ValueError): + func(param1, mmap='rw') + with pytest.raises(ValueError): + func(param1, mmap='r+') def test_header_deprecated(): - with clear_and_catch_warnings() as w: - warnings.simplefilter('always', DeprecationWarning) - + with pytest.deprecated_call() as w: class MyHeader(Header): pass - assert_equal(len(w), 0) + assert len(w) == 0 MyHeader() - assert_equal(len(w), 1) + assert len(w) == 1 diff --git a/nibabel/tests/test_spm2analyze.py b/nibabel/tests/test_spm2analyze.py index e39e79b96e..a88d3cafd4 100644 --- a/nibabel/tests/test_spm2analyze.py +++ b/nibabel/tests/test_spm2analyze.py @@ -13,19 +13,18 @@ from ..spatialimages import HeaderTypeError, HeaderDataError from ..spm2analyze import Spm2AnalyzeHeader, Spm2AnalyzeImage +import pytest from numpy.testing import assert_array_equal -from ..testing import assert_equal, assert_raises from . 
import test_spm99analyze - class TestSpm2AnalyzeHeader(test_spm99analyze.TestSpm99AnalyzeHeader): header_class = Spm2AnalyzeHeader def test_slope_inter(self): hdr = self.header_class() - assert_equal(hdr.get_slope_inter(), (1.0, 0.0)) + assert hdr.get_slope_inter() == (1.0, 0.0) for in_tup, exp_err, out_tup, raw_slope in ( ((2.0,), None, (2.0, 0.), 2.), ((None,), None, (None, None), np.nan), @@ -43,16 +42,17 @@ def test_slope_inter(self): ((None, 0.0), None, (None, None), np.nan)): hdr = self.header_class() if not exp_err is None: - assert_raises(exp_err, hdr.set_slope_inter, *in_tup) + with pytest.raises(exp_err): + hdr.set_slope_inter(*in_tup) # raw set if not in_tup[0] is None: hdr['scl_slope'] = in_tup[0] else: hdr.set_slope_inter(*in_tup) - assert_equal(hdr.get_slope_inter(), out_tup) + assert hdr.get_slope_inter() == out_tup # Check set survives through checking hdr = Spm2AnalyzeHeader.from_header(hdr, check=True) - assert_equal(hdr.get_slope_inter(), out_tup) + assert hdr.get_slope_inter() == out_tup assert_array_equal(hdr['scl_slope'], raw_slope) diff --git a/nibabel/tests/test_spm99analyze.py b/nibabel/tests/test_spm99analyze.py index 5ee94e98c2..e84a18ea4f 100644 --- a/nibabel/tests/test_spm99analyze.py +++ b/nibabel/tests/test_spm99analyze.py @@ -12,13 +12,16 @@ from io import BytesIO -from numpy.testing import assert_array_equal, assert_array_almost_equal, dec +from numpy.testing import assert_array_equal, assert_array_almost_equal +import unittest +import pytest -# Decorator to skip tests requiring save / load if scipy not available for mat -# files from ..optpkg import optional_package _, have_scipy, _ = optional_package('scipy') -scipy_skip = dec.skipif(not have_scipy, 'scipy not available') + +# Decorator to skip tests requiring save / load if scipy not available for mat +# files +needs_scipy = unittest.skipUnless(have_scipy, 'scipy not available') from ..spm99analyze import (Spm99AnalyzeHeader, Spm99AnalyzeImage, HeaderTypeError) @@ -26,12 +29,14 @@ from ..volumeutils import apply_read_scaling, _dt_min_max from ..spatialimages import supported_np_types, HeaderDataError -from nose.tools import assert_true, assert_false, assert_equal, assert_raises - -from ..testing import assert_allclose_safely, suppress_warnings +from ..testing import ( + bytesio_round_trip, + bytesio_filemap, + assert_allclose_safely, + suppress_warnings +) from . 
import test_analyze -from .test_helpers import (bytesio_round_trip, bytesio_filemap, bz2_mio_error) FLOAT_TYPES = np.sctypes['float'] COMPLEX_TYPES = np.sctypes['complex'] @@ -61,7 +66,7 @@ def test_data_scaling(self): # almost equal assert_array_almost_equal(data, data_back, 4) # But not quite - assert_false(np.all(data == data_back)) + assert not np.all(data == data_back) # This is exactly the same call, just testing it works twice data_back2 = hdr.data_from_fileobj(S3) assert_array_equal(data_back, data_back2, 4) @@ -69,12 +74,12 @@ def test_data_scaling(self): hdr.data_to_fileobj(data, S3, rescale=True) data_back = hdr.data_from_fileobj(S3) assert_array_almost_equal(data, data_back, 4) - assert_false(np.all(data == data_back)) + assert not np.all(data == data_back) # This doesn't use scaling, and so gets perfect precision with np.errstate(invalid='ignore'): hdr.data_to_fileobj(data, S3, rescale=False) data_back = hdr.data_from_fileobj(S3) - assert_true(np.all(data == data_back)) + assert np.all(data == data_back) class TestSpm99AnalyzeHeader(test_analyze.TestAnalyzeHeader, @@ -84,7 +89,7 @@ class TestSpm99AnalyzeHeader(test_analyze.TestAnalyzeHeader, def test_empty(self): super(TestSpm99AnalyzeHeader, self).test_empty() hdr = self.header_class() - assert_equal(hdr['scl_slope'], 1) + assert hdr['scl_slope'] == 1 def test_big_scaling(self): # Test that upcasting works for huge scalefactors @@ -98,11 +103,11 @@ def test_big_scaling(self): data = np.array([type_info(dtt)['max']], dtype=dtt)[:, None, None] hdr.data_to_fileobj(data, sio) data_back = hdr.data_from_fileobj(sio) - assert_true(np.allclose(data, data_back)) + assert np.allclose(data, data_back) def test_slope_inter(self): hdr = self.header_class() - assert_equal(hdr.get_slope_inter(), (1.0, None)) + assert hdr.get_slope_inter() == (1.0, None) for in_tup, exp_err, out_tup, raw_slope in ( ((2.0,), None, (2.0, None), 2.), ((None,), None, (None, None), np.nan), @@ -120,16 +125,17 @@ def test_slope_inter(self): ((None, 0.0), None, (None, None), np.nan)): hdr = self.header_class() if not exp_err is None: - assert_raises(exp_err, hdr.set_slope_inter, *in_tup) + with pytest.raises(exp_err): + hdr.set_slope_inter(*in_tup) # raw set if not in_tup[0] is None: hdr['scl_slope'] = in_tup[0] else: hdr.set_slope_inter(*in_tup) - assert_equal(hdr.get_slope_inter(), out_tup) + assert hdr.get_slope_inter() == out_tup # Check set survives through checking hdr = Spm99AnalyzeHeader.from_header(hdr, check=True) - assert_equal(hdr.get_slope_inter(), out_tup) + assert hdr.get_slope_inter() == out_tup assert_array_equal(hdr['scl_slope'], raw_slope) def test_origin_checks(self): @@ -139,16 +145,14 @@ def test_origin_checks(self): hdr.data_shape = [1, 1, 1] hdr['origin'][0] = 101 # severity 20 fhdr, message, raiser = self.log_chk(hdr, 20) - assert_equal(fhdr, hdr) - assert_equal(message, 'very large origin values ' - 'relative to dims; leaving as set, ' - 'ignoring for affine') - assert_raises(*raiser) + assert fhdr == hdr + assert (message == 'very large origin values ' + 'relative to dims; leaving as set, ' + 'ignoring for affine') + pytest.raises(*raiser) # diagnose binary block dxer = self.header_class.diagnose_binaryblock - assert_equal(dxer(hdr.binaryblock), - 'very large origin values ' - 'relative to dims') + assert dxer(hdr.binaryblock) == 'very large origin values relative to dims' class ImageScalingMixin(object): @@ -165,9 +169,9 @@ def assert_scale_me_scaling(self, hdr): # Assert that header `hdr` has "scale-me" scaling slope, inter = 
self._get_raw_scaling(hdr) if not slope is None: - assert_true(np.isnan(slope)) + assert np.isnan(slope) if not inter is None: - assert_true(np.isnan(inter)) + assert np.isnan(inter) def _get_raw_scaling(self, hdr): return hdr['scl_slope'], None @@ -187,7 +191,7 @@ def assert_null_scaling(self, arr, slope, inter): img = img_class(arr, np.eye(4), input_hdr) img_hdr = img.header self._set_raw_scaling(input_hdr, slope, inter) - assert_array_equal(img.get_data(), arr) + assert_array_equal(img.get_fdata(), arr) # Scaling has no effect on image as written via header (with rescaling # turned off). fm = bytesio_filemap(img) @@ -196,12 +200,12 @@ def assert_null_scaling(self, arr, slope, inter): img_hdr.write_to(hdr_fobj) img_hdr.data_to_fileobj(arr, img_fobj, rescale=False) raw_rt_img = img_class.from_file_map(fm) - assert_array_equal(raw_rt_img.get_data(), arr) + assert_array_equal(raw_rt_img.get_fdata(), arr) # Scaling makes no difference for image round trip fm = bytesio_filemap(img) img.to_file_map(fm) rt_img = img_class.from_file_map(fm) - assert_array_equal(rt_img.get_data(), arr) + assert_array_equal(rt_img.get_fdata(), arr) def test_header_scaling(self): # For images that implement scaling, test effect of scaling @@ -258,20 +262,20 @@ def _check_write_scaling(self, img = img_class(arr, aff) self.assert_scale_me_scaling(img.header) # Array from image unchanged by scaling - assert_array_equal(img.get_data(), arr) + assert_array_equal(img.get_fdata(), arr) # As does round trip img_rt = bytesio_round_trip(img) self.assert_scale_me_scaling(img_rt.header) # Round trip array is not scaled - assert_array_equal(img_rt.get_data(), arr) + assert_array_equal(img_rt.get_fdata(), arr) # Explicit scaling causes scaling after round trip self._set_raw_scaling(img.header, slope, inter) self.assert_scaling_equal(img.header, slope, inter) # Array from image unchanged by scaling - assert_array_equal(img.get_data(), arr) + assert_array_equal(img.get_fdata(), arr) # But the array scaled after round trip img_rt = bytesio_round_trip(img) - assert_array_equal(img_rt.get_data(), + assert_array_equal(img_rt.get_fdata(), apply_read_scaling(arr, effective_slope, effective_inter)) @@ -289,7 +293,7 @@ def _check_write_scaling(self, img.header.set_data_dtype(np.uint8) with np.errstate(invalid='ignore'): img_rt = bytesio_round_trip(img) - assert_array_equal(img_rt.get_data(), + assert_array_equal(img_rt.get_fdata(), apply_read_scaling(np.round(arr), effective_slope, effective_inter)) @@ -299,7 +303,7 @@ def _check_write_scaling(self, with np.errstate(invalid='ignore'): img_rt = bytesio_round_trip(img) exp_unscaled_arr = np.clip(np.round(arr), 0, 255) - assert_array_equal(img_rt.get_data(), + assert_array_equal(img_rt.get_fdata(), apply_read_scaling(exp_unscaled_arr, effective_slope, effective_inter)) @@ -313,7 +317,7 @@ def test_int_int_scaling(self): img.set_data_dtype(np.uint8) self._set_raw_scaling(hdr, 1, 0 if hdr.has_data_intercept else None) img_rt = bytesio_round_trip(img) - assert_array_equal(img_rt.get_data(), np.clip(arr, 0, 255)) + assert_array_equal(img_rt.get_fdata(), np.clip(arr, 0, 255)) def test_no_scaling(self): # Test writing image converting types when not calculating scaling @@ -337,7 +341,7 @@ def test_no_scaling(self): with np.errstate(invalid='ignore'): rt_img = bytesio_round_trip(img) with suppress_warnings(): # invalid mult - back_arr = rt_img.get_data() + back_arr = np.asanyarray(rt_img.dataobj) exp_back = arr.copy() # If converting to floating point type, casting is direct. 
# Otherwise we will need to do float-(u)int casting at some point @@ -392,54 +396,31 @@ def test_nan2zero_range_ok(self): arr[1, 0, 0] = 256 # to push outside uint8 range img = img_class(arr, np.eye(4)) rt_img = bytesio_round_trip(img) - assert_array_equal(rt_img.get_data(), arr) + assert_array_equal(rt_img.get_fdata(), arr) # Uncontroversial so far, but now check that nan2zero works correctly # for int type img.set_data_dtype(np.uint8) with np.errstate(invalid='ignore'): rt_img = bytesio_round_trip(img) - assert_equal(rt_img.get_data()[0, 0, 0], 0) + assert rt_img.get_fdata()[0, 0, 0] == 0 class TestSpm99AnalyzeImage(test_analyze.TestAnalyzeImage, ImageScalingMixin): # class for testing images image_class = Spm99AnalyzeImage - # Flag to skip bz2 save tests if they are going to break - bad_bz2 = bz2_mio_error() # Decorating the old way, before the team invented @ - test_data_hdr_cache = (scipy_skip( - test_analyze.TestAnalyzeImage.test_data_hdr_cache - )) - - test_header_updating = (scipy_skip( - test_analyze.TestAnalyzeImage.test_header_updating - )) - - test_offset_to_zero = (scipy_skip( - test_analyze.TestAnalyzeImage.test_offset_to_zero - )) - - test_big_offset_exts = (scipy_skip( - test_analyze.TestAnalyzeImage.test_big_offset_exts - )) - - test_header_scaling = scipy_skip( - ImageScalingMixin.test_header_scaling) - - test_int_int_scaling = scipy_skip( - ImageScalingMixin.test_int_int_scaling) - - test_write_scaling = scipy_skip( - ImageScalingMixin.test_write_scaling) - - test_no_scaling = scipy_skip( - ImageScalingMixin.test_no_scaling) - - test_nan2zero_range_ok = scipy_skip( - ImageScalingMixin.test_nan2zero_range_ok) - - @scipy_skip + test_data_hdr_cache = needs_scipy(test_analyze.TestAnalyzeImage.test_data_hdr_cache) + test_header_updating = needs_scipy(test_analyze.TestAnalyzeImage.test_header_updating) + test_offset_to_zero = needs_scipy(test_analyze.TestAnalyzeImage.test_offset_to_zero) + test_big_offset_exts = needs_scipy(test_analyze.TestAnalyzeImage.test_big_offset_exts) + test_header_scaling = needs_scipy(ImageScalingMixin.test_header_scaling) + test_int_int_scaling = needs_scipy(ImageScalingMixin.test_int_int_scaling) + test_write_scaling = needs_scipy(ImageScalingMixin.test_write_scaling) + test_no_scaling = needs_scipy(ImageScalingMixin.test_no_scaling) + test_nan2zero_range_ok = needs_scipy(ImageScalingMixin.test_nan2zero_range_ok) + + @needs_scipy def test_mat_read(self): # Test mat file reading and writing for the SPM analyze types img_klass = self.image_class @@ -452,7 +433,7 @@ def test_mat_read(self): # Test round trip img.to_file_map() r_img = img_klass.from_file_map(fm) - assert_array_equal(r_img.get_data(), arr) + assert_array_equal(r_img.get_fdata(), arr) assert_array_equal(r_img.affine, aff) # mat files are for matlab and have 111 voxel origins. We need to # adjust for that, when loading and saving. Check for signs of that in @@ -461,7 +442,7 @@ def test_mat_read(self): from scipy.io import loadmat, savemat mat_fileobj.seek(0) mats = loadmat(mat_fileobj) - assert_true('M' in mats and 'mat' in mats) + assert 'M' in mats and 'mat' in mats from_111 = np.eye(4) from_111[:3, 3] = -1 to_111 = np.eye(4) @@ -472,7 +453,7 @@ def test_mat_read(self): # should have a flip. The 'mat' matrix does include flips and so # should be unaffected by the flipping. If both are present we prefer # the the 'mat' matrix. 
- assert_true(img.header.default_x_flip) # check the default + assert img.header.default_x_flip # check the default flipper = np.diag([-1, 1, 1, 1]) assert_array_equal(mats['M'], np.dot(aff, np.dot(flipper, from_111))) mat_fileobj.seek(0) @@ -480,7 +461,7 @@ def test_mat_read(self): dict(M=np.diag([3, 4, 5, 1]), mat=np.diag([6, 7, 8, 1]))) # Check we are preferring the 'mat' matrix r_img = img_klass.from_file_map(fm) - assert_array_equal(r_img.get_data(), arr) + assert_array_equal(r_img.get_fdata(), arr) assert_array_equal(r_img.affine, np.dot(np.diag([6, 7, 8, 1]), to_111)) # But will use M if present @@ -488,7 +469,7 @@ def test_mat_read(self): mat_fileobj.truncate(0) savemat(mat_fileobj, dict(M=np.diag([3, 4, 5, 1]))) r_img = img_klass.from_file_map(fm) - assert_array_equal(r_img.get_data(), arr) + assert_array_equal(r_img.get_fdata(), arr) assert_array_equal(r_img.affine, np.dot(np.diag([3, 4, 5, 1]), np.dot(flipper, to_111))) @@ -514,7 +495,7 @@ def test_origin_affine(): assert_array_equal(aff, hdr.get_base_affine()) hdr.set_data_shape((3, 5, 7)) hdr.set_zooms((3, 2, 1)) - assert_true(hdr.default_x_flip) + assert hdr.default_x_flip assert_array_almost_equal( hdr.get_origin_affine(), # from center of image [[-3., 0., 0., 3.], diff --git a/nibabel/tests/test_testing.py b/nibabel/tests/test_testing.py index 40d5ebc41e..bbe83c6973 100644 --- a/nibabel/tests/test_testing.py +++ b/nibabel/tests/test_testing.py @@ -1,30 +1,16 @@ """ Tests for warnings context managers """ -from __future__ import division, print_function, absolute_import import sys +import os import warnings import numpy as np -from nose.tools import assert_equal -from nose.tools import assert_raises from ..testing import (error_warnings, suppress_warnings, clear_and_catch_warnings, assert_allclose_safely, - get_fresh_mod, assert_re_in) - - -def assert_warn_len_equal(mod, n_in_context): - mod_warns = mod.__warningregistry__ - # Python 3.4 appears to clear any pre-existing warnings of the same type, - # when raising warnings inside a catch_warnings block. So, there is a - # warning generated by the tests within the context manager, but no - # previous warnings. 
- if 'version' in mod_warns: - assert_equal(len(mod_warns), 2) # including 'version' - else: - assert_equal(len(mod_warns), n_in_context) - + get_fresh_mod, assert_re_in, test_data, data_path) +import pytest def test_assert_allclose_safely(): # Test the safe version of allclose @@ -32,7 +18,8 @@ def test_assert_allclose_safely(): assert_allclose_safely(1, 1) assert_allclose_safely(1, [1, 1]) assert_allclose_safely([1, 1], 1 + 1e-6) - assert_raises(AssertionError, assert_allclose_safely, [1, 1], 1 + 1e-4) + with pytest.raises(AssertionError): + assert_allclose_safely([1, 1], 1 + 1e-4) # Broadcastable matrices a = np.ones((2, 3)) b = np.ones((3, 2, 3)) @@ -40,24 +27,26 @@ def test_assert_allclose_safely(): a[0, 0] = 1 + eps assert_allclose_safely(a, b) a[0, 0] = 1 + 1.1e-5 - assert_raises(AssertionError, assert_allclose_safely, a, b) + with pytest.raises(AssertionError): + assert_allclose_safely(a, b) # Nans in same place a[0, 0] = np.nan b[:, 0, 0] = np.nan assert_allclose_safely(a, b) # Never equal with nans present, if not matching nans - assert_raises(AssertionError, - assert_allclose_safely, a, b, - match_nans=False) + with pytest.raises(AssertionError): + assert_allclose_safely(a, b, match_nans=False) b[0, 0, 0] = 1 - assert_raises(AssertionError, assert_allclose_safely, a, b) + with pytest.raises(AssertionError): + assert_allclose_safely(a, b) # Test allcloseness of inf, especially np.float128 infs for dtt in np.sctypes['float']: a = np.array([-np.inf, 1, np.inf], dtype=dtt) b = np.array([-np.inf, 1, np.inf], dtype=dtt) assert_allclose_safely(a, b) b[1] = 0 - assert_raises(AssertionError, assert_allclose_safely, a, b) + with pytest.raises(AssertionError): + assert_allclose_safely(a, b) # Empty compares equal to empty assert_allclose_safely([], []) @@ -69,19 +58,19 @@ def assert_warn_len_equal(mod, n_in_context): # warning generated by the tests within the context manager, but no # previous warnings. 
if 'version' in mod_warns: - assert_equal(len(mod_warns), 2) # including 'version' + assert len(mod_warns) == 2 # including 'version' else: - assert_equal(len(mod_warns), n_in_context) + assert len(mod_warns) == n_in_context def test_clear_and_catch_warnings(): # Initial state of module, no warnings my_mod = get_fresh_mod(__name__) - assert_equal(getattr(my_mod, '__warningregistry__', {}), {}) + assert getattr(my_mod, '__warningregistry__', {}) == {} with clear_and_catch_warnings(modules=[my_mod]): warnings.simplefilter('ignore') warnings.warn('Some warning') - assert_equal(my_mod.__warningregistry__, {}) + assert my_mod.__warningregistry__ == {} # Without specified modules, don't clear warnings during context with clear_and_catch_warnings(): warnings.warn('Some warning') @@ -107,23 +96,26 @@ def test_clear_and_catch_warnings_inherit(): with my_cacw(): warnings.simplefilter('ignore') warnings.warn('Some warning') - assert_equal(my_mod.__warningregistry__, {}) + assert my_mod.__warningregistry__ == {} def test_warn_error(): # Check warning error context manager n_warns = len(warnings.filters) with error_warnings(): - assert_raises(UserWarning, warnings.warn, 'A test') + with pytest.raises(UserWarning): + warnings.warn('A test') with error_warnings() as w: # w not used for anything - assert_raises(UserWarning, warnings.warn, 'A test') - assert_equal(n_warns, len(warnings.filters)) + with pytest.raises(UserWarning): + warnings.warn('A test') + assert n_warns == len(warnings.filters) # Check other errors are propagated def f(): with error_warnings(): raise ValueError('An error') - assert_raises(ValueError, f) + with pytest.raises(ValueError): + f() def test_warn_ignore(): @@ -135,31 +127,52 @@ def test_warn_ignore(): with suppress_warnings() as w: # w not used warnings.warn('Here is a warning, you will not see it') warnings.warn('Nor this one', DeprecationWarning) - assert_equal(n_warns, len(warnings.filters)) + assert n_warns == len(warnings.filters) # Check other errors are propagated def f(): with suppress_warnings(): raise ValueError('An error') - assert_raises(ValueError, f) - - -def test_assert_re_in(): - assert_re_in(".*", "") - assert_re_in(".*", ["any"]) - - # Should do match not search - assert_re_in("ab", "abc") - assert_raises(AssertionError, assert_re_in, "ab", "cab") - assert_raises(AssertionError, assert_re_in, "ab$", "abc") + with pytest.raises(ValueError): + f() +@pytest.mark.parametrize("regex, entries", [ + [".*", ""], + [".*", ["any"]], + ["ab", "abc"], # Sufficient to have one entry matching - assert_re_in("ab", ["", "abc", "laskdjf"]) - assert_raises(AssertionError, assert_re_in, "ab$", ["ddd", ""]) - + ["ab", ["", "abc", "laskdjf"]], # Tuples should be ok too - assert_re_in("ab", ("", "abc", "laskdjf")) - assert_raises(AssertionError, assert_re_in, "ab$", ("ddd", "")) - - # Shouldn't "match" the empty list - assert_raises(AssertionError, assert_re_in, "", []) + ["ab", ("", "abc", "laskdjf")], + # Should do match not search + pytest.param("ab", "cab", marks=pytest.mark.xfail), + pytest.param("ab$", "abc", marks=pytest.mark.xfail), + pytest.param("ab$", ["ddd", ""], marks=pytest.mark.xfail), + pytest.param("ab$", ("ddd", ""), marks=pytest.mark.xfail), + #Shouldn't "match" the empty list + pytest.param("", [], marks=pytest.mark.xfail) +]) +def test_assert_re_in(regex, entries): + assert_re_in(regex, entries) + + +def test_test_data(): + assert test_data() == data_path + assert test_data() == os.path.abspath(os.path.join(os.path.dirname(__file__), + '..', 'tests', 'data')) + 
for subdir in ('nicom', 'gifti', 'externals'): + assert test_data(subdir) == os.path.join(data_path[:-10], subdir, 'tests', 'data') + assert os.path.exists(test_data(subdir)) + assert not os.path.exists(test_data(subdir, 'doesnotexist')) + + for subdir in ('freesurfer', 'doesnotexist'): + with pytest.raises(ValueError): + test_data(subdir) + + assert not os.path.exists(test_data(None, 'doesnotexist')) + + for subdir, fname in [('gifti', 'ascii.gii'), + ('nicom', '0.dcm'), + ('externals', 'example_1.nc'), + (None, 'empty.tck')]: + assert os.path.exists(test_data(subdir, fname)) diff --git a/nibabel/tests/test_tmpdirs.py b/nibabel/tests/test_tmpdirs.py index 48fa5885a9..c4d119b14f 100644 --- a/nibabel/tests/test_tmpdirs.py +++ b/nibabel/tests/test_tmpdirs.py @@ -1,12 +1,10 @@ """ Test tmpdirs module """ -from __future__ import division, print_function, absolute_import from os import getcwd from os.path import realpath, abspath, dirname, isfile from ..tmpdirs import InGivenDirectory -from nose.tools import assert_true, assert_equal MY_PATH = abspath(__file__) MY_DIR = dirname(MY_PATH) @@ -16,10 +14,10 @@ def test_given_directory(): # Test InGivenDirectory cwd = getcwd() with InGivenDirectory() as tmpdir: - assert_equal(tmpdir, abspath(cwd)) - assert_equal(tmpdir, abspath(getcwd())) + assert tmpdir == abspath(cwd) + assert tmpdir == abspath(getcwd()) with InGivenDirectory(MY_DIR) as tmpdir: - assert_equal(tmpdir, MY_DIR) - assert_equal(realpath(MY_DIR), realpath(abspath(getcwd()))) + assert tmpdir == MY_DIR + assert realpath(MY_DIR) == realpath(abspath(getcwd())) # We were deleting the Given directory! Check not so now. - assert_true(isfile(MY_PATH)) + assert isfile(MY_PATH) diff --git a/nibabel/tests/test_trackvis.py b/nibabel/tests/test_trackvis.py index 9f8d84946c..ad4eb083a2 100644 --- a/nibabel/tests/test_trackvis.py +++ b/nibabel/tests/test_trackvis.py @@ -1,5 +1,4 @@ ''' Testing trackvis module ''' -from __future__ import division, print_function, absolute_import from functools import partial @@ -10,16 +9,14 @@ from ..orientations import aff2axcodes from ..volumeutils import native_code, swapped_code -from nose.tools import assert_true, assert_false, assert_equal, assert_raises -from numpy.testing import assert_array_equal, assert_array_almost_equal -from ..testing import error_warnings, suppress_warnings - +from numpy.testing import assert_array_almost_equal, assert_array_equal +import pytest def test_write(): streams = [] out_f = BytesIO() tv.write(out_f, [], {}) - assert_equal(out_f.getvalue(), tv.empty_header().tostring()) + assert out_f.getvalue() == tv.empty_header().tostring() out_f.truncate(0) out_f.seek(0) # Write something not-default @@ -27,7 +24,7 @@ def test_write(): # read it back out_f.seek(0) streams, hdr = tv.read(out_f) - assert_equal(hdr['id_string'], b'TRACKb') + assert hdr['id_string'] == b'TRACKb' # check that we can pass none for the header out_f.truncate(0) out_f.seek(0) @@ -38,12 +35,12 @@ def test_write(): # check that we check input values out_f.truncate(0) out_f.seek(0) - assert_raises(tv.HeaderError, - tv.write, out_f, [], {'id_string': 'not OK'}) - assert_raises(tv.HeaderError, - tv.write, out_f, [], {'version': 3}) - assert_raises(tv.HeaderError, - tv.write, out_f, [], {'hdr_size': 0}) + with pytest.raises(tv.HeaderError): + tv.write(out_f, [], {'id_string': 'not OK'}) + with pytest.raises(tv.HeaderError): + tv.write(out_f, [], {'version': 3}) + with pytest.raises(tv.HeaderError): + tv.write(out_f, [], {'hdr_size': 0}) def test_write_scalars_props(): @@ 
-58,26 +55,31 @@ def test_write_scalars_props(): out_f = BytesIO() streams = [(points, None, None), (points, scalars, None)] - assert_raises(tv.DataError, tv.write, out_f, streams) + with pytest.raises(tv.DataError): + tv.write(out_f, streams) out_f.seek(0) streams = [(points, np.zeros((N, M + 1)), None), (points, scalars, None)] - assert_raises(tv.DataError, tv.write, out_f, streams) + with pytest.raises(tv.DataError): + tv.write(out_f, streams) # Or if scalars different N compared to points bad_scalars = np.zeros((N + 1, M)) out_f.seek(0) streams = [(points, bad_scalars, None), (points, bad_scalars, None)] - assert_raises(tv.DataError, tv.write, out_f, streams) + with pytest.raises(tv.DataError): + tv.write(out_f, streams) # Similarly properties must have the same length for each streamline out_f.seek(0) streams = [(points, scalars, None), (points, scalars, props)] - assert_raises(tv.DataError, tv.write, out_f, streams) + with pytest.raises(tv.DataError): + tv.write(out_f, streams) out_f.seek(0) streams = [(points, scalars, np.zeros((P + 1,))), (points, scalars, props)] - assert_raises(tv.DataError, tv.write, out_f, streams) + with pytest.raises(tv.DataError): + tv.write(out_f, streams) # If all is OK, then we get back what we put in out_f.seek(0) streams = [(points, scalars, props), @@ -135,7 +137,7 @@ def test_round_trip(): tv.write(out_f, streams, {}) out_f.seek(0) streams2, hdr = tv.read(out_f) - assert_true(streamlist_equal(streams, streams2)) + assert streamlist_equal(streams, streams2) # test that we can write in different endianness and get back same result, # for versions 1, 2 and not-specified for in_dict, back_version in (({}, 2), @@ -146,15 +148,15 @@ def test_round_trip(): tv.write(out_f, streams, in_dict, endian_code) out_f.seek(0) streams2, hdr = tv.read(out_f) - assert_true(streamlist_equal(streams, streams2)) - assert_equal(hdr['version'], back_version) + assert streamlist_equal(streams, streams2) + assert hdr['version'] == back_version # test that we can get out and pass in generators out_f.seek(0) streams3, hdr = tv.read(out_f, as_generator=True) # check this is a generator rather than a list - assert_true(hasattr(streams3, 'send')) + assert hasattr(streams3, 'send') # but that it results in the same output - assert_true(streamlist_equal(streams, list(streams3))) + assert streamlist_equal(streams, list(streams3)) # write back in out_f.seek(0) streams3, hdr = tv.read(out_f, as_generator=True) @@ -165,7 +167,7 @@ def test_round_trip(): # and re-read just to check out_f_write.seek(0) streams2, hdr = tv.read(out_f_write) - assert_true(streamlist_equal(streams, streams2)) + assert streamlist_equal(streams, streams2) def test_points_processing(): @@ -193,11 +195,11 @@ def _rt(streams, hdr, points_space): # voxmm is the default. 
In this case we don't do anything to the # points, and we let the header pass through without further checks (raw_streams, hdr), (proc_streams, _) = _rt(vxmm_streams, {}, None) - assert_true(streamlist_equal(raw_streams, proc_streams)) - assert_true(streamlist_equal(vxmm_streams, proc_streams)) + assert streamlist_equal(raw_streams, proc_streams) + assert streamlist_equal(vxmm_streams, proc_streams) (raw_streams, hdr), (proc_streams, _) = _rt(vxmm_streams, {}, 'voxmm') - assert_true(streamlist_equal(raw_streams, proc_streams)) - assert_true(streamlist_equal(vxmm_streams, proc_streams)) + assert streamlist_equal(raw_streams, proc_streams) + assert streamlist_equal(vxmm_streams, proc_streams) # with 'voxels' as input, check for not all voxel_size == 0, warn if any # voxel_size == 0 for hdr in ( # these cause read / write errors @@ -208,22 +210,23 @@ def _rt(streams, hdr, points_space): ): # Check error on write out_f.seek(0) - assert_raises(tv.HeaderError, - tv.write, out_f, vx_streams, hdr, None, 'voxel') + with pytest.raises(tv.HeaderError): + tv.write(out_f, vx_streams, hdr, None, 'voxel') out_f.seek(0) # bypass write error and check read tv.write(out_f, vxmm_streams, hdr, None, points_space=None) out_f.seek(0) - assert_raises(tv.HeaderError, tv.read, out_f, False, 'voxel') + with pytest.raises(tv.HeaderError): + tv.read(out_f, False, 'voxel') # There's a warning for any voxel sizes == 0 hdr = {'voxel_size': [2, 3, 0]} - with error_warnings(): - assert_raises(UserWarning, _rt, vx_streams, hdr, 'voxel') + with pytest.warns(UserWarning): + _rt(vx_streams, hdr, 'voxel') # This should be OK hdr = {'voxel_size': [2, 3, 4]} (raw_streams, hdr), (proc_streams, _) = _rt(vx_streams, hdr, 'voxel') - assert_true(streamlist_equal(vxmm_streams, raw_streams)) - assert_true(streamlist_equal(vx_streams, proc_streams)) + assert streamlist_equal(vxmm_streams, raw_streams) + assert streamlist_equal(vx_streams, proc_streams) # Now we try with rasmm points. In this case we need valid voxel_size, # and voxel_order, and vox_to_ras. 
The voxel_order has to match the # vox_to_ras, and so do the voxel sizes @@ -249,19 +252,20 @@ def _rt(streams, hdr, points_space): ): # Check error on write out_f.seek(0) - assert_raises(tv.HeaderError, - tv.write, out_f, rasmm_streams, hdr, None, 'rasmm') + with pytest.raises(tv.HeaderError): + tv.write(out_f, rasmm_streams, hdr, None, 'rasmm') out_f.seek(0) # bypass write error and check read tv.write(out_f, vxmm_streams, hdr, None, points_space=None) out_f.seek(0) - assert_raises(tv.HeaderError, tv.read, out_f, False, 'rasmm') + with pytest.raises(tv.HeaderError): + tv.read(out_f, False, 'rasmm') # This should be OK hdr = {'voxel_size': [2, 3, 4], 'voxel_order': 'RAS', 'vox_to_ras': aff} (raw_streams, hdr), (proc_streams, _) = _rt(rasmm_streams, hdr, 'rasmm') - assert_true(streamlist_equal(vxmm_streams, raw_streams)) - assert_true(streamlist_equal(rasmm_streams, proc_streams)) + assert streamlist_equal(vxmm_streams, raw_streams) + assert streamlist_equal(rasmm_streams, proc_streams) # More complex test to check matrix orientation fancy_affine = np.array([[0., -2, 0, 10], [3, 0, 0, 20], @@ -280,100 +284,93 @@ def f(pts): # from vx to mm (ijk1 * [[3, 2, 4]], scalars[1], None)] (raw_streams, hdr), (proc_streams, _) = _rt( fancy_rasmm_streams, hdr, 'rasmm') - assert_true(streamlist_equal(fancy_vxmm_streams, raw_streams)) - assert_true(streamlist_equal(fancy_rasmm_streams, proc_streams)) + assert streamlist_equal(fancy_vxmm_streams, raw_streams) + assert streamlist_equal(fancy_rasmm_streams, proc_streams) def test__check_hdr_points_space(): # Test checking routine for points_space input given header # None or voxmm -> no checks, pass through - assert_equal(tv._check_hdr_points_space({}, None), None) - assert_equal(tv._check_hdr_points_space({}, 'voxmm'), None) + assert tv._check_hdr_points_space({}, None) is None + assert tv._check_hdr_points_space({}, 'voxmm') is None # strange value for points_space -> ValueError - assert_raises(ValueError, - tv._check_hdr_points_space, {}, 'crazy') + with pytest.raises(ValueError): + tv._check_hdr_points_space({}, 'crazy') # Input not in (None, 'voxmm', 'voxels', 'rasmm') - error # voxels means check voxel sizes present and not all 0. 
hdr = tv.empty_header() assert_array_equal(hdr['voxel_size'], [0, 0, 0]) - assert_raises(tv.HeaderError, - tv._check_hdr_points_space, hdr, 'voxel') + with pytest.raises(tv.HeaderError): + tv._check_hdr_points_space(hdr, 'voxel') # Negative voxel size gives error - because it is not what trackvis does, # and this not what we mean by 'voxmm' hdr['voxel_size'] = [-2, 3, 4] - assert_raises(tv.HeaderError, - tv._check_hdr_points_space, hdr, 'voxel') + with pytest.raises(tv.HeaderError): + tv._check_hdr_points_space(hdr, 'voxel') # Warning here only hdr['voxel_size'] = [2, 3, 0] - with error_warnings(): - assert_raises(UserWarning, - tv._check_hdr_points_space, hdr, 'voxel') + with pytest.warns(UserWarning): + tv._check_hdr_points_space(hdr, 'voxel') # This is OK hdr['voxel_size'] = [2, 3, 4] - assert_equal(tv._check_hdr_points_space(hdr, 'voxel'), None) + assert tv._check_hdr_points_space(hdr, 'voxel') is None # rasmm - check there is an affine, that it matches voxel_size and # voxel_order # no affine hdr['voxel_size'] = [2, 3, 4] - assert_raises(tv.HeaderError, - tv._check_hdr_points_space, hdr, 'rasmm') + with pytest.raises(tv.HeaderError): + tv._check_hdr_points_space(hdr, 'rasmm') # still no affine hdr['voxel_order'] = 'RAS' - assert_raises(tv.HeaderError, - tv._check_hdr_points_space, hdr, 'rasmm') + with pytest.raises(tv.HeaderError): + tv._check_hdr_points_space(hdr, 'rasmm') # nearly an affine, but 0 at position 3,3 - means not recorded in trackvis # standard hdr['vox_to_ras'] = np.diag([2, 3, 4, 0]) - assert_raises(tv.HeaderError, - tv._check_hdr_points_space, hdr, 'rasmm') + with pytest.raises(tv.HeaderError): + tv._check_hdr_points_space(hdr, 'rasmm') # This affine doesn't match RAS voxel order hdr['vox_to_ras'] = np.diag([-2, 3, 4, 1]) - assert_raises(tv.HeaderError, - tv._check_hdr_points_space, hdr, 'rasmm') + with pytest.raises(tv.HeaderError): + tv._check_hdr_points_space(hdr, 'rasmm') # This affine doesn't match the voxel size hdr['vox_to_ras'] = np.diag([3, 3, 4, 1]) - assert_raises(tv.HeaderError, - tv._check_hdr_points_space, hdr, 'rasmm') + with pytest.raises(tv.HeaderError): + tv._check_hdr_points_space(hdr, 'rasmm') # This should be OK good_aff = np.diag([2, 3, 4, 1]) hdr['vox_to_ras'] = good_aff - assert_equal(tv._check_hdr_points_space(hdr, 'rasmm'), - None) + assert tv._check_hdr_points_space(hdr, 'rasmm') is None # Default voxel order of LPS assumed hdr['voxel_order'] = '' # now the RAS affine raises an error - assert_raises(tv.HeaderError, - tv._check_hdr_points_space, hdr, 'rasmm') + with pytest.raises(tv.HeaderError): + tv._check_hdr_points_space(hdr, 'rasmm') # this affine does have LPS voxel order good_lps = np.dot(np.diag([-1, -1, 1, 1]), good_aff) hdr['vox_to_ras'] = good_lps - assert_equal(tv._check_hdr_points_space(hdr, 'rasmm'), - None) + assert tv._check_hdr_points_space(hdr, 'rasmm') is None def test_empty_header(): for endian in '<>': for version in (1, 2): hdr = tv.empty_header(endian, version) - assert_equal(hdr['id_string'], b'TRACK') - assert_equal(hdr['version'], version) - assert_equal(hdr['hdr_size'], 1000) + assert hdr['id_string'] == b'TRACK' + assert hdr['version'] == version + assert hdr['hdr_size'] == 1000 assert_array_equal( hdr['image_orientation_patient'], [0, 0, 0, 0, 0, 0]) hdr = tv.empty_header(version=2) assert_array_equal(hdr['vox_to_ras'], np.zeros((4, 4))) hdr_endian = tv.endian_codes[tv.empty_header().dtype.byteorder] - assert_equal(hdr_endian, tv.native_code) + assert hdr_endian == tv.native_code def test_get_affine(): # Test get 
affine behavior, including pending deprecation hdr = tv.empty_header() - # Using version 1 affine is not a good idea because is fragile and not - # very useful. The default atleast_v2=None mode raises a FutureWarning - with error_warnings(): - assert_raises(FutureWarning, tv.aff_from_hdr, hdr) # testing the old behavior old_afh = partial(tv.aff_from_hdr, atleast_v2=False) # default header gives useless affine @@ -398,11 +395,12 @@ def test_get_affine(): exp_aff) # check against voxel order. This one works hdr['voxel_order'] = ''.join(aff2axcodes(exp_aff)) - assert_equal(hdr['voxel_order'], b'RAS') + assert hdr['voxel_order'] == b'RAS' assert_array_equal(old_afh(hdr), exp_aff) # This one doesn't hdr['voxel_order'] = 'LAS' - assert_raises(tv.HeaderError, old_afh, hdr) + with pytest.raises(tv.HeaderError): + old_afh(hdr) # This one does work because the routine allows the final dimension to # be flipped to try and match the voxel order hdr['voxel_order'] = 'RAI' @@ -418,12 +416,12 @@ def test_get_affine(): tv.aff_to_hdr(in_aff, hdr, pos_vox=True, set_order=True) # Unset easier option hdr['vox_to_ras'] = 0 - assert_equal(hdr['voxel_order'], o_codes) + assert hdr['voxel_order'] == o_codes # Check it came back the way we wanted assert_array_equal(old_afh(hdr), in_aff) - # Check that the default case matches atleast_v2=False case - with suppress_warnings(): - assert_array_equal(tv.aff_from_hdr(hdr), flipped_aff) + # Check that v1 header raises error + with pytest.raises(tv.HeaderError): + tv.aff_from_hdr(hdr) # now use the easier vox_to_ras field hdr = tv.empty_header() aff = np.eye(4) @@ -455,42 +453,34 @@ def test_aff_to_hdr(): # Historically we flip the first axis if there is a negative determinant assert_array_almost_equal(hdr['voxel_size'], [-1, 2, 3]) assert_array_almost_equal(tv.aff_from_hdr(hdr, atleast_v2=False), aff2) - # Test that default mode raises DeprecationWarning - with error_warnings(): - assert_raises(FutureWarning, tv.aff_to_hdr, affine, hdr) - assert_raises(FutureWarning, tv.aff_to_hdr, affine, hdr, None, None) - assert_raises(FutureWarning, tv.aff_to_hdr, affine, hdr, False, None) - assert_raises(FutureWarning, tv.aff_to_hdr, affine, hdr, None, False) - # And has same effect as above - with suppress_warnings(): - tv.aff_to_hdr(affine, hdr) + tv.aff_to_hdr(affine, hdr, pos_vox=False, set_order=False) assert_array_almost_equal(tv.aff_from_hdr(hdr, atleast_v2=False), affine) # Check pos_vox and order flags for hdr in ({}, {'version': 2}, {'version': 1}): tv.aff_to_hdr(aff2, hdr, pos_vox=True, set_order=False) assert_array_equal(hdr['voxel_size'], [1, 2, 3]) - assert_false('voxel_order' in hdr) + assert 'voxel_order' not in hdr tv.aff_to_hdr(aff2, hdr, pos_vox=False, set_order=True) assert_array_equal(hdr['voxel_size'], [-1, 2, 3]) - assert_equal(hdr['voxel_order'], 'RAI') + assert hdr['voxel_order'] == 'RAI' tv.aff_to_hdr(aff2, hdr, pos_vox=True, set_order=True) assert_array_equal(hdr['voxel_size'], [1, 2, 3]) - assert_equal(hdr['voxel_order'], 'RAI') + assert hdr['voxel_order'] == 'RAI' if 'version' in hdr and hdr['version'] == 1: - assert_false('vox_to_ras' in hdr) + assert 'vox_to_ras' not in hdr else: assert_array_equal(hdr['vox_to_ras'], aff2) def test_tv_class(): tvf = tv.TrackvisFile([]) - assert_equal(tvf.streamlines, []) - assert_true(isinstance(tvf.header, np.ndarray)) - assert_equal(tvf.endianness, tv.native_code) - assert_equal(tvf.filename, None) + assert tvf.streamlines == [] + assert isinstance(tvf.header, np.ndarray) + assert tvf.endianness == tv.native_code + 
assert tvf.filename is None out_f = BytesIO() tvf.to_file(out_f) - assert_equal(out_f.getvalue(), tv.empty_header().tostring()) + assert out_f.getvalue() == tv.empty_header().tostring() out_f.truncate(0) out_f.seek(0) # Write something not-default @@ -499,36 +489,25 @@ def test_tv_class(): # read it back out_f.seek(0) tvf_back = tv.TrackvisFile.from_file(out_f) - assert_equal(tvf_back.header['id_string'], b'TRACKb') + assert tvf_back.header['id_string'] == b'TRACKb' # check that we check input values out_f.truncate(0) out_f.seek(0) - assert_raises(tv.HeaderError, - tv.TrackvisFile, - [], {'id_string': 'not OK'}) - assert_raises(tv.HeaderError, - tv.TrackvisFile, - [], {'version': 3}) - assert_raises(tv.HeaderError, - tv.TrackvisFile, - [], {'hdr_size': 0}) + with pytest.raises(tv.HeaderError): + tv.TrackvisFile([], {'id_string': 'not OK'}) + with pytest.raises(tv.HeaderError): + tv.TrackvisFile([], {'version': 3}) + with pytest.raises(tv.HeaderError): + tv.TrackvisFile([], {'hdr_size': 0}) affine = np.diag([1, 2, 3, 1]) affine[:3, 3] = [10, 11, 12] # affine methods will raise same warnings and errors as function - with error_warnings(): - assert_raises(FutureWarning, tvf.set_affine, affine) - assert_raises(FutureWarning, tvf.set_affine, affine, None, None) - assert_raises(FutureWarning, tvf.set_affine, affine, False, None) - assert_raises(FutureWarning, tvf.set_affine, affine, None, False) - assert_raises(FutureWarning, tvf.get_affine) - assert_raises(FutureWarning, tvf.get_affine, None) tvf.set_affine(affine, pos_vox=True, set_order=True) aff = tvf.get_affine(atleast_v2=True) assert_array_almost_equal(aff, affine) # Test that we raise an error with an iterator - assert_raises(tv.TrackvisFileError, - tv.TrackvisFile, - iter([])) + with pytest.raises(tv.TrackvisFileError): + tv.TrackvisFile(iter([])) def test_tvfile_io(): @@ -544,22 +523,23 @@ def test_tvfile_io(): tvf.to_file(out_f) out_f.seek(0) tvf2 = tv.TrackvisFile.from_file(out_f) - assert_equal(tvf2.filename, None) - assert_true(streamlist_equal(vxmm_streams, tvf2.streamlines)) - assert_equal(tvf2.points_space, None) + assert tvf2.filename is None + assert streamlist_equal(vxmm_streams, tvf2.streamlines) + assert tvf2.points_space is None # Voxel points_space tvf = tv.TrackvisFile(vx_streams, points_space='voxel') out_f.seek(0) # No voxel size - error - assert_raises(tv.HeaderError, tvf.to_file, out_f) + with pytest.raises(tv.HeaderError): + tvf.to_file(out_f) out_f.seek(0) # With voxel size, no error, roundtrip works tvf.header['voxel_size'] = [2, 3, 4] tvf.to_file(out_f) out_f.seek(0) tvf2 = tv.TrackvisFile.from_file(out_f, points_space='voxel') - assert_true(streamlist_equal(vx_streams, tvf2.streamlines)) - assert_equal(tvf2.points_space, 'voxel') + assert streamlist_equal(vx_streams, tvf2.streamlines) + assert tvf2.points_space == 'voxel' out_f.seek(0) # Also with affine specified tvf = tv.TrackvisFile(vx_streams, points_space='voxel', @@ -567,7 +547,7 @@ def test_tvfile_io(): tvf.to_file(out_f) out_f.seek(0) tvf2 = tv.TrackvisFile.from_file(out_f, points_space='voxel') - assert_true(streamlist_equal(vx_streams, tvf2.streamlines)) + assert streamlist_equal(vx_streams, tvf2.streamlines) # Fancy affine test fancy_affine = np.array([[0., -2, 0, 10], [3, 0, 0, 20], @@ -583,15 +563,16 @@ def f(pts): # from vx to mm tvf = tv.TrackvisFile(fancy_rasmm_streams, points_space='rasmm') out_f.seek(0) # No affine - assert_raises(tv.HeaderError, tvf.to_file, out_f) + with pytest.raises(tv.HeaderError): + tvf.to_file(out_f) out_f.seek(0) # With 
affine set, no error, roundtrip works tvf.set_affine(fancy_affine, pos_vox=True, set_order=True) tvf.to_file(out_f) out_f.seek(0) tvf2 = tv.TrackvisFile.from_file(out_f, points_space='rasmm') - assert_true(streamlist_equal(fancy_rasmm_streams, tvf2.streamlines)) - assert_equal(tvf2.points_space, 'rasmm') + assert streamlist_equal(fancy_rasmm_streams, tvf2.streamlines) + assert tvf2.points_space == 'rasmm' out_f.seek(0) # Also when affine given in init tvf = tv.TrackvisFile(fancy_rasmm_streams, points_space='rasmm', @@ -599,7 +580,7 @@ def f(pts): # from vx to mm tvf.to_file(out_f) out_f.seek(0) tvf2 = tv.TrackvisFile.from_file(out_f, points_space='rasmm') - assert_true(streamlist_equal(fancy_rasmm_streams, tvf2.streamlines)) + assert streamlist_equal(fancy_rasmm_streams, tvf2.streamlines) def test_read_truncated(): @@ -613,26 +594,29 @@ def test_read_truncated(): value = out_f.getvalue()[:-(3 * 4)] new_f = BytesIO(value) # By default, raises a DataError - assert_raises(tv.DataError, tv.read, new_f) + with pytest.raises(tv.DataError): + tv.read(new_f) # This corresponds to strict mode new_f.seek(0) - assert_raises(tv.DataError, tv.read, new_f, strict=True) + with pytest.raises(tv.DataError): + tv.read(new_f, strict=True) # lenient error mode lets this error pass, with truncated track short_streams = [(xyz0, None, None), (xyz1[:-1], None, None)] new_f.seek(0) streams2, hdr = tv.read(new_f, strict=False) - assert_true(streamlist_equal(streams2, short_streams)) + assert streamlist_equal(streams2, short_streams) # Check that lenient works when number of tracks is 0, where 0 signals to # the reader to read until the end of the file. again_hdr = hdr.copy() - assert_equal(again_hdr['n_count'], 2) + assert again_hdr['n_count'] == 2 again_hdr['n_count'] = 0 again_bytes = again_hdr.tostring() + value[again_hdr.itemsize:] again_f = BytesIO(again_bytes) streams2, _ = tv.read(again_f, strict=False) - assert_true(streamlist_equal(streams2, short_streams)) + assert streamlist_equal(streams2, short_streams) # Set count to one above actual number of tracks, always raise error again_hdr['n_count'] = 3 again_bytes = again_hdr.tostring() + value[again_hdr.itemsize:] again_f = BytesIO(again_bytes) - assert_raises(tv.DataError, tv.read, again_f, strict=False) + with pytest.raises(tv.DataError): + tv.read(again_f, strict=False) diff --git a/nibabel/tests/test_tripwire.py b/nibabel/tests/test_tripwire.py index 05d3b1eb3f..2ec3e06182 100644 --- a/nibabel/tests/test_tripwire.py +++ b/nibabel/tests/test_tripwire.py @@ -3,26 +3,22 @@ from ..tripwire import TripWire, is_tripwire, TripWireError -from nose.tools import (assert_true, assert_false, assert_raises, - assert_equal, assert_not_equal) - +import pytest def test_is_tripwire(): - assert_false(is_tripwire(object())) - assert_true(is_tripwire(TripWire('some message'))) + assert not is_tripwire(object()) + assert is_tripwire(TripWire('some message')) def test_tripwire(): # Test tripwire object silly_module_name = TripWire('We do not have silly_module_name') - assert_raises(TripWireError, - getattr, - silly_module_name, - 'do_silly_thing') + with pytest.raises(TripWireError): + silly_module_name.do_silly_thing # Check AttributeError can be checked too try: silly_module_name.__wrapped__ except TripWireError as err: - assert_true(isinstance(err, AttributeError)) + assert isinstance(err, AttributeError) else: raise RuntimeError("No error raised, but expected") diff --git a/nibabel/tests/test_viewers.py b/nibabel/tests/test_viewers.py index 68710b3126..907a3bbb1e 100644 
--- a/nibabel/tests/test_viewers.py +++ b/nibabel/tests/test_viewers.py @@ -14,15 +14,16 @@ from ..optpkg import optional_package from ..viewers import OrthoSlicer3D -from ..testing import skipif from numpy.testing import assert_array_equal, assert_equal -from nose.tools import assert_raises, assert_true +import unittest +import pytest # Need at least MPL 1.3 for viewer tests. -matplotlib, has_mpl, _ = optional_package('matplotlib', min_version='1.3') +# 2020.02.11 - 1.3 wheels are no longer distributed, so the minimum we test with is 1.5 +matplotlib, has_mpl, _ = optional_package('matplotlib', min_version='1.5') -needs_mpl = skipif(not has_mpl, 'These tests need matplotlib') +needs_mpl = unittest.skipUnless(has_mpl, 'These tests need matplotlib') if has_mpl: matplotlib.use('Agg') @@ -37,7 +38,7 @@ def test_viewer(): data = data * np.array([1., 2.]) # give it a # of volumes > 1 v = OrthoSlicer3D(data) assert_array_equal(v.position, (0, 0, 0)) - assert_true('OrthoSlicer3D' in repr(v)) + assert 'OrthoSlicer3D' in repr(v) # fake some events, inside and outside axes v._on_scroll(nt('event', 'button inaxes key')('up', None, None)) @@ -52,8 +53,10 @@ def test_viewer(): v.set_volume_idx(1) v.cmap = 'hot' v.clim = (0, 3) - assert_raises(ValueError, OrthoSlicer3D.clim.fset, v, (0.,)) # bad limits - assert_raises(ValueError, OrthoSlicer3D.cmap.fset, v, 'foo') # wrong cmap + with pytest.raises(ValueError): + OrthoSlicer3D.clim.fset(v, (0.,)) # bad limits + with pytest.raises(ValueError): + OrthoSlicer3D.cmap.fset(v, 'foo') # wrong cmap # decrement/increment volume numbers via keypress v.set_volume_idx(1) # should just pass @@ -75,8 +78,8 @@ def test_viewer(): v.close() # complex input should raise a TypeError prior to figure creation - assert_raises(TypeError, OrthoSlicer3D, - data[:, :, :, 0].astype(np.complex64)) + with pytest.raises(TypeError): + OrthoSlicer3D(data[:, :, :, 0].astype(np.complex64)) # other cases fig, axes = plt.subplots(1, 4) @@ -86,10 +89,13 @@ def test_viewer(): float) v2 = OrthoSlicer3D(data, affine=aff, axes=axes[:3]) # bad data (not 3+ dim) - assert_raises(ValueError, OrthoSlicer3D, data[:, :, 0, 0]) + with pytest.raises(ValueError): + OrthoSlicer3D(data[:, :, 0, 0]) # bad affine (not 4x4) - assert_raises(ValueError, OrthoSlicer3D, data, affine=np.eye(3)) - assert_raises(TypeError, v2.link_to, 1) + with pytest.raises(ValueError): + OrthoSlicer3D(data, affine=np.eye(3)) + with pytest.raises(TypeError): + v2.link_to(1) v2.link_to(v1) v2.link_to(v1) # shouldn't do anything v1.close() diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index 29c0edaf07..8b0b6a52cf 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -7,7 +7,6 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## ''' Test for volumeutils module ''' -from __future__ import division import os from os.path import exists @@ -19,19 +18,21 @@ import itertools import gzip import bz2 +import threading +import time import numpy as np from ..tmpdirs import InTemporaryDirectory from ..openers import ImageOpener -from .. 
import volumeutils from ..volumeutils import (array_from_file, _is_compressed_fobj, array_to_file, allopen, # for backwards compatibility fname_ext_ul_case, - calculate_scale, - can_cast, + calculate_scale, # Deprecated + can_cast, # Deprecated + scale_min_max, # Deprecated write_zeros, seek_tell, apply_read_scaling, @@ -45,17 +46,18 @@ rec2dict, _dt_min_max, _write_data, + _ftype4scaled_finite, ) from ..openers import Opener, BZ2File from ..casting import (floor_log2, type_info, OK_FLOATS, shared_range) +from ..deprecator import ExpiredDeprecationError + from numpy.testing import (assert_array_almost_equal, assert_array_equal) +import pytest -from nose.tools import assert_true, assert_false, assert_equal, assert_raises - -from ..testing import (assert_dt_equal, assert_allclose_safely, - suppress_warnings, clear_and_catch_warnings) +from ..testing import assert_dt_equal, assert_allclose_safely, suppress_warnings #: convenience variables for numpy types FLOAT_TYPES = np.sctypes['float'] @@ -66,6 +68,15 @@ NUMERIC_TYPES = CFLOAT_TYPES + IUINT_TYPES +def test_deprecated_functions(): + with pytest.raises(ExpiredDeprecationError): + scale_min_max(0, 1, np.uint8, True) + with pytest.raises(ExpiredDeprecationError): + calculate_scale(np.array([-2, -1], dtype=np.int8), np.uint8, True) + with pytest.raises(ExpiredDeprecationError): + can_cast(np.float32, np.float32) + + def test__is_compressed_fobj(): # _is_compressed helper function with InTemporaryDirectory(): @@ -75,7 +86,7 @@ def test__is_compressed_fobj(): fname = 'test.bin' + ext for mode in ('wb', 'rb'): fobj = opener(fname, mode) - assert_equal(_is_compressed_fobj(fobj), compressed) + assert _is_compressed_fobj(fobj) == compressed fobj.close() @@ -106,20 +117,20 @@ def make_array(n, bytes): contents1 = bytearray(4 * n) fobj_r.readinto(contents1) # Second element is 1 - assert_false(contents1[0:8] == b'\x00' * 8) + assert contents1[0:8] != b'\x00' * 8 out_arr = make_array(n, contents1) assert_array_equal(in_arr, out_arr) # Set second element to 0 out_arr[1] = 0 # Show this changed the bytes string - assert_equal(contents1[:8], b'\x00' * 8) + assert contents1[:8] == b'\x00' * 8 # Reread, to get unmodified contents fobj_r.seek(0) contents2 = bytearray(4 * n) fobj_r.readinto(contents2) out_arr2 = make_array(n, contents2) assert_array_equal(in_arr, out_arr2) - assert_equal(out_arr[1], 0) + assert out_arr[1] == 0 finally: fobj_r.close() os.unlink(fname) @@ -131,30 +142,30 @@ def test_array_from_file(): in_arr = np.arange(24, dtype=dtype).reshape(shape) # Check on string buffers offset = 0 - assert_true(buf_chk(in_arr, BytesIO(), None, offset)) + assert buf_chk(in_arr, BytesIO(), None, offset) offset = 10 - assert_true(buf_chk(in_arr, BytesIO(), None, offset)) + assert buf_chk(in_arr, BytesIO(), None, offset) # check on real file fname = 'test.bin' with InTemporaryDirectory(): # fortran ordered out_buf = open(fname, 'wb') in_buf = open(fname, 'rb') - assert_true(buf_chk(in_arr, out_buf, in_buf, offset)) + assert buf_chk(in_arr, out_buf, in_buf, offset) # Drop offset to check that shape's not coming from file length out_buf.seek(0) in_buf.seek(0) offset = 5 - assert_true(buf_chk(in_arr, out_buf, in_buf, offset)) + assert buf_chk(in_arr, out_buf, in_buf, offset) del out_buf, in_buf # Make sure empty shape, and zero length, give empty arrays arr = array_from_file((), np.dtype('f8'), BytesIO()) - assert_equal(len(arr), 0) + assert len(arr) == 0 arr = array_from_file((0,), np.dtype('f8'), BytesIO()) - assert_equal(len(arr), 0) + assert len(arr) == 0 # 
Check error from small file - assert_raises(IOError, array_from_file, - shape, dtype, BytesIO()) + with pytest.raises(IOError): + array_from_file(shape, dtype, BytesIO()) # check on real file fd, fname = tempfile.mkstemp() with InTemporaryDirectory(): @@ -162,8 +173,8 @@ def test_array_from_file(): in_buf = open(fname, 'rb') # For windows this will raise a WindowsError from mmap, Unices # appear to raise an IOError - assert_raises(Exception, array_from_file, - shape, dtype, in_buf) + with pytest.raises(Exception): + array_from_file(shape, dtype, in_buf) del in_buf @@ -178,35 +189,35 @@ def test_array_from_file_mmap(): with open('test.bin', 'rb') as fobj: res = array_from_file(shape, dt, fobj) assert_array_equal(res, arr) - assert_true(isinstance(res, np.memmap)) - assert_equal(res.mode, 'c') + assert isinstance(res, np.memmap) + assert res.mode == 'c' with open('test.bin', 'rb') as fobj: res = array_from_file(shape, dt, fobj, mmap=True) assert_array_equal(res, arr) - assert_true(isinstance(res, np.memmap)) - assert_equal(res.mode, 'c') + assert isinstance(res, np.memmap) + assert res.mode == 'c' with open('test.bin', 'rb') as fobj: res = array_from_file(shape, dt, fobj, mmap='c') assert_array_equal(res, arr) - assert_true(isinstance(res, np.memmap)) - assert_equal(res.mode, 'c') + assert isinstance(res, np.memmap) + assert res.mode == 'c' with open('test.bin', 'rb') as fobj: res = array_from_file(shape, dt, fobj, mmap='r') assert_array_equal(res, arr) - assert_true(isinstance(res, np.memmap)) - assert_equal(res.mode, 'r') + assert isinstance(res, np.memmap) + assert res.mode == 'r' with open('test.bin', 'rb+') as fobj: res = array_from_file(shape, dt, fobj, mmap='r+') assert_array_equal(res, arr) - assert_true(isinstance(res, np.memmap)) - assert_equal(res.mode, 'r+') + assert isinstance(res, np.memmap) + assert res.mode == 'r+' with open('test.bin', 'rb') as fobj: res = array_from_file(shape, dt, fobj, mmap=False) assert_array_equal(res, arr) - assert_false(isinstance(res, np.memmap)) + assert not isinstance(res, np.memmap) with open('test.bin', 'rb') as fobj: - assert_raises(ValueError, - array_from_file, shape, dt, fobj, mmap='p') + with pytest.raises(ValueError): + array_from_file(shape, dt, fobj, mmap='p') def buf_chk(in_arr, out_buf, in_buf, offset): @@ -274,7 +285,7 @@ def test_array_from_file_reread(): out_arr = array_from_file(shape, dtt, fobj_r, offset, order) assert_array_equal(in_arr, out_arr) out_arr[..., 0] = -1 - assert_false(np.allclose(in_arr, out_arr)) + assert not np.allclose(in_arr, out_arr) out_arr2 = array_from_file(shape, dtt, fobj_r, offset, order) assert_array_equal(in_arr, out_arr2) finally: @@ -293,9 +304,7 @@ def test_array_to_file(): for code in '<>': ndt = dt.newbyteorder(code) for allow_intercept in (True, False): - with suppress_warnings(): # deprecated - scale, intercept, mn, mx = \ - calculate_scale(arr, ndt, allow_intercept) + scale, intercept, mn, mx = _calculate_scale(arr, ndt, allow_intercept) data_back = write_return(arr, str_io, ndt, 0, intercept, scale) assert_array_almost_equal(arr, data_back) @@ -334,7 +343,7 @@ def test_a2f_upscale(): back = apply_read_scaling(raw, slope, inter) top = back - arr score = np.abs(top / arr) - assert_true(np.all(score < 10)) + assert np.all(score < 10) def test_a2f_min_max(): @@ -545,13 +554,12 @@ def test_a2f_scaled_unscaled(): nan_fill = np.round(nan_fill) # nan2zero will check whether 0 in scaled to a valid value in output if (in_dtype in CFLOAT_TYPES and not mn_out <= nan_fill <= mx_out): - 
assert_raises(ValueError, - array_to_file, - arr, - fobj, - out_dtype=out_dtype, - divslope=divslope, - intercept=intercept) + with pytest.raises(ValueError): + array_to_file(arr, + fobj, + out_dtype=out_dtype, + divslope=divslope, + intercept=intercept) continue with suppress_warnings(): back_arr = write_return(arr, fobj, @@ -623,13 +631,12 @@ def test_a2f_bad_scaling(): intercept=inter, divslope=slope)) else: - assert_raises(ValueError, - array_to_file, - arr, - fobj, - np.int8, - intercept=inter, - divslope=slope) + with pytest.raises(ValueError): + array_to_file(arr, + fobj, + np.int8, + intercept=inter, + divslope=slope) def test_a2f_nan2zero_range(): @@ -666,8 +673,10 @@ def test_a2f_nan2zero_range(): # Errors from datatype threshold after scaling back_arr = write_return(arr, fobj, np.int8, intercept=128) assert_array_equal([-128, -128, -127, -128], back_arr) - assert_raises(ValueError, write_return, arr, fobj, np.int8, intercept=129) - assert_raises(ValueError, write_return, arr_no_nan, fobj, np.int8, intercept=129) + with pytest.raises(ValueError): + write_return(arr, fobj, np.int8, intercept=129) + with pytest.raises(ValueError): + write_return(arr_no_nan, fobj, np.int8, intercept=129) # OK with nan2zero false, but we get whatever nan casts to nan_cast = np.array(np.nan).astype(np.int8) back_arr = write_return(arr, fobj, np.int8, intercept=129, nan2zero=False) @@ -675,10 +684,10 @@ def test_a2f_nan2zero_range(): # divslope back_arr = write_return(arr, fobj, np.int8, intercept=256, divslope=2) assert_array_equal([-128, -128, -128, -128], back_arr) - assert_raises(ValueError, write_return, arr, fobj, np.int8, - intercept=257.1, divslope=2) - assert_raises(ValueError, write_return, arr_no_nan, fobj, np.int8, - intercept=257.1, divslope=2) + with pytest.raises(ValueError): + write_return(arr, fobj, np.int8, intercept=257.1, divslope=2) + with pytest.raises(ValueError): + write_return(arr_no_nan, fobj, np.int8, intercept=257.1, divslope=2) # OK with nan2zero false back_arr = write_return(arr, fobj, np.int8, intercept=257.1, divslope=2, nan2zero=False) @@ -697,14 +706,16 @@ def test_a2f_non_numeric(): # Some versions of numpy can cast structured types to float, others not try: arr.astype(float) - except ValueError: + except (TypeError, ValueError): pass else: back_arr = write_return(arr, fobj, float) assert_array_equal(back_arr, arr.astype(float)) # mn, mx never work for structured types - assert_raises(ValueError, write_return, arr, fobj, float, mn=0) - assert_raises(ValueError, write_return, arr, fobj, float, mx=10) + with pytest.raises(ValueError): + write_return(arr, fobj, float, mn=0) + with pytest.raises(ValueError): + write_return(arr, fobj, float, mx=10) def write_return(data, fileobj, out_dtype, *args, **kwargs): @@ -718,42 +729,39 @@ def write_return(data, fileobj, out_dtype, *args, **kwargs): def test_apply_scaling(): # Null scaling, same array returned arr = np.zeros((3,), dtype=np.int16) - assert_true(apply_read_scaling(arr) is arr) - assert_true(apply_read_scaling(arr, np.float64(1.0)) is arr) - assert_true(apply_read_scaling(arr, inter=np.float64(0)) is arr) + assert apply_read_scaling(arr) is arr + assert apply_read_scaling(arr, np.float64(1.0)) is arr + assert apply_read_scaling(arr, inter=np.float64(0)) is arr f32, f64 = np.float32, np.float64 f32_arr = np.zeros((1,), dtype=f32) i16_arr = np.zeros((1,), dtype=np.int16) # Check float upcast (not the normal numpy scalar rule) # This is the normal rule - no upcast from scalar - assert_equal((f32_arr * f64(1)).dtype, 
np.float32) - assert_equal((f32_arr + f64(1)).dtype, np.float32) + assert (f32_arr * f64(1)).dtype == np.float32 + assert (f32_arr + f64(1)).dtype == np.float32 # The function does upcast though ret = apply_read_scaling(np.float32(0), np.float64(2)) - assert_equal(ret.dtype, np.float64) + assert ret.dtype == np.float64 ret = apply_read_scaling(np.float32(0), inter=np.float64(2)) - assert_equal(ret.dtype, np.float64) + assert ret.dtype == np.float64 # Check integer inf upcast big = f32(type_info(f32)['max']) # Normally this would not upcast - assert_equal((i16_arr * big).dtype, np.float32) + assert (i16_arr * big).dtype == np.float32 # An equivalent case is a little hard to find for the intercept nmant_32 = type_info(np.float32)['nmant'] big_delta = np.float32(2**(floor_log2(big) - nmant_32)) - assert_equal((i16_arr * big_delta + big).dtype, np.float32) + assert (i16_arr * big_delta + big).dtype == np.float32 # Upcasting does occur with this routine - assert_equal(apply_read_scaling(i16_arr, big).dtype, np.float64) - assert_equal(apply_read_scaling(i16_arr, big_delta, big).dtype, np.float64) + assert apply_read_scaling(i16_arr, big).dtype == np.float64 + assert apply_read_scaling(i16_arr, big_delta, big).dtype == np.float64 # If float32 passed, no overflow, float32 returned - assert_equal(apply_read_scaling(np.int8(0), f32(-1.0), f32(0.0)).dtype, - np.float32) + assert apply_read_scaling(np.int8(0), f32(-1.0), f32(0.0)).dtype == np.float32 # float64 passed, float64 returned - assert_equal(apply_read_scaling(np.int8(0), -1.0, 0.0).dtype, np.float64) + assert apply_read_scaling(np.int8(0), -1.0, 0.0).dtype == np.float64 # float32 passed, overflow, float64 returned - assert_equal(apply_read_scaling(np.int8(0), f32(1e38), f32(0.0)).dtype, - np.float64) - assert_equal(apply_read_scaling(np.int8(0), f32(-1e38), f32(0.0)).dtype, - np.float64) + assert apply_read_scaling(np.int8(0), f32(1e38), f32(0.0)).dtype == np.float64 + assert apply_read_scaling(np.int8(0), f32(-1e38), f32(0.0)).dtype == np.float64 # Non-zero intercept still generates floats assert_dt_equal(apply_read_scaling(i16_arr, 1.0, 1.0).dtype, float) assert_dt_equal(apply_read_scaling( @@ -772,7 +780,7 @@ def test_apply_read_scaling_ints(): def test_apply_read_scaling_nones(): # Check that we can pass None as slope and inter to apply read scaling - arr = np.arange(10, dtype=np.int16) + arr=np.arange(10, dtype=np.int16) assert_array_equal(apply_read_scaling(arr, None, None), arr) assert_array_equal(apply_read_scaling(arr, 2, None), arr * 2) assert_array_equal(apply_read_scaling(arr, None, 1), arr + 1) @@ -780,10 +788,10 @@ def test_apply_read_scaling_nones(): def test_int_scinter(): # Finding float type needed for applying scale, offset to ints - assert_equal(int_scinter_ftype(np.int8, 1.0, 0.0), np.float32) - assert_equal(int_scinter_ftype(np.int8, -1.0, 0.0), np.float32) - assert_equal(int_scinter_ftype(np.int8, 1e38, 0.0), np.float64) - assert_equal(int_scinter_ftype(np.int8, -1e38, 0.0), np.float64) + assert int_scinter_ftype(np.int8, 1.0, 0.0) == np.float32 + assert int_scinter_ftype(np.int8, -1.0, 0.0) == np.float32 + assert int_scinter_ftype(np.int8, 1e38, 0.0) == np.float64 + assert int_scinter_ftype(np.int8, -1e38, 0.0) == np.float64 def test_working_type(): @@ -795,29 +803,29 @@ def wt(*args, **kwargs): d1 = np.atleast_1d for in_type in NUMERIC_TYPES: in_ts = np.dtype(in_type).str - assert_equal(wt(in_type), in_ts) - assert_equal(wt(in_type, 1, 0), in_ts) - assert_equal(wt(in_type, 1.0, 0.0), in_ts) + assert wt(in_type) == 
in_ts + assert wt(in_type, 1, 0) == in_ts + assert wt(in_type, 1.0, 0.0) == in_ts in_val = d1(in_type(0)) for slope_type in NUMERIC_TYPES: sl_val = slope_type(1) # no scaling, regardless of type - assert_equal(wt(in_type, sl_val, 0.0), in_ts) + assert wt(in_type, sl_val, 0.0) == in_ts sl_val = slope_type(2) # actual scaling out_val = in_val / d1(sl_val) - assert_equal(wt(in_type, sl_val), out_val.dtype.str) + assert wt(in_type, sl_val) == out_val.dtype.str for inter_type in NUMERIC_TYPES: i_val = inter_type(0) # no scaling, regardless of type - assert_equal(wt(in_type, 1, i_val), in_ts) + assert wt(in_type, 1, i_val) == in_ts i_val = inter_type(1) # actual scaling out_val = in_val - d1(i_val) - assert_equal(wt(in_type, 1, i_val), out_val.dtype.str) + assert wt(in_type, 1, i_val) == out_val.dtype.str # Combine scaling and intercept out_val = (in_val - d1(i_val)) / d1(sl_val) - assert_equal(wt(in_type, sl_val, i_val), out_val.dtype.str) + assert wt(in_type, sl_val, i_val) == out_val.dtype.str # Confirm that type codes and dtypes work as well f32s = np.dtype(np.float32).str - assert_equal(wt('f4', 1, 0), f32s) - assert_equal(wt(np.dtype('f4'), 1, 0), f32s) + assert wt('f4', 1, 0) == f32s + assert wt(np.dtype('f4'), 1, 0) == f32s def test_better_float(): @@ -826,18 +834,16 @@ def check_against(f1, f2): return f1 if FLOAT_TYPES.index(f1) >= FLOAT_TYPES.index(f2) else f2 for first in FLOAT_TYPES: for other in IUINT_TYPES + np.sctypes['complex']: - assert_equal(better_float_of(first, other), first) - assert_equal(better_float_of(other, first), first) + assert better_float_of(first, other) == first + assert better_float_of(other, first) == first for other2 in IUINT_TYPES + np.sctypes['complex']: - assert_equal(better_float_of(other, other2), np.float32) - assert_equal(better_float_of(other, other2, np.float64), - np.float64) + assert better_float_of(other, other2) == np.float32 + assert better_float_of(other, other2, np.float64) == np.float64 for second in FLOAT_TYPES: - assert_equal(better_float_of(first, second), - check_against(first, second)) + assert better_float_of(first, second) == check_against(first, second) # Check codes and dtypes work - assert_equal(better_float_of('f4', 'f8', 'f4'), np.float64) - assert_equal(better_float_of('i4', 'i8', 'f8'), np.float64) + assert better_float_of('f4', 'f8', 'f4') == np.float64 + assert better_float_of('i4', 'i8', 'f8') == np.float64 def test_best_write_scale_ftype(): @@ -846,14 +852,10 @@ def test_best_write_scale_ftype(): # Return float type cannot be less capable than the input array type for dtt in IUINT_TYPES + FLOAT_TYPES: arr = np.arange(10, dtype=dtt) - assert_equal(best_write_scale_ftype(arr, 1, 0), - better_float_of(dtt, np.float32)) - assert_equal(best_write_scale_ftype(arr, 1, 0, np.float64), - better_float_of(dtt, np.float64)) - assert_equal(best_write_scale_ftype(arr, np.float32(2), 0), - better_float_of(dtt, np.float32)) - assert_equal(best_write_scale_ftype(arr, 1, np.float32(1)), - better_float_of(dtt, np.float32)) + assert best_write_scale_ftype(arr, 1, 0) == better_float_of(dtt, np.float32) + assert best_write_scale_ftype(arr, 1, 0, np.float64) == better_float_of(dtt, np.float64) + assert best_write_scale_ftype(arr, np.float32(2), 0) == better_float_of(dtt, np.float32) + assert best_write_scale_ftype(arr, 1, np.float32(1)) == better_float_of(dtt, np.float32) # Overflowing ints with scaling results in upcast best_vals = ((np.float32, np.float64),) if np.longdouble in OK_FLOATS: @@ -866,55 +868,33 @@ def test_best_write_scale_ftype(): 
big_delta = lower_t(2**(floor_log2(t_max) - nmant)) # delta below max # Even large values that don't overflow don't change output arr = np.array([0, t_max], dtype=lower_t) - assert_equal(best_write_scale_ftype(arr, 1, 0), lower_t) + assert best_write_scale_ftype(arr, 1, 0) == lower_t # Scaling > 1 reduces output values, so no upcast needed - assert_equal(best_write_scale_ftype(arr, lower_t(1.01), 0), lower_t) + assert best_write_scale_ftype(arr, lower_t(1.01), 0) == lower_t # Scaling < 1 increases values, so upcast may be needed (and is here) - assert_equal(best_write_scale_ftype(arr, lower_t(0.99), 0), higher_t) + assert best_write_scale_ftype(arr, lower_t(0.99), 0) == higher_t # Large minus offset on large array can cause upcast - assert_equal(best_write_scale_ftype(arr, 1, -big_delta / 2.01), lower_t) - assert_equal(best_write_scale_ftype(arr, 1, -big_delta / 2.0), higher_t) + assert best_write_scale_ftype(arr, 1, -big_delta / 2.01) == lower_t + assert best_write_scale_ftype(arr, 1, -big_delta / 2.0) == higher_t # With infs already in input, default type returns arr[0] = np.inf - assert_equal(best_write_scale_ftype(arr, lower_t(0.5), 0), lower_t) + assert best_write_scale_ftype(arr, lower_t(0.5), 0) == lower_t arr[0] = -np.inf - assert_equal(best_write_scale_ftype(arr, lower_t(0.5), 0), lower_t) - - -def test_can_cast(): - tests = ((np.float32, np.float32, True, True, True), - (np.float64, np.float32, True, True, True), - (np.complex128, np.float32, False, False, False), - (np.float32, np.complex128, True, True, True), - (np.float32, np.uint8, False, True, True), - (np.uint32, np.complex128, True, True, True), - (np.int64, np.float32, True, True, True), - (np.complex128, np.int16, False, False, False), - (np.float32, np.int16, False, True, True), - (np.uint8, np.int16, True, True, True), - (np.uint16, np.int16, False, True, True), - (np.int16, np.uint16, False, False, True), - (np.int8, np.uint16, False, False, True), - (np.uint16, np.uint8, False, True, True), - ) - for intype, outtype, def_res, scale_res, all_res in tests: - assert_equal(def_res, can_cast(intype, outtype)) - assert_equal(scale_res, can_cast(intype, outtype, False, True)) - assert_equal(all_res, can_cast(intype, outtype, True, True)) + assert best_write_scale_ftype(arr, lower_t(0.5), 0) == lower_t def test_write_zeros(): bio = BytesIO() write_zeros(bio, 10000) - assert_equal(bio.getvalue(), b'\x00' * 10000) + assert bio.getvalue() == b'\x00' * 10000 bio.seek(0) bio.truncate(0) write_zeros(bio, 10000, 256) - assert_equal(bio.getvalue(), b'\x00' * 10000) + assert bio.getvalue() == b'\x00' * 10000 bio.seek(0) bio.truncate(0) write_zeros(bio, 200, 256) - assert_equal(bio.getvalue(), b'\x00' * 200) + assert bio.getvalue() == b'\x00' * 200 def test_seek_tell(): @@ -931,13 +911,13 @@ def test_seek_tell(): bio.seek(0) # First write the file with ImageOpener(in_file, 'wb') as fobj: - assert_equal(fobj.tell(), 0) + assert fobj.tell() == 0 # already at position - OK st(fobj, 0) - assert_equal(fobj.tell(), 0) + assert fobj.tell() == 0 # Move position by writing fobj.write(b'\x01' * start) - assert_equal(fobj.tell(), start) + assert fobj.tell() == start # Files other than BZ2Files can seek forward on write, leaving # zeros in their wake. 
BZ2Files can't seek when writing, unless # we enable the write0 flag to seek_tell @@ -946,40 +926,40 @@ def test_seek_tell(): fobj.write(b'\x00' * diff) else: st(fobj, end) - assert_equal(fobj.tell(), end) + assert fobj.tell() == end # Write tail fobj.write(b'\x02' * tail) bio.seek(0) # Now read back the file testing seek_tell in reading mode with ImageOpener(in_file, 'rb') as fobj: - assert_equal(fobj.tell(), 0) + assert fobj.tell() == 0 st(fobj, 0) - assert_equal(fobj.tell(), 0) + assert fobj.tell() == 0 st(fobj, start) - assert_equal(fobj.tell(), start) + assert fobj.tell() == start st(fobj, end) - assert_equal(fobj.tell(), end) + assert fobj.tell() == end # Seek anywhere works in read mode for all files st(fobj, 0) bio.seek(0) # Check we have the expected written output with ImageOpener(in_file, 'rb') as fobj: - assert_equal(fobj.read(), - b'\x01' * start + b'\x00' * diff + b'\x02' * tail) + assert fobj.read() == b'\x01' * start + b'\x00' * diff + b'\x02' * tail for in_file in ('test2.gz', 'test2.bz2'): # Check failure of write seek backwards with ImageOpener(in_file, 'wb') as fobj: fobj.write(b'g' * 10) - assert_equal(fobj.tell(), 10) + assert fobj.tell() == 10 seek_tell(fobj, 10) - assert_equal(fobj.tell(), 10) - assert_raises(IOError, seek_tell, fobj, 5) + assert fobj.tell() == 10 + with pytest.raises(IOError): + seek_tell(fobj, 5) # Make sure read seeks don't affect file with ImageOpener(in_file, 'rb') as fobj: seek_tell(fobj, 10) seek_tell(fobj, 0) with ImageOpener(in_file, 'rb') as fobj: - assert_equal(fobj.read(), b'g' * 10) + assert fobj.read() == b'g' * 10 def test_seek_tell_logic(): @@ -987,7 +967,7 @@ def test_seek_tell_logic(): # Seek works? OK bio = BytesIO() seek_tell(bio, 10) - assert_equal(bio.tell(), 10) + assert bio.tell() == 10 class BabyBio(BytesIO): @@ -995,17 +975,19 @@ def seek(self, *args): raise IOError() bio = BabyBio() # Fresh fileobj, position 0, can't seek - error - assert_raises(IOError, bio.seek, 10) + with pytest.raises(IOError): + bio.seek(10) # Put fileobj in correct position by writing ZEROB = b'\x00' bio.write(ZEROB * 10) seek_tell(bio, 10) # already there, nothing to do - assert_equal(bio.tell(), 10) - assert_equal(bio.getvalue(), ZEROB * 10) + assert bio.tell() == 10 + assert bio.getvalue() == ZEROB * 10 # Try write zeros to get to new position - assert_raises(IOError, bio.seek, 20) + with pytest.raises(IOError): + bio.seek(20) seek_tell(bio, 20, write0=True) - assert_equal(bio.getvalue(), ZEROB * 20) + assert bio.getvalue() == ZEROB * 20 def test_fname_ext_ul_case(): @@ -1018,42 +1000,41 @@ def test_fname_ext_ul_case(): with open('bfile.txt', 'wt') as fobj: fobj.write('More interesting information') # If there is no file, the case doesn't change - assert_equal(fname_ext_ul_case('nofile.txt'), 'nofile.txt') - assert_equal(fname_ext_ul_case('nofile.TXT'), 'nofile.TXT') + assert fname_ext_ul_case('nofile.txt') == 'nofile.txt' + assert fname_ext_ul_case('nofile.TXT') == 'nofile.TXT' # If there is a file, accept upper or lower case for ext if os_cares_case: - assert_equal(fname_ext_ul_case('afile.txt'), 'afile.TXT') - assert_equal(fname_ext_ul_case('bfile.TXT'), 'bfile.txt') + assert fname_ext_ul_case('afile.txt') == 'afile.TXT' + assert fname_ext_ul_case('bfile.TXT') == 'bfile.txt' else: - assert_equal(fname_ext_ul_case('afile.txt'), 'afile.txt') - assert_equal(fname_ext_ul_case('bfile.TXT'), 'bfile.TXT') - assert_equal(fname_ext_ul_case('afile.TXT'), 'afile.TXT') - assert_equal(fname_ext_ul_case('bfile.txt'), 'bfile.txt') + assert 
fname_ext_ul_case('afile.txt') == 'afile.txt' + assert fname_ext_ul_case('bfile.TXT') == 'bfile.TXT' + assert fname_ext_ul_case('afile.TXT') == 'afile.TXT' + assert fname_ext_ul_case('bfile.txt') == 'bfile.txt' # Not mixed case though - assert_equal(fname_ext_ul_case('afile.TxT'), 'afile.TxT') + assert fname_ext_ul_case('afile.TxT') == 'afile.TxT' def test_allopen(): # This import into volumeutils is for compatibility. The code is the # ``openers`` module. - with clear_and_catch_warnings() as w: - warnings.filterwarnings('once', category=DeprecationWarning) + with pytest.deprecated_call() as w: # Test default mode is 'rb' fobj = allopen(__file__) # Check we got the deprecation warning - assert_equal(len(w), 1) - assert_equal(fobj.mode, 'rb') + assert len(w) == 1 + assert fobj.mode == 'rb' # That we can set it fobj = allopen(__file__, 'r') - assert_equal(fobj.mode, 'r') + assert fobj.mode == 'r' # with keyword arguments fobj = allopen(__file__, mode='r') - assert_equal(fobj.mode, 'r') + assert fobj.mode == 'r' # fileobj returns fileobj msg = b'tiddle pom' sobj = BytesIO(msg) fobj = allopen(sobj) - assert_equal(fobj.read(), msg) + assert fobj.read() == msg # mode is gently ignored fobj = allopen(sobj, mode='r') @@ -1069,7 +1050,7 @@ def test_allopen_compresslevel(): # Stash module global from .. import volumeutils as vu original_compress_level = vu.default_compresslevel - assert_equal(original_compress_level, 1) + assert original_compress_level == 1 try: with InTemporaryDirectory(): for compresslevel in ('default', 1, 9): @@ -1082,8 +1063,8 @@ def test_allopen_compresslevel(): with open(fname, 'rb') as fobj: my_selves_smaller = fobj.read() sizes[compresslevel] = len(my_selves_smaller) - assert_equal(sizes['default'], sizes[1]) - assert_true(sizes[1] > sizes[9]) + assert sizes['default'] == sizes[1] + assert sizes[1] > sizes[9] finally: vu.default_compresslevel = original_compress_level @@ -1114,7 +1095,7 @@ def test_shape_zoom_affine(): def test_rec2dict(): r = np.zeros((), dtype=[('x', 'i4'), ('s', 'S10')]) d = rec2dict(r) - assert_equal(d, {'x': 0, 's': b''}) + assert d == {'x': 0, 's': b''} def test_dtypes(): @@ -1128,35 +1109,37 @@ def test_dtypes(): dt_defs = ((16, 'float32', np.float32),) dtr = make_dt_codes(dt_defs) # check we have the fields we were expecting - assert_equal(dtr.value_set(), set((16,))) - assert_equal(dtr.fields, ('code', 'label', 'type', - 'dtype', 'sw_dtype')) + assert dtr.value_set() == set((16,)) + assert dtr.fields == ('code', 'label', 'type', 'dtype', 'sw_dtype') # These of course should pass regardless of dtype - assert_equal(dtr[np.float32], 16) - assert_equal(dtr['float32'], 16) + assert dtr[np.float32] == 16 + assert dtr['float32'] == 16 # These also pass despite dtype issue - assert_equal(dtr[np.dtype(np.float32)], 16) - assert_equal(dtr[np.dtype('f4')], 16) - assert_equal(dtr[np.dtype('f4').newbyteorder('S')], 16) + assert dtr[np.dtype(np.float32)] == 16 + assert dtr[np.dtype('f4')] == 16 + assert dtr[np.dtype('f4').newbyteorder('S')] == 16 # But this one used to fail - assert_equal(dtr[np.dtype('f4').newbyteorder(native_code)], 16) + assert dtr[np.dtype('f4').newbyteorder(native_code)] == 16 # Check we can pass in niistring as well dt_defs = ((16, 'float32', np.float32, 'ASTRING'),) dtr = make_dt_codes(dt_defs) - assert_equal(dtr[np.dtype('f4').newbyteorder('S')], 16) - assert_equal(dtr.value_set(), set((16,))) - assert_equal(dtr.fields, ('code', 'label', 'type', 'niistring', - 'dtype', 'sw_dtype')) - assert_equal(dtr.niistring[16], 'ASTRING') + assert 
dtr[np.dtype('f4').newbyteorder('S')] == 16 + assert dtr.value_set() == set((16,)) + assert dtr.fields == ('code', 'label', 'type', 'niistring', 'dtype', + 'sw_dtype') + assert dtr.niistring[16] == 'ASTRING' # And that unequal elements raises error dt_defs = ((16, 'float32', np.float32, 'ASTRING'), (16, 'float32', np.float32)) - assert_raises(ValueError, make_dt_codes, dt_defs) + with pytest.raises(ValueError): + make_dt_codes(dt_defs) # And that 2 or 5 elements raises error dt_defs = ((16, 'float32'),) - assert_raises(ValueError, make_dt_codes, dt_defs) + with pytest.raises(ValueError): + make_dt_codes(dt_defs) dt_defs = ((16, 'float32', np.float32, 'ASTRING', 'ANOTHERSTRING'),) - assert_raises(ValueError, make_dt_codes, dt_defs) + with pytest.raises(ValueError): + make_dt_codes(dt_defs) def test__write_data(): @@ -1241,7 +1224,101 @@ def read(self, n_bytes): array_from_file(shape, np.int8, NoStringIO()) except IOError as err: message = str(err) - assert_equal(message, - 'Expected {0} bytes, got {1} bytes from {2}\n' - ' - could the file be damaged?'.format( - 11390625000000000000, 0, 'object')) + assert message == 'Expected {0} bytes, got {1} bytes from {2}\n' \ + ' - could the file be damaged?'.format( + 11390625000000000000, 0, 'object') + + +def test__ftype4scaled_finite_warningfilters(): + # This test checks our ability to properly manage the thread-unsafe + # warnings filter list. + + # _ftype4scaled_finite always operates on one-or-two element arrays + # Ensure that an overflow will happen for < float64 + finfo = np.finfo(np.float32) + tst_arr = np.array((finfo.min, finfo.max), dtype=np.float32) + + go = threading.Event() + stop = threading.Event() + err = [] + + class MakeTotalDestroy(threading.Thread): + def run(self): + # Restore the warnings filters when we're done testing + with warnings.catch_warnings(): + go.set() + while not stop.is_set(): + warnings.filters[:] = [] + time.sleep(0) + + class CheckScaling(threading.Thread): + def run(self): + go.wait() + # Give ourselves a few bites at the apple + # 200 loops through the function takes ~10ms + # The highest number of iterations I've seen before hitting interference + # is 131, with 99% under 30, so this should be reasonably reliable. + for i in range(200): + try: + # Use float16 to ensure two failures and increase time in function + _ftype4scaled_finite(tst_arr, 2.0, 1.0, default=np.float16) + except Exception as e: + err.append(e) + break + stop.set() + + thread_a = CheckScaling() + thread_b = MakeTotalDestroy() + thread_a.start() + thread_b.start() + thread_a.join() + thread_b.join() + + if err: + raise err[0] + + +def _calculate_scale(data, out_dtype, allow_intercept): + ''' Calculate scaling and optional intercept for data + + Copy of the deprecated volumeutils.calculate_scale, to preserve tests + + Parameters + ---------- + data : array + out_dtype : dtype + output data type in some form understood by ``np.dtype`` + allow_intercept : bool + If True allow non-zero intercept + + Returns + ------- + scaling : None or float + scalefactor to divide into data. None if no valid data + intercept : None or float + intercept to subtract from data. 
None if no valid data + mn : None or float + minimum of finite value in data or None if this will not + be used to threshold data + mx : None or float + minimum of finite value in data, or None if this will not + be used to threshold data + ''' + # Code here is a compatibility shell around arraywriters refactor + in_dtype = data.dtype + out_dtype = np.dtype(out_dtype) + if np.can_cast(in_dtype, out_dtype): + return 1.0, 0.0, None, None + from ..arraywriters import make_array_writer, WriterError, get_slope_inter + try: + writer = make_array_writer(data, out_dtype, True, allow_intercept) + except WriterError as e: + raise ValueError(str(e)) + if out_dtype.kind in 'fc': + return (1.0, 0.0, None, None) + mn, mx = writer.finite_range() + if (mn, mx) == (np.inf, -np.inf): # No valid data + return (None, None, None, None) + if in_dtype.kind not in 'fc': + mn, mx = (None, None) + return get_slope_inter(writer) + (mn, mx) diff --git a/nibabel/tests/test_wrapstruct.py b/nibabel/tests/test_wrapstruct.py index 5e307067ab..26e04dd8f9 100644 --- a/nibabel/tests/test_wrapstruct.py +++ b/nibabel/tests/test_wrapstruct.py @@ -26,8 +26,7 @@ import logging import numpy as np -from io import BytesIO -from six import StringIO +from io import BytesIO, StringIO from ..wrapstruct import WrapStructError, WrapStruct, LabeledWrapStruct from ..batteryrunners import Report @@ -35,12 +34,11 @@ from ..spatialimages import HeaderDataError from .. import imageglobals -from unittest import TestCase +from ..testing import BaseTestCase from numpy.testing import assert_array_equal +import pytest -from ..testing import (assert_equal, assert_true, assert_false, - assert_raises, assert_not_equal) INTEGER_TYPES = np.sctypes['int'] + np.sctypes['uint'] @@ -81,7 +79,7 @@ def log_chk(hdr, level): if level == 0: # Should never log or raise error logger.setLevel(0) hdrc.check_fix(logger=logger, error_level=0) - assert_equal(str_io.getvalue(), '') + assert str_io.getvalue() == '' logger.removeHandler(handler) return hdrc, '', () # Non zero defect level, test above and below threshold. @@ -91,12 +89,12 @@ def log_chk(hdr, level): # Logging level above threshold, no log. 
logger.setLevel(level + 1) hdrc.check_fix(logger=logger, error_level=e_lev) - assert_equal(str_io.getvalue(), '') + assert str_io.getvalue() == '' # Logging level below threshold, log appears, store logged message logger.setLevel(level - 1) hdrc = hdr.copy() hdrc.check_fix(logger=logger, error_level=e_lev) - assert_true(str_io.getvalue() != '') + assert str_io.getvalue() != '' message = str_io.getvalue().strip() logger.removeHandler(handler) # When error level == level, check_fix should raise an error @@ -108,7 +106,7 @@ def log_chk(hdr, level): return hdrc, message, raiser -class _TestWrapStructBase(TestCase): +class _TestWrapStructBase(BaseTestCase): ''' Class implements base tests for binary headers It serves as a base class for other binary header tests @@ -124,12 +122,12 @@ def test_general_init(self): hdr = self.header_class() # binaryblock has length given by header data dtype binblock = hdr.binaryblock - assert_equal(len(binblock), hdr.structarr.dtype.itemsize) + assert len(binblock) == hdr.structarr.dtype.itemsize # Endianness will be native by default for empty header - assert_equal(hdr.endianness, native_code) + assert hdr.endianness == native_code # But you can change this if you want hdr = self.header_class(endianness='swapped') - assert_equal(hdr.endianness, swapped_code) + assert hdr.endianness == swapped_code # You can also pass in a check flag, without data this has no # effect hdr = self.header_class(check=False) @@ -142,17 +140,17 @@ def test__eq__(self): # Test equal and not equal hdr1 = self.header_class() hdr2 = self.header_class() - assert_equal(hdr1, hdr2) + assert hdr1 == hdr2 self._set_something_into_hdr(hdr1) - assert_not_equal(hdr1, hdr2) + assert hdr1 != hdr2 self._set_something_into_hdr(hdr2) - assert_equal(hdr1, hdr2) + assert hdr1 == hdr2 # Check byteswapping maintains equality hdr3 = hdr2.as_byteswapped() - assert_equal(hdr2, hdr3) + assert hdr2 == hdr3 # Check comparing to funny thing says no - assert_not_equal(hdr1, None) - assert_not_equal(hdr1, 1) + assert hdr1 != None + assert hdr1 != 1 def test_to_from_fileobj(self): # Successful write using write_to @@ -161,28 +159,26 @@ def test_to_from_fileobj(self): hdr.write_to(str_io) str_io.seek(0) hdr2 = self.header_class.from_fileobj(str_io) - assert_equal(hdr2.endianness, native_code) - assert_equal(hdr2.binaryblock, hdr.binaryblock) + assert hdr2.endianness == native_code + assert hdr2.binaryblock == hdr.binaryblock def test_mappingness(self): hdr = self.header_class() - assert_raises(ValueError, - hdr.__setitem__, - 'nonexistent key', - 0.1) + with pytest.raises(ValueError): + hdr['nonexistent key'] = 0.1 hdr_dt = hdr.structarr.dtype keys = hdr.keys() - assert_equal(keys, list(hdr)) + assert keys == list(hdr) vals = hdr.values() - assert_equal(len(vals), len(keys)) - assert_equal(keys, list(hdr_dt.names)) + assert len(vals) == len(keys) + assert keys == list(hdr_dt.names) for key, val in hdr.items(): assert_array_equal(hdr[key], val) # verify that .get operates as destined - assert_equal(hdr.get('nonexistent key'), None) - assert_equal(hdr.get('nonexistent key', 'default'), 'default') - assert_equal(hdr.get(keys[0]), vals[0]) - assert_equal(hdr.get(keys[0], 'default'), vals[0]) + assert hdr.get('nonexistent key') is None + assert hdr.get('nonexistent key', 'default') == 'default' + assert hdr.get(keys[0]) == vals[0] + assert hdr.get(keys[0], 'default') == vals[0] # make sure .get returns values which evaluate to False. 
We have to # use a different falsy value depending on the data type of the first @@ -190,9 +186,9 @@ def test_mappingness(self): falsyval = 0 if np.issubdtype(hdr_dt[0], np.number) else b'' hdr[keys[0]] = falsyval - assert_equal(hdr[keys[0]], falsyval) - assert_equal(hdr.get(keys[0]), falsyval) - assert_equal(hdr.get(keys[0], -1), falsyval) + assert hdr[keys[0]] == falsyval + assert hdr.get(keys[0]) == falsyval + assert hdr.get(keys[0], -1) == falsyval def test_endianness_ro(self): @@ -204,16 +200,17 @@ def test_endianness_ro(self): data) - but this is done via via the as_byteswapped method ''' hdr = self.header_class() - assert_raises(AttributeError, hdr.__setattr__, 'endianness', '<') + with pytest.raises(AttributeError): + hdr.endianness = '<' def test_endian_guess(self): # Check guesses of endian eh = self.header_class() - assert_equal(eh.endianness, native_code) + assert eh.endianness == native_code hdr_data = eh.structarr.copy() hdr_data = hdr_data.byteswap(swapped_code) eh_swapped = self.header_class(hdr_data.tostring()) - assert_equal(eh_swapped.endianness, swapped_code) + assert eh_swapped.endianness == swapped_code def test_binblock_is_file(self): # Checks that the binary string respresentation is the whole of the @@ -225,7 +222,7 @@ def test_binblock_is_file(self): hdr = self.header_class() str_io = BytesIO() hdr.write_to(str_io) - assert_equal(str_io.getvalue(), hdr.binaryblock) + assert str_io.getvalue() == hdr.binaryblock def test_structarr(self): # structarr attribute also read only @@ -233,7 +230,8 @@ def test_structarr(self): # Just check we can get structarr hdr.structarr # That it's read only - assert_raises(AttributeError, hdr.__setattr__, 'structarr', 0) + with pytest.raises(AttributeError): + hdr.structarr = 0 def log_chk(self, hdr, level): return log_chk(hdr, level) @@ -242,50 +240,49 @@ def assert_no_log_err(self, hdr): """ Assert that no logging or errors result from this `hdr` """ fhdr, message, raiser = self.log_chk(hdr, 0) - assert_equal((fhdr, message), (hdr, '')) + assert (fhdr, message) == (hdr, '') def test_bytes(self): # Test get of bytes hdr1 = self.header_class() bb = hdr1.binaryblock hdr2 = self.header_class(hdr1.binaryblock) - assert_equal(hdr1, hdr2) - assert_equal(hdr1.binaryblock, hdr2.binaryblock) + assert hdr1 == hdr2 + assert hdr1.binaryblock == hdr2.binaryblock # Do a set into the header, and try again. The specifics of 'setting # something' will depend on the nature of the bytes object self._set_something_into_hdr(hdr1) hdr2 = self.header_class(hdr1.binaryblock) - assert_equal(hdr1, hdr2) - assert_equal(hdr1.binaryblock, hdr2.binaryblock) + assert hdr1 == hdr2 + assert hdr1.binaryblock == hdr2.binaryblock # Short and long binaryblocks give errors # (here set through init) - assert_raises(WrapStructError, - self.header_class, - bb[:-1]) - assert_raises(WrapStructError, - self.header_class, - bb + b'\x00') + with pytest.raises(WrapStructError): + self.header_class(bb[:-1]) + with pytest.raises(WrapStructError): + self.header_class(bb + b'\x00') # Checking set to true by default, and prevents nonsense being # set into the header. 
bb_bad = self.get_bad_bb() if bb_bad is None: return with imageglobals.LoggingOutputSuppressor(): - assert_raises(HeaderDataError, self.header_class, bb_bad) + with pytest.raises(HeaderDataError): + self.header_class(bb_bad) # now slips past without check _ = self.header_class(bb_bad, check=False) def test_as_byteswapped(self): # Check byte swapping hdr = self.header_class() - assert_equal(hdr.endianness, native_code) + assert hdr.endianness == native_code # same code just returns a copy hdr2 = hdr.as_byteswapped(native_code) - assert_false(hdr is hdr2) + assert not hdr is hdr2 # Different code gives byteswapped copy hdr_bs = hdr.as_byteswapped(swapped_code) - assert_equal(hdr_bs.endianness, swapped_code) - assert_not_equal(hdr.binaryblock, hdr_bs.binaryblock) + assert hdr_bs.endianness == swapped_code + assert hdr.binaryblock != hdr_bs.binaryblock # Note that contents is not rechecked on swap / copy class DC(self.header_class): @@ -293,7 +290,8 @@ class DC(self.header_class): def check_fix(self, *args, **kwargs): raise Exception # Assumes check=True default - assert_raises(Exception, DC, hdr.binaryblock) + with pytest.raises(Exception): + DC(hdr.binaryblock) hdr = DC(hdr.binaryblock, check=False) hdr2 = hdr.as_byteswapped(native_code) hdr_bs = hdr.as_byteswapped(swapped_code) @@ -312,7 +310,8 @@ def test_str(self): hdr = self.header_class() # Check something returns from str s1 = str(hdr) - assert_true(len(s1) > 0) + assert len(s1) > 0 + class _TestLabeledWrapStruct(_TestWrapStructBase): @@ -325,28 +324,30 @@ class MyHdr(self.header_class): _field_recoders = {} hdr = MyHdr() # Key not existing raises error - assert_raises(ValueError, hdr.get_value_label, 'improbable') + with pytest.raises(ValueError): + hdr.get_value_label('improbable') # Even if there is a recoder - assert_true('improbable' not in hdr.keys()) + assert 'improbable' not in hdr.keys() rec = Recoder([[0, 'fullness of heart']], ('code', 'label')) hdr._field_recoders['improbable'] = rec - assert_raises(ValueError, hdr.get_value_label, 'improbable') + with pytest.raises(ValueError): + hdr.get_value_label('improbable') # If the key exists in the structure, and is intable, then we can recode for key, value in hdr.items(): # No recoder at first - assert_raises(ValueError, hdr.get_value_label, 0) + with pytest.raises(ValueError): + hdr.get_value_label(0) if not value.dtype.type in INTEGER_TYPES or not np.isscalar(value): continue code = int(value) rec = Recoder([[code, 'fullness of heart']], ('code', 'label')) hdr._field_recoders[key] = rec - assert_equal(hdr.get_value_label(key), 'fullness of heart') + assert hdr.get_value_label(key) == 'fullness of heart' # If key exists, but value is missing, we get 'unknown code' # Speculating that we can set code value 0 or 1 new_code = 1 if code == 0 else 0 hdr[key] = new_code - assert_equal(hdr.get_value_label(key), - ''.format(new_code)) + assert hdr.get_value_label(key) == ''.format(new_code) class MyWrapStruct(WrapStruct): @@ -419,48 +420,48 @@ def _set_something_into_hdr(self, hdr): def test_empty(self): # Test contents of default header hdr = self.header_class() - assert_equal(hdr['an_integer'], 1) - assert_equal(hdr['a_str'], b'a string') + assert hdr['an_integer'] == 1 + assert hdr['a_str'] == b'a string' def test_str(self): hdr = self.header_class() s1 = str(hdr) - assert_true(len(s1) > 0) - assert_true('an_integer' in s1) - assert_true('a_str' in s1) + assert len(s1) > 0 + assert 'an_integer' in s1 + assert 'a_str' in s1 def test_copy(self): hdr = self.header_class() hdr2 = 
hdr.copy() - assert_equal(hdr, hdr2) + assert hdr == hdr2 self._set_something_into_hdr(hdr) - assert_not_equal(hdr, hdr2) + assert hdr != hdr2 self._set_something_into_hdr(hdr2) - assert_equal(hdr, hdr2) + assert hdr == hdr2 def test_copy(self): hdr = self.header_class() hdr2 = hdr.copy() - assert_equal(hdr, hdr2) + assert hdr == hdr2 self._set_something_into_hdr(hdr) - assert_not_equal(hdr, hdr2) + assert hdr != hdr2 self._set_something_into_hdr(hdr2) - assert_equal(hdr, hdr2) + assert hdr == hdr2 def test_checks(self): # Test header checks hdr_t = self.header_class() # _dxer just returns the diagnostics as a string # Default hdr is OK - assert_equal(self._dxer(hdr_t), '') + assert self._dxer(hdr_t) == '' # An integer should be 1 hdr = hdr_t.copy() hdr['an_integer'] = 2 - assert_equal(self._dxer(hdr), 'an_integer should be 1') + assert self._dxer(hdr) == 'an_integer should be 1' # String should be lower case hdr = hdr_t.copy() hdr['a_str'] = 'My Name' - assert_equal(self._dxer(hdr), 'a_str should be lower case') + assert self._dxer(hdr) == 'a_str should be lower case' def test_log_checks(self): # Test logging, fixing, errors for header checking @@ -471,17 +472,15 @@ def test_log_checks(self): hdr['an_integer'] = 2 # severity 40 fhdr, message, raiser = self.log_chk(hdr, 40) return - assert_equal(fhdr['an_integer'], 1) - assert_equal(message, - 'an_integer should be 1; set an_integer to 1') - assert_raises(*raiser) + assert fhdr['an_integer'] == 1 + assert message == 'an_integer should be 1; set an_integer to 1' + pytest.raises(*raiser) # lower case string hdr = HC() hdr['a_str'] = 'Hello' # severity = 20 fhdr, message, raiser = self.log_chk(hdr, 20) - assert_equal(message, 'a_str should be lower case; ' - 'set a_str to lower case') - assert_raises(*raiser) + assert message == 'a_str should be lower case; set a_str to lower case' + pytest.raises(*raiser) def test_logger_error(self): # Check that we can reset the logger and error level @@ -500,12 +499,11 @@ def test_logger_error(self): # Check log message appears in new logger imageglobals.logger = logger hdr.copy().check_fix() - assert_equal(str_io.getvalue(), - 'a_str should be lower case; ' - 'set a_str to lower case\n') + assert str_io.getvalue() == 'a_str should be lower case; set a_str to lower case\n' # Check that error_level in fact causes error to be raised imageglobals.error_level = 20 - assert_raises(HeaderDataError, hdr.copy().check_fix) + with pytest.raises(HeaderDataError): + hdr.copy().check_fix() finally: imageglobals.logger, imageglobals.error_level = log_cache @@ -519,13 +517,13 @@ class MyHdr(self.header_class): _field_recoders = {} hdr = MyHdr() s1 = str(hdr) - assert_true(len(s1) > 0) - assert_true('an_integer : 1' in s1) - assert_true('fullness of heart' not in s1) + assert len(s1) > 0 + assert 'an_integer : 1' in s1 + assert 'fullness of heart' not in s1 rec = Recoder([[1, 'fullness of heart']], ('code', 'label')) hdr._field_recoders['an_integer'] = rec s2 = str(hdr) - assert_true('fullness of heart' in s2) + assert 'fullness of heart' in s2 hdr['an_integer'] = 10 s1 = str(hdr) - assert_true('' in s1) + assert '' in s1 diff --git a/nibabel/tmpdirs.py b/nibabel/tmpdirs.py index 8c1b704260..2636d8acb7 100644 --- a/nibabel/tmpdirs.py +++ b/nibabel/tmpdirs.py @@ -8,7 +8,6 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## ''' Contexts for *with* statement providing temporary directories ''' -from __future__ import division, print_function, absolute_import import os import shutil from tempfile 
import template, mkdtemp diff --git a/nibabel/trackvis.py b/nibabel/trackvis.py index 7da4ffcbe1..691ad7b537 100644 --- a/nibabel/trackvis.py +++ b/nibabel/trackvis.py @@ -4,24 +4,24 @@ We will deprecate this, the old interface, in some future release. """ -from __future__ import division, print_function import warnings import struct import itertools import numpy as np import numpy.linalg as npl +from numpy.compat.py3k import asstr -from .py3k import asstr from .volumeutils import (native_code, swapped_code, endian_codes, rec2dict) from .openers import ImageOpener from .orientations import aff2axcodes from .affines import apply_affine +from .deprecated import deprecate_with_version -try: - basestring -except NameError: # python 3 - basestring = str +warnings.warn("The trackvis interface has been deprecated and will be removed " + "in v4.0; please use the 'nibabel.streamlines' interface.", + DeprecationWarning, + stacklevel=2) # Definition of trackvis header structure. # See http://www.trackvis.org/docs/?subsect=fileformat @@ -99,6 +99,9 @@ class DataError(Exception): """ +@deprecate_with_version('trackvis.read is deprecated; please use ' + 'nibabel.streamlines.load, instead.', + since='2.5.0', until='4.0.0') def read(fileobj, as_generator=False, points_space=None, strict=True): ''' Read trackvis file from `fileobj`, return `streamlines`, `header` @@ -254,6 +257,9 @@ def track_gen(): return streamlines, hdr +@deprecate_with_version('trackvis.write is deprecated; please use ' + 'nibabel.streamlines.save, instead.', + since='2.5.0', until='4.0.0') def write(fileobj, streamlines, hdr_mapping=None, endianness=None, points_space=None): ''' Write header and `streamlines` to trackvis file `fileobj` @@ -314,7 +320,7 @@ def write(fileobj, streamlines, hdr_mapping=None, endianness=None, >>> pts1 = np.random.uniform(size=(10,3)) >>> streamlines = ([(pts0, None, None), (pts1, None, None)]) >>> write(file_obj, streamlines) - >>> _ = file_obj.seek(0) # returns 0 in python 3 + >>> _ = file_obj.seek(0) # returns 0 >>> streams, hdr = read(file_obj) >>> len(streams) 2 @@ -536,6 +542,9 @@ def _hdr_from_mapping(hdr=None, mapping=None, endianness=native_code): return hdr +@deprecate_with_version('empty_header is deprecated; please use ' + 'nibabel.streamlines.TrkFile.create_empty_header, instead.', + since='2.5.0', until='4.0.0') def empty_header(endianness=None, version=2): ''' Empty trackvis header @@ -590,7 +599,10 @@ def empty_header(endianness=None, version=2): return hdr -def aff_from_hdr(trk_hdr, atleast_v2=None): +@deprecate_with_version('aff_from_hdr is deprecated; please use ' + 'nibabel.streamlines.trk.get_affine_trackvis_to_rasmm, instead.', + since='2.5.0', until='4.0.0') +def aff_from_hdr(trk_hdr, atleast_v2=True): ''' Return voxel to mm affine from trackvis header Affine is mapping from voxel space to Nifti (RAS) output coordinate @@ -625,12 +637,6 @@ def aff_from_hdr(trk_hdr, atleast_v2=None): origin field to 0. In future, we'll raise an error rather than try and estimate the affine from version 1 fields ''' - if atleast_v2 is None: - warnings.warn('Defaulting to `atleast_v2` of False. 
Future versions ' - 'will default to True', - FutureWarning, - stacklevel=2) - atleast_v2 = False if trk_hdr['version'] == 2: aff = trk_hdr['vox_to_ras'] if aff[3, 3] != 0: @@ -673,7 +679,10 @@ def aff_from_hdr(trk_hdr, atleast_v2=None): return aff -def aff_to_hdr(affine, trk_hdr, pos_vox=None, set_order=None): +@deprecate_with_version('aff_to_hdr is deprecated; please use the ' + 'nibabel.streamlines.TrkFile.affine_to_rasmm property, instead.', + since='2.5.0', until='4.0.0') +def aff_to_hdr(affine, trk_hdr, pos_vox=True, set_order=True): ''' Set affine `affine` into trackvis header `trk_hdr` Affine is mapping from voxel space to Nifti RAS) output coordinate @@ -715,18 +724,6 @@ def aff_to_hdr(affine, trk_hdr, pos_vox=None, set_order=None): application). The application also ignores the origin field, and may not use the 'image_orientation_patient' field. ''' - if pos_vox is None: - warnings.warn('Default for ``pos_vox`` will change to True in ' - 'future versions of nibabel', - FutureWarning, - stacklevel=2) - pos_vox = False - if set_order is None: - warnings.warn('Default for ``set_order`` will change to True in ' - 'future versions of nibabel', - FutureWarning, - stacklevel=2) - set_order = False try: version = trk_hdr['version'] except (KeyError, ValueError): # dict or structured array @@ -797,6 +794,9 @@ class TrackvisFile(object): relationship between voxels, rasmm and voxmm space (above). ''' + @deprecate_with_version('TrackvisFile is deprecated; please use ' + 'nibabel.streamlines.TrkFile, instead.', + since='2.5.0', until='4.0.0') def __init__(self, streamlines, mapping=None, @@ -826,17 +826,15 @@ def __init__(self, @classmethod def from_file(klass, file_like, points_space=None): streamlines, header = read(file_like, points_space=points_space) - filename = (file_like if isinstance(file_like, basestring) - else None) + filename = file_like if isinstance(file_like, str) else None return klass(streamlines, header, None, filename, points_space) def to_file(self, file_like): write(file_like, self.streamlines, self.header, self.endianness, points_space=self.points_space) - self.filename = (file_like if isinstance(file_like, basestring) - else None) + self.filename = file_like if isinstance(file_like, str) else None - def get_affine(self, atleast_v2=None): + def get_affine(self, atleast_v2=True): """ Get affine from header in object Returns @@ -853,15 +851,9 @@ def get_affine(self, atleast_v2=None): consider it unsafe for version 1 headers, and in future versions of nibabel we will raise an error for trackvis headers < version 2. """ - if atleast_v2 is None: - warnings.warn('Defaulting to `atleast_v2` of False. 
Future ' - 'versions will default to True', - FutureWarning, - stacklevel=2) - atleast_v2 = False return aff_from_hdr(self.header, atleast_v2) - def set_affine(self, affine, pos_vox=None, set_order=None): + def set_affine(self, affine, pos_vox=True, set_order=True): """ Set affine `affine` into trackvis header Affine is mapping from voxel space to Nifti RAS) output coordinate @@ -888,16 +880,4 @@ def set_affine(self, affine, pos_vox=None, set_order=None): ------- None """ - if pos_vox is None: - warnings.warn('Default for ``pos_vox`` will change to True in ' - 'future versions of nibabel', - FutureWarning, - stacklevel=2) - pos_vox = False - if set_order is None: - warnings.warn('Default for ``set_order`` will change to True in ' - 'future versions of nibabel', - FutureWarning, - stacklevel=2) - set_order = False return aff_to_hdr(affine, self.header, pos_vox, set_order) diff --git a/nibabel/viewers.py b/nibabel/viewers.py index 9c15625348..0cdbdcb815 100644 --- a/nibabel/viewers.py +++ b/nibabel/viewers.py @@ -3,7 +3,6 @@ Includes version of OrthoSlicer3D code originally written by our own Paul Ivanov. """ -from __future__ import division, print_function import numpy as np import weakref @@ -414,7 +413,7 @@ def _set_position(self, x, y, z, notify=True): idx = [slice(None)] * len(self._axes) for ii in range(3): idx[self._order[ii]] = self._data_idx[ii] - vdata = self._data[idx].ravel() + vdata = self._data[tuple(idx)].ravel() vdata = np.concatenate((vdata, [vdata[-1]])) self._volume_ax_objs['patch'].set_x(self._data_idx[3] - 0.5) self._volume_ax_objs['step'].set_ydata(vdata) diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index 2b8349d369..41d248a671 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -7,7 +7,6 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## ''' Utility functions for analyze-like formats ''' -from __future__ import division, print_function import sys import warnings @@ -401,17 +400,17 @@ def can_cast(in_type, out_type, has_intercept=False, has_slope=False): Examples -------- - >>> can_cast(np.float64, np.float32) + >>> can_cast(np.float64, np.float32) # doctest: +SKIP True - >>> can_cast(np.complex128, np.float32) + >>> can_cast(np.complex128, np.float32) # doctest: +SKIP False - >>> can_cast(np.int64, np.float32) + >>> can_cast(np.int64, np.float32) # doctest: +SKIP True - >>> can_cast(np.float32, np.int16) + >>> can_cast(np.float32, np.int16) # doctest: +SKIP False - >>> can_cast(np.float32, np.int16, False, True) + >>> can_cast(np.float32, np.int16, False, True) # doctest: +SKIP True - >>> can_cast(np.int16, np.uint8) + >>> can_cast(np.int16, np.uint8) # doctest: +SKIP False Whether we can actually cast int to uint when we don't have an intercept @@ -421,9 +420,9 @@ def can_cast(in_type, out_type, has_intercept=False, has_slope=False): Here we need an intercept to scale the full range of an int to a uint - >>> can_cast(np.int16, np.uint8, False, True) + >>> can_cast(np.int16, np.uint8, False, True) # doctest: +SKIP False - >>> can_cast(np.int16, np.uint8, True, True) + >>> can_cast(np.int16, np.uint8, True, True) # doctest: +SKIP True ''' in_dtype = np.dtype(in_type) @@ -480,7 +479,7 @@ def array_from_file(shape, in_dtype, infile, offset=0, order='F', mmap=True): >>> from io import BytesIO >>> bio = BytesIO() >>> arr = np.arange(6).reshape(1,2,3) - >>> _ = bio.write(arr.tostring('F')) # outputs int in python3 + >>> _ = bio.write(arr.tostring('F')) # outputs int >>> arr2 = array_from_file((1,2,3), 
arr.dtype, bio) >>> np.all(arr == arr2) True @@ -610,7 +609,7 @@ def array_to_file(data, fileobj, out_dtype=None, offset=0, >>> array_to_file(data, sio, np.float) >>> sio.getvalue() == data.tostring('F') True - >>> _ = sio.truncate(0); _ = sio.seek(0) # outputs 0 in python 3 + >>> _ = sio.truncate(0); _ = sio.seek(0) # outputs 0 >>> array_to_file(data, sio, np.int16) >>> sio.getvalue() == data.astype(np.int16).tostring() True @@ -1095,26 +1094,26 @@ def scale_min_max(mn, mx, out_type, allow_intercept): Examples -------- - >>> scale_min_max(0, 255, np.uint8, False) + >>> scale_min_max(0, 255, np.uint8, False) # doctest: +SKIP (1.0, 0.0) - >>> scale_min_max(-128, 127, np.int8, False) + >>> scale_min_max(-128, 127, np.int8, False) # doctest: +SKIP (1.0, 0.0) - >>> scale_min_max(0, 127, np.int8, False) + >>> scale_min_max(0, 127, np.int8, False) # doctest: +SKIP (1.0, 0.0) - >>> scaling, intercept = scale_min_max(0, 127, np.int8, True) - >>> np.allclose((0 - intercept) / scaling, -128) + >>> scaling, intercept = scale_min_max(0, 127, np.int8, True) # doctest: +SKIP + >>> np.allclose((0 - intercept) / scaling, -128) # doctest: +SKIP True - >>> np.allclose((127 - intercept) / scaling, 127) + >>> np.allclose((127 - intercept) / scaling, 127) # doctest: +SKIP True - >>> scaling, intercept = scale_min_max(-10, -1, np.int8, True) - >>> np.allclose((-10 - intercept) / scaling, -128) + >>> scaling, intercept = scale_min_max(-10, -1, np.int8, True) # doctest: +SKIP + >>> np.allclose((-10 - intercept) / scaling, -128) # doctest: +SKIP True - >>> np.allclose((-1 - intercept) / scaling, 127) + >>> np.allclose((-1 - intercept) / scaling, 127) # doctest: +SKIP True - >>> scaling, intercept = scale_min_max(1, 10, np.int8, True) - >>> np.allclose((1 - intercept) / scaling, -128) + >>> scaling, intercept = scale_min_max(1, 10, np.int8, True) # doctest: +SKIP + >>> np.allclose((1 - intercept) / scaling, -128) # doctest: +SKIP True - >>> np.allclose((10 - intercept) / scaling, 127) + >>> np.allclose((10 - intercept) / scaling, 127) # doctest: +SKIP True Notes @@ -1334,26 +1333,30 @@ def _ftype4scaled_finite(tst_arr, slope, inter, direction='read', tst_arr = np.atleast_1d(tst_arr) slope = np.atleast_1d(slope) inter = np.atleast_1d(inter) - warnings.filterwarnings('ignore', '.*overflow.*', RuntimeWarning) - try: - for ftype in OK_FLOATS[def_ind:]: - tst_trans = tst_arr.copy() - slope = slope.astype(ftype) - inter = inter.astype(ftype) - if direction == 'read': # as in reading of image from disk - if slope != 1.0: - tst_trans = tst_trans * slope - if inter != 0.0: - tst_trans = tst_trans + inter - elif direction == 'write': - if inter != 0.0: - tst_trans = tst_trans - inter - if slope != 1.0: - tst_trans = tst_trans / slope + overflow_filter = ('error', '.*overflow.*', RuntimeWarning) + for ftype in OK_FLOATS[def_ind:]: + tst_trans = tst_arr.copy() + slope = slope.astype(ftype) + inter = inter.astype(ftype) + try: + with warnings.catch_warnings(): + # Error on overflows to short circuit the logic + warnings.filterwarnings(*overflow_filter) + if direction == 'read': # as in reading of image from disk + if slope != 1.0: + tst_trans = tst_trans * slope + if inter != 0.0: + tst_trans = tst_trans + inter + elif direction == 'write': + if inter != 0.0: + tst_trans = tst_trans - inter + if slope != 1.0: + tst_trans = tst_trans / slope + # Double-check that result is finite if np.all(np.isfinite(tst_trans)): return ftype - finally: - warnings.filters.pop(0) + except RuntimeWarning: + pass raise ValueError('Overflow using 
highest floating point type') diff --git a/nisext/py3builder.py b/nisext/py3builder.py index 9435f6c60b..4f82a8cfb2 100644 --- a/nisext/py3builder.py +++ b/nisext/py3builder.py @@ -1,6 +1,5 @@ """ distutils utilities for porting to python 3 within 2-compatible tree """ -from __future__ import division, print_function, absolute_import import sys import re diff --git a/nisext/testers.py b/nisext/testers.py index a80bbd904b..e0ca4a040a 100644 --- a/nisext/testers.py +++ b/nisext/testers.py @@ -29,7 +29,6 @@ ''' -from __future__ import print_function import os import sys diff --git a/nisext/tests/test_testers.py b/nisext/tests/test_testers.py index 336677b48f..08fa70cd1a 100644 --- a/nisext/tests/test_testers.py +++ b/nisext/tests/test_testers.py @@ -1,6 +1,5 @@ """ Tests for testers """ -from __future__ import division, print_function import os from os.path import dirname, pathsep diff --git a/requirements.txt b/requirements.txt index 061fa37bef..3134ffd33c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,9 +1,3 @@ -# Minumum requirements -# -# Check these against -# nibabel/info.py -# .travis.yml -# doc/source/installation.rst - -six>=1.3 -numpy>=1.7.1 +# Auto-generated by tools/update_requirements.py +numpy >=1.13 +packaging >=14.3 diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000000..85aebfee7d --- /dev/null +++ b/setup.cfg @@ -0,0 +1,101 @@ +[metadata] +name = nibabel +url = https://nipy.org/nibabel +download_url = https://github.com/nipy/nibabel +author = nibabel developers +author_email = neuroimaging@python.org +maintainer = Chris Markiewicz +maintainer_email = neuroimaging@python.org +classifiers = + Development Status :: 4 - Beta + Environment :: Console + Intended Audience :: Science/Research + License :: OSI Approved :: MIT License + Operating System :: OS Independent + Programming Language :: Python + Programming Language :: Python :: 3.6 + Programming Language :: Python :: 3.7 + Programming Language :: Python :: 3.8 + Topic :: Scientific/Engineering +license = MIT License +description = Access a multitude of neuroimaging data formats +long_description = file:README.rst +long_description_content_type = text/x-rst; charset=UTF-8 +platforms = OS Independent +provides = + nibabel + nisext + +[options] +python_requires = >=3.6 +install_requires = + numpy >=1.13 + packaging >=14.3 +zip_safe = False +packages = find: + +[options.extras_require] +dicom = + pydicom >=0.9.9 +dicomfs = + %(dicom)s + pillow +dev = + gitpython + twine +doc = + matplotlib >= 1.5.3 + numpydoc + sphinx >=0.3 + texext +minc2 = + h5py +spm = + scipy +style = + flake8 +test = + coverage + pytest !=5.3.4 + pytest-cov +all = + %(dicomfs)s + %(dev)s + %(doc)s + %(minc2)s + %(spm)s + %(style)s + %(test)s + +[options.entry_points] +console_scripts = + nib-ls=nibabel.cmdline.ls:main + nib-dicomfs=nibabel.cmdline.dicomfs:main + nib-diff=nibabel.cmdline.diff:main + nib-nifti-dx=nibabel.cmdline.nifti_dx:main + nib-tck2trk=nibabel.cmdline.tck2trk:main + nib-trk2tck=nibabel.cmdline.trk2tck:main + parrec2nii=nibabel.cmdline.parrec2nii:main + +[options.package_data] +nibabel = + tests/data/* + */tests/data/* + benchmarks/pytest.benchmark.ini + +[flake8] +max-line-length = 100 +ignore = D100,D101,D102,D103,D104,D105,D200,D201,D202,D204,D205,D208,D209,D210,D300,D301,D400,D401,D403,E24,E121,E123,E126,E226,E266,E402,E704,E731,F821,I100,I101,I201,N802,N803,N804,N806,W503,W504,W605 +exclude = + *test* + *sphinx* + nibabel/externals/* + */__init__.py + +[versioneer] +VCS = git +style = pep440 
+versionfile_source = nibabel/_version.py +versionfile_build = nibabel/_version.py +tag_prefix = +parentdir_prefix = diff --git a/setup.py b/setup.py index 222ad8562a..9c281a032c 100755 --- a/setup.py +++ b/setup.py @@ -9,117 +9,21 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Build helper.""" -import os -from os.path import join as pjoin import sys -from functools import partial - -# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly -# update it when the contents of directories change. -if os.path.exists('MANIFEST'): - os.remove('MANIFEST') +import os from setuptools import setup +import versioneer -# Commit hash writing, and dependency checking -from nisext.sexts import (get_comrec_build, package_check, install_scripts_bat, - read_vars_from) -cmdclass = {'build_py': get_comrec_build('nibabel'), - 'install_scripts': install_scripts_bat} - -# Get project related strings. -INFO = read_vars_from(pjoin('nibabel', 'info.py')) - -# Prepare setuptools args -if 'setuptools' in sys.modules: - extra_setuptools_args = dict( - tests_require=['nose'], - test_suite='nose.collector', - zip_safe=False, - extras_require=dict( - doc='Sphinx>=0.3', - test='nose>=0.10.1'), - ) - pkg_chk = partial(package_check, setuptools_args = extra_setuptools_args) -else: - extra_setuptools_args = {} - pkg_chk = package_check - -# Do dependency checking -pkg_chk('numpy', INFO.NUMPY_MIN_VERSION) -pkg_chk('six', INFO.SIX_MIN_VERSION) -custom_pydicom_messages = {'missing opt': 'Missing optional package "%s"' - ' provided by package "pydicom"' -} -pkg_chk('dicom', - INFO.PYDICOM_MIN_VERSION, - optional='dicom', - messages = custom_pydicom_messages) - -def main(**extra_args): - setup(name=INFO.NAME, - maintainer=INFO.MAINTAINER, - maintainer_email=INFO.MAINTAINER_EMAIL, - description=INFO.DESCRIPTION, - long_description=INFO.LONG_DESCRIPTION, - url=INFO.URL, - download_url=INFO.DOWNLOAD_URL, - license=INFO.LICENSE, - classifiers=INFO.CLASSIFIERS, - author=INFO.AUTHOR, - author_email=INFO.AUTHOR_EMAIL, - platforms=INFO.PLATFORMS, - version=INFO.VERSION, - provides=INFO.PROVIDES, - install_requires=INFO.REQUIRES, - packages = ['nibabel', - 'nibabel.externals', - 'nibabel.externals.tests', - 'nibabel.gifti', - 'nibabel.gifti.tests', - 'nibabel.cifti2', - 'nibabel.cifti2.tests', - 'nibabel.cmdline', - 'nibabel.cmdline.tests', - 'nibabel.nicom', - 'nibabel.freesurfer', - 'nibabel.freesurfer.tests', - 'nibabel.nicom.tests', - 'nibabel.testing', - 'nibabel.tests', - 'nibabel.benchmarks', - 'nibabel.streamlines', - 'nibabel.streamlines.tests', - # install nisext as its own package - 'nisext', - 'nisext.tests'], - # The package_data spec has no effect for me (on python 2.6) -- even - # changing to data_files doesn't get this stuff included in the source - # distribution -- not sure if it has something to do with the magic - # above, but distutils is surely the worst piece of code in all of - # python -- duplicating things into MANIFEST.in but this is admittedly - # only a workaround to get things started -- not a solution - package_data = {'nibabel': - [pjoin('tests', 'data', '*'), - pjoin('externals', 'tests', 'data', '*'), - pjoin('nicom', 'tests', 'data', '*'), - pjoin('gifti', 'tests', 'data', '*'), - pjoin('streamlines', 'tests', 'data', '*'), - ]}, - scripts = [pjoin('bin', 'parrec2nii'), - pjoin('bin', 'nib-ls'), - pjoin('bin', 'nib-dicomfs'), - pjoin('bin', 'nib-nifti-dx'), - pjoin('bin', 'nib-tck2trk'), - pjoin('bin', 'nib-trk2tck'), - pjoin('bin', 'nib-diff'), - ], 
- cmdclass = cmdclass, - **extra_args - ) - +# Give setuptools a hint to complain if it's too old a version +# 30.3.0 allows us to put most metadata in setup.cfg +# Should match pyproject.toml +SETUP_REQUIRES = ['setuptools >= 30.3.0'] +# This enables setuptools to install wheel on-the-fly +SETUP_REQUIRES += ['wheel'] if 'bdist_wheel' in sys.argv else [] if __name__ == "__main__": - # Do not use nisext's dynamically updated install_requires - extra_setuptools_args.pop('install_requires', None) - main(**extra_setuptools_args) + setup(name='nibabel', + setup_requires=SETUP_REQUIRES, + version=versioneer.get_version(), + cmdclass=versioneer.get_cmdclass()) diff --git a/setup_egg.py b/setup_egg.py deleted file mode 100644 index b67a2d9405..0000000000 --- a/setup_egg.py +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -"""Wrapper to run setup.py using setuptools.""" - -import setuptools # flake8: noqa ; needed to monkeypatch dist_utils - -############################################################################### -# Call the setup.py script, injecting the setuptools-specific arguments. - -if __name__ == '__main__': - exec(open('setup.py', 'rt').read(), dict(__name__='__main__')) diff --git a/tools/refresh_readme.py b/tools/refresh_readme.py index b64ee6e8c1..577c10bd36 100755 --- a/tools/refresh_readme.py +++ b/tools/refresh_readme.py @@ -3,7 +3,6 @@ Should be run from nibabel root (containing setup.py) """ -from __future__ import print_function import os import runpy @@ -19,7 +18,7 @@ rel = runpy.run_path(os.path.join('nibabel', 'info.py')) -readme = ''.join(readme_lines) + '\n' + rel['LONG_DESCRIPTION'] +readme = ''.join(readme_lines) + '\n' + rel['long_description'] with open('README.rst', 'wt') as fobj: fobj.write(readme) diff --git a/tools/update_requirements.py b/tools/update_requirements.py new file mode 100755 index 0000000000..551424994c --- /dev/null +++ b/tools/update_requirements.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 +import sys +from configparser import ConfigParser +from pathlib import Path + +if sys.version_info < (3, 6): + print("This script requires Python 3.6 to work correctly") + sys.exit(1) + +repo_root = Path(__file__).parent.parent +setup_cfg = repo_root / "setup.cfg" +reqs = repo_root / "requirements.txt" +min_reqs = repo_root / "min-requirements.txt" + +config = ConfigParser() +config.read(setup_cfg) +requirements = config.get("options", "install_requires").strip().splitlines() + +script_name = Path(__file__).relative_to(repo_root) + +lines = [f"# Auto-generated by {script_name}", ""] + +# Write requirements +lines[1:-1] = requirements +reqs.write_text("\n".join(lines)) + +# Write minimum requirements +lines[1:-1] = [req.replace(">=", "==").replace("~=", "==") for req in requirements] +min_reqs.write_text("\n".join(lines)) diff --git a/tox.ini b/tox.ini index 5585639795..a0002e12b6 100644 --- a/tox.ini +++ b/tox.ini @@ -18,8 +18,3 @@ deps = deps = [testenv:np-1.2.1] deps = -[flake8] -max-line-length=100 -ignore=D100,D101,D102,D103,D104,D105,D200,D201,D202,D204,D205,D208,D209,D210,D300,D301,D400,D401,D403,E24,E121,E123,E126,E226,E266,E402,E704,E731,F821,I100,I101,I201,N802,N803,N804,N806,W503,W504,W605 -exclude=*test*,*sphinx*,nibabel/externals/*,*/__init__.py - diff --git a/versioneer.py b/versioneer.py new file mode 100644 index 0000000000..7c8333493e --- /dev/null +++ b/versioneer.py @@ -0,0 +1,1850 @@ + +# Version: 0.18 + +"""The 
Versioneer - like a rocketeer, but for versions. + +The Versioneer +============== + +* like a rocketeer, but for versions! +* https://github.com/warner/python-versioneer +* Brian Warner +* License: Public Domain +* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy +* [![Latest Version] +(https://pypip.in/version/versioneer/badge.svg?style=flat) +](https://pypi.python.org/pypi/versioneer/) +* [![Build Status] +(https://travis-ci.org/warner/python-versioneer.png?branch=master) +](https://travis-ci.org/warner/python-versioneer) + +This is a tool for managing a recorded version number in distutils-based +python projects. The goal is to remove the tedious and error-prone "update +the embedded version string" step from your release process. Making a new +release should be as easy as recording a new tag in your version-control +system, and maybe making new tarballs. + + +## Quick Install + +* `pip install versioneer` to somewhere to your $PATH +* add a `[versioneer]` section to your setup.cfg (see below) +* run `versioneer install` in your source tree, commit the results + +## Version Identifiers + +Source trees come from a variety of places: + +* a version-control system checkout (mostly used by developers) +* a nightly tarball, produced by build automation +* a snapshot tarball, produced by a web-based VCS browser, like github's + "tarball from tag" feature +* a release tarball, produced by "setup.py sdist", distributed through PyPI + +Within each source tree, the version identifier (either a string or a number, +this tool is format-agnostic) can come from a variety of places: + +* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows + about recent "tags" and an absolute revision-id +* the name of the directory into which the tarball was unpacked +* an expanded VCS keyword ($Id$, etc) +* a `_version.py` created by some earlier build step + +For released software, the version identifier is closely related to a VCS +tag. Some projects use tag names that include more than just the version +string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool +needs to strip the tag prefix to extract the version identifier. For +unreleased software (between tags), the version identifier should provide +enough information to help developers recreate the same tree, while also +giving them an idea of roughly how old the tree is (after version 1.2, before +version 1.3). Many VCS systems can report a description that captures this, +for example `git describe --tags --dirty --always` reports things like +"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the +0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has +uncommitted changes. + +The version identifier is used for multiple purposes: + +* to allow the module to self-identify its version: `myproject.__version__` +* to choose a name and prefix for a 'setup.py sdist' tarball + +## Theory of Operation + +Versioneer works by adding a special `_version.py` file into your source +tree, where your `__init__.py` can import it. This `_version.py` knows how to +dynamically ask the VCS tool for version information at import time. + +`_version.py` also contains `$Revision$` markers, and the installation +process marks `_version.py` to have this marker rewritten with a tag name +during the `git archive` command. As a result, generated tarballs will +contain enough information to get the proper version. 
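
The keyword rewriting described above relies on git's `export-subst` attribute: when a file carries that attribute, `git archive` expands `$Format:...$` placeholders inside it. As a rough, illustrative sketch (not taken from the generated file, and details vary between versioneer releases), the block that `_version.py` carries for this purpose looks roughly like:

    # Sketch only: the kind of keyword block versioneer places in _version.py.
    # git archive rewrites the $Format:...$ placeholders, so an exported
    # tarball still records which ref names, commit hash and date it came from.
    def get_keywords():
        git_refnames = "$Format:%d$"   # ref names pointing at the commit
        git_full = "$Format:%H$"       # full commit hash
        git_date = "$Format:%ci$"      # committer date, ISO 8601
        return {"refnames": git_refnames, "full": git_full, "date": git_date}
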
+ +To allow `setup.py` to compute a version too, a `versioneer.py` is added to +the top level of your source tree, next to `setup.py` and the `setup.cfg` +that configures it. This overrides several distutils/setuptools commands to +compute the version when invoked, and changes `setup.py build` and `setup.py +sdist` to replace `_version.py` with a small static file that contains just +the generated version data. + +## Installation + +See [INSTALL.md](./INSTALL.md) for detailed installation instructions. + +## Version-String Flavors + +Code which uses Versioneer can learn about its version string at runtime by +importing `_version` from your main `__init__.py` file and running the +`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can +import the top-level `versioneer.py` and run `get_versions()`. + +Both functions return a dictionary with different flavors of version +information: + +* `['version']`: A condensed version string, rendered using the selected + style. This is the most commonly used value for the project's version + string. The default "pep440" style yields strings like `0.11`, + `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section + below for alternative styles. + +* `['full-revisionid']`: detailed revision identifier. For Git, this is the + full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". + +* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the + commit date in ISO 8601 format. This will be None if the date is not + available. + +* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that + this is only accurate if run in a VCS checkout, otherwise it is likely to + be False or None + +* `['error']`: if the version string could not be computed, this will be set + to a string describing the problem, otherwise it will be None. It may be + useful to throw an exception in setup.py if this is set, to avoid e.g. + creating tarballs with a version string of "unknown". + +Some variants are more useful than others. Including `full-revisionid` in a +bug report should allow developers to reconstruct the exact code being tested +(or indicate the presence of local changes that should be shared with the +developers). `version` is suitable for display in an "about" box or a CLI +`--version` output: it can be easily compared against release notes and lists +of bugs fixed in various releases. + +The installer adds the following text to your `__init__.py` to place a basic +version in `YOURPROJECT.__version__`: + + from ._version import get_versions + __version__ = get_versions()['version'] + del get_versions + +## Styles + +The setup.cfg `style=` configuration controls how the VCS information is +rendered into a version string. + +The default style, "pep440", produces a PEP440-compliant string, equal to the +un-prefixed tag name for actual releases, and containing an additional "local +version" section with more detail for in-between builds. For Git, this is +TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags +--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the +tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and +that this commit is two revisions ("+2") beyond the "0.11" tag. For released +software (exactly equal to a known tag), the identifier will only contain the +stripped tag, e.g. "0.11". + +Other styles are available. See [details.md](details.md) in the Versioneer +source tree for descriptions. 
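
To make the dictionary above concrete, here is a hedged sketch (not part of the patch) of how a release helper could consume `get_versions()` from the top-level `versioneer.py`, using only the keys and semantics listed in the docstring:

    # Illustrative sketch: query versioneer from "outside" (e.g. a release
    # script sitting next to setup.py) and refuse to build with an unknown
    # version, as the docstring suggests.
    import versioneer

    info = versioneer.get_versions()
    if info['error'] is not None:
        # Avoid producing tarballs whose version string would be "unknown".
        raise SystemExit('version lookup failed: %s' % info['error'])
    print(info['version'])          # e.g. "0.11+2.g1076c97.dirty"
    print(info['full-revisionid'])  # full SHA1 of the current commit
    print(info['dirty'])            # True if the tree has uncommitted changes
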
+
+## Debugging
+
+Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
+to return a version of "0+unknown". To investigate the problem, run `setup.py
+version`, which will run the version-lookup code in a verbose mode, and will
+display the full contents of `get_versions()` (including the `error` string,
+which may help identify what went wrong).
+
+## Known Limitations
+
+Some situations are known to cause problems for Versioneer. This section
+details the most significant ones; more can be found on the GitHub
+[issues page](https://github.com/warner/python-versioneer/issues).
+
+### Subprojects
+
+Versioneer has limited support for source trees in which `setup.py` is not in
+the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There
+are two common reasons why `setup.py` might not be in the root:
+
+* Source trees which contain multiple subprojects, such as
+  [Buildbot](https://github.com/buildbot/buildbot), which contains both
+  "master" and "slave" subprojects, each with their own `setup.py`,
+  `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
+  distributions (and upload multiple independently-installable tarballs).
+* Source trees whose main purpose is to contain a C library, but which also
+  provide bindings to Python (and perhaps other languages) in subdirectories.
+
+Versioneer will look for `.git` in parent directories, and most operations
+should get the right version string. However `pip` and `setuptools` have bugs
+and implementation details which frequently cause `pip install .` from a
+subproject directory to fail to find a correct version string (so it usually
+defaults to `0+unknown`).
+
+`pip install --editable .` should work correctly. `setup.py install` might
+work too.
+
+Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
+some later version.
+
+[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking
+this issue. The discussion in
+[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the
+issue from the Versioneer side in more detail.
+[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
+[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
+pip to let Versioneer work correctly.
+
+Versioneer-0.16 and earlier only looked for a `.git` directory next to the
+`setup.cfg`, so subprojects were completely unsupported with those releases.
+
+### Editable installs with setuptools <= 18.5
+
+`setup.py develop` and `pip install --editable .` allow you to install a
+project into a virtualenv once, then continue editing the source code (and
+test) without re-installing after every change.
+
+"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
+convenient way to specify executable scripts that should be installed along
+with the python package.
+
+These both work as expected when using modern setuptools. When using
+setuptools-18.5 or earlier, however, certain operations will cause
+`pkg_resources.DistributionNotFound` errors when running the entrypoint
+script, which must be resolved by re-installing the package. This happens
+when the install happens with one version, then the egg_info data is
+regenerated while a different version is checked out. Many setup.py commands
+cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
+a different virtualenv), so this can be surprising.
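For reference, the entry-point declaration mentioned above and the Versioneer hooks normally meet in `setup.py`; the following is only a sketch with illustrative names (`myproject`, `myproject.cli:main`), not a prescription:

    import versioneer
    from setuptools import setup

    setup(
        name="myproject",                     # illustrative project name
        version=versioneer.get_version(),     # computed from the VCS or _version.py
        cmdclass=versioneer.get_cmdclass(),   # patches build_py/sdist to freeze the version
        entry_points={"console_scripts": ["myproject = myproject.cli:main"]},
    )

With modern setuptools this combination behaves as described above; with setuptools 18.5 or earlier the `DistributionNotFound` caveat applies.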
+ +[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes +this one, but upgrading to a newer version of setuptools should probably +resolve it. + +### Unicode version strings + +While Versioneer works (and is continually tested) with both Python 2 and +Python 3, it is not entirely consistent with bytes-vs-unicode distinctions. +Newer releases probably generate unicode version strings on py2. It's not +clear that this is wrong, but it may be surprising for applications when then +write these strings to a network connection or include them in bytes-oriented +APIs like cryptographic checksums. + +[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates +this question. + + +## Updating Versioneer + +To upgrade your project to a new release of Versioneer, do the following: + +* install the new Versioneer (`pip install -U versioneer` or equivalent) +* edit `setup.cfg`, if necessary, to include any new configuration settings + indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details. +* re-run `versioneer install` in your source tree, to replace + `SRC/_version.py` +* commit any changed files + +## Future Directions + +This tool is designed to make it easily extended to other version-control +systems: all VCS-specific components are in separate directories like +src/git/ . The top-level `versioneer.py` script is assembled from these +components by running make-versioneer.py . In the future, make-versioneer.py +will take a VCS name as an argument, and will construct a version of +`versioneer.py` that is specific to the given VCS. It might also take the +configuration arguments that are currently provided manually during +installation by editing setup.py . Alternatively, it might go the other +direction and include code from all supported VCS systems, reducing the +number of intermediate scripts. + + +## License + +To make Versioneer easier to embed, all its code is dedicated to the public +domain. The `_version.py` that it creates is also in the public domain. +Specifically, both are released under the Creative Commons "Public Domain +Dedication" license (CC0-1.0), as described in +https://creativecommons.org/publicdomain/zero/1.0/ . + +""" + +from __future__ import print_function +try: + import configparser +except ImportError: + import ConfigParser as configparser +import errno +import json +import os +import re +import subprocess +import sys +import runpy + + +class VersioneerConfig: + """Container for Versioneer configuration parameters.""" + + +def get_root(): + """Get the project root directory. + + We require that all commands are run from the project root, i.e. the + directory that contains setup.py, setup.cfg, and versioneer.py . + """ + root = os.path.realpath(os.path.abspath(os.getcwd())) + setup_py = os.path.join(root, "setup.py") + versioneer_py = os.path.join(root, "versioneer.py") + if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): + # allow 'python path/to/setup.py COMMAND' + root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) + setup_py = os.path.join(root, "setup.py") + versioneer_py = os.path.join(root, "versioneer.py") + if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): + err = ("Versioneer was unable to run the project root directory. 
" + "Versioneer requires setup.py to be executed from " + "its immediate directory (like 'python setup.py COMMAND'), " + "or in a way that lets it use sys.argv[0] to find the root " + "(like 'python path/to/setup.py COMMAND').") + raise VersioneerBadRootError(err) + try: + # Certain runtime workflows (setup.py install/develop in a setuptools + # tree) execute all dependencies in a single python process, so + # "versioneer" may be imported multiple times, and python's shared + # module-import table will cache the first one. So we can't use + # os.path.dirname(__file__), as that will find whichever + # versioneer.py was first imported, even in later projects. + me = os.path.realpath(os.path.abspath(__file__)) + me_dir = os.path.normcase(os.path.splitext(me)[0]) + vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) + if me_dir != vsr_dir: + print("Warning: build in %s is using versioneer.py from %s" + % (os.path.dirname(me), versioneer_py)) + except NameError: + pass + return root + + +def get_config_from_root(root): + """Read the project setup.cfg file to determine Versioneer config.""" + # This might raise EnvironmentError (if setup.cfg is missing), or + # configparser.NoSectionError (if it lacks a [versioneer] section), or + # configparser.NoOptionError (if it lacks "VCS="). See the docstring at + # the top of versioneer.py for instructions on writing your setup.cfg . + setup_cfg = os.path.join(root, "setup.cfg") + parser = configparser.SafeConfigParser() + with open(setup_cfg, "r") as f: + parser.readfp(f) + VCS = parser.get("versioneer", "VCS") # mandatory + + def get(parser, name): + if parser.has_option("versioneer", name): + return parser.get("versioneer", name) + return None + cfg = VersioneerConfig() + cfg.VCS = VCS + cfg.style = get(parser, "style") or "" + cfg.versionfile_source = get(parser, "versionfile_source") + cfg.versionfile_build = get(parser, "versionfile_build") + cfg.tag_prefix = get(parser, "tag_prefix") + if cfg.tag_prefix in ("''", '""'): + cfg.tag_prefix = "" + cfg.parentdir_prefix = get(parser, "parentdir_prefix") + cfg.verbose = get(parser, "verbose") + return cfg + + +class NotThisMethod(Exception): + """Exception raised if a method is not valid for the current scenario.""" + + +# these dictionaries contain VCS-specific tools +LONG_VERSION_PY = {} +HANDLERS = {} + + +def register_vcs_handler(vcs, method): # decorator + """Decorator to mark a method as the handler for a particular VCS.""" + def decorate(f): + """Store f in HANDLERS[vcs][method].""" + if vcs not in HANDLERS: + HANDLERS[vcs] = {} + HANDLERS[vcs][method] = f + return f + return decorate + + +def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, + env=None): + """Call the given command(s).""" + assert isinstance(commands, list) + p = None + for c in commands: + try: + dispcmd = str([c] + args) + # remember shell=False, so use git.cmd on windows, not just git + p = subprocess.Popen([c] + args, cwd=cwd, env=env, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr + else None)) + break + except EnvironmentError: + e = sys.exc_info()[1] + if e.errno == errno.ENOENT: + continue + if verbose: + print("unable to run %s" % dispcmd) + print(e) + return None, None + else: + if verbose: + print("unable to find command, tried %s" % (commands,)) + return None, None + stdout = p.communicate()[0].strip() + if sys.version_info[0] >= 3: + stdout = stdout.decode() + if p.returncode != 0: + if verbose: + print("unable to run %s (error)" % dispcmd) + print("stdout was %s" % 
stdout) + return None, p.returncode + return stdout, p.returncode + + +LONG_VERSION_PY['git'] = ''' +# This file helps to compute a version number in source trees obtained from +# git-archive tarball (such as those provided by githubs download-from-tag +# feature). Distribution tarballs (built by setup.py sdist) and build +# directories (produced by setup.py build) will contain a much shorter file +# that just contains the computed version number. + +# This file is released into the public domain. Generated by +# versioneer-0.18 (https://github.com/warner/python-versioneer) + +"""Git implementation of _version.py.""" + +import errno +import os +import re +import subprocess +import sys +import runpy + + +def get_keywords(): + """Get the keywords needed to look up the version information.""" + # these strings will be replaced by git during git-archive. + # setup.py/versioneer.py will grep for the variable names, so they must + # each be defined on a line of their own. _version.py will just call + # get_keywords(). + git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" + git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" + git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s" + keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} + return keywords + + +class VersioneerConfig: + """Container for Versioneer configuration parameters.""" + + +def get_config(): + """Create, populate and return the VersioneerConfig() object.""" + # these strings are filled in when 'setup.py versioneer' creates + # _version.py + cfg = VersioneerConfig() + cfg.VCS = "git" + cfg.style = "%(STYLE)s" + cfg.tag_prefix = "%(TAG_PREFIX)s" + cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" + cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" + cfg.verbose = False + return cfg + + +class NotThisMethod(Exception): + """Exception raised if a method is not valid for the current scenario.""" + + +LONG_VERSION_PY = {} +HANDLERS = {} + + +def register_vcs_handler(vcs, method): # decorator + """Decorator to mark a method as the handler for a particular VCS.""" + def decorate(f): + """Store f in HANDLERS[vcs][method].""" + if vcs not in HANDLERS: + HANDLERS[vcs] = {} + HANDLERS[vcs][method] = f + return f + return decorate + + +def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, + env=None): + """Call the given command(s).""" + assert isinstance(commands, list) + p = None + for c in commands: + try: + dispcmd = str([c] + args) + # remember shell=False, so use git.cmd on windows, not just git + p = subprocess.Popen([c] + args, cwd=cwd, env=env, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr + else None)) + break + except EnvironmentError: + e = sys.exc_info()[1] + if e.errno == errno.ENOENT: + continue + if verbose: + print("unable to run %%s" %% dispcmd) + print(e) + return None, None + else: + if verbose: + print("unable to find command, tried %%s" %% (commands,)) + return None, None + stdout = p.communicate()[0].strip() + if sys.version_info[0] >= 3: + stdout = stdout.decode() + if p.returncode != 0: + if verbose: + print("unable to run %%s (error)" %% dispcmd) + print("stdout was %%s" %% stdout) + return None, p.returncode + return stdout, p.returncode + + +def versions_from_parentdir(parentdir_prefix, root, verbose): + """Try to determine the version from the parent directory name. + + Source tarballs conventionally unpack into a directory that includes both + the project name and a version string. 
We will also support searching up + two directory levels for an appropriately named parent directory + """ + rootdirs = [] + + for i in range(3): + dirname = os.path.basename(root) + if dirname.startswith(parentdir_prefix): + return {"version": dirname[len(parentdir_prefix):], + "full-revisionid": None, + "dirty": False, "error": None, "date": None} + else: + rootdirs.append(root) + root = os.path.dirname(root) # up a level + + if verbose: + print("Tried directories %%s but none started with prefix %%s" %% + (str(rootdirs), parentdir_prefix)) + raise NotThisMethod("rootdir doesn't start with parentdir_prefix") + + +@register_vcs_handler("git", "get_keywords") +def git_get_keywords(versionfile_abs): + """Extract version information from the given file.""" + # the code embedded in _version.py can just fetch the value of these + # keywords. When used from setup.py, we don't want to import _version.py, + # so we do it with a regexp instead. This function is not used from + # _version.py. + keywords = {} + try: + f = open(versionfile_abs, "r") + for line in f.readlines(): + if line.strip().startswith("git_refnames ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["refnames"] = mo.group(1) + if line.strip().startswith("git_full ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["full"] = mo.group(1) + if line.strip().startswith("git_date ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["date"] = mo.group(1) + f.close() + except EnvironmentError: + pass + # CJM: Nibabel hack to ensure we can git-archive off-release versions and + # revert to old X.Y.Zdev versions + githash + try: + rel = runpy.run_path(os.path.join(os.path.dirname(versionfile_abs), "info.py")) + keywords["fallback"] = rel["VERSION"] + except (FileNotFoundError, KeyError): + pass + return keywords + + +@register_vcs_handler("git", "keywords") +def git_versions_from_keywords(keywords, tag_prefix, verbose): + """Get version information from git keywords.""" + # CJM: Nibabel fix to avoid hitting unguarded dictionary lookup, better explanation + if "refnames" not in keywords: + raise NotThisMethod("Short version file found") + date = keywords.get("date") + if date is not None: + # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant + # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601 + # -like" string, which we must then edit to make compliant), because + # it's been around since git-1.5.3, and it's too difficult to + # discover which version we're using, or to work around using an + # older one. + date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) + refnames = keywords["refnames"].strip() + if refnames.startswith("$Format"): + if verbose: + print("keywords are unexpanded, not using") + raise NotThisMethod("unexpanded keywords, not a git-archive tarball") + refs = set([r.strip() for r in refnames.strip("()").split(",")]) + # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of + # just "foo-1.0". If we see a "tag: " prefix, prefer those. + TAG = "tag: " + tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) + if not tags: + # Either we're using git < 1.8.3, or there really are no tags. We use + # a heuristic: assume all version tags have a digit. The old git %%d + # expansion behaves like git log --decorate=short and strips out the + # refs/heads/ and refs/tags/ prefixes that would let us distinguish + # between branches and tags. 
By ignoring refnames without digits, we + # filter out many common branch names like "release" and + # "stabilization", as well as "HEAD" and "master". + tags = set([r for r in refs if re.search(r'\d', r)]) + if verbose: + print("discarding '%%s', no digits" %% ",".join(refs - tags)) + if verbose: + print("likely tags: %%s" %% ",".join(sorted(tags))) + for ref in sorted(tags): + # sorting will prefer e.g. "2.0" over "2.0rc1" + if ref.startswith(tag_prefix): + r = ref[len(tag_prefix):] + # CJM: Nibabel fix to filter out refs that exactly match prefix + # or that don't start with a number once the prefix is stripped + # (Mostly a concern when prefix is '') + if not re.match(r'\d', r): + continue + if verbose: + print("picking %%s" %% r) + return {"version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, "error": None, + "date": date} + # no suitable tags, so inspect ./info.py + if verbose: + print("no suitable tags, falling back to info.VERSION or 0+unknown") + return {"version": keywords.get("fallback", "0+unknown"), + "full-revisionid": keywords["full"].strip(), + "dirty": False, "error": "no suitable tags", "date": None} + + +@register_vcs_handler("git", "pieces_from_vcs") +def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): + """Get version from 'git describe' in the root of the source tree. + + This only gets called if the git-archive 'subst' keywords were *not* + expanded, and _version.py hasn't already been rewritten with a short + version string, meaning we're inside a checked out source tree. + """ + GITS = ["git"] + if sys.platform == "win32": + GITS = ["git.cmd", "git.exe"] + + out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, + hide_stderr=True) + if rc != 0: + if verbose: + print("Directory %%s not under git control" %% root) + raise NotThisMethod("'git rev-parse --git-dir' returned error") + + # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] + # if there isn't one, this yields HEX[-dirty] (no NUM) + describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", + "--always", "--long", + "--match", "%%s*" %% tag_prefix], + cwd=root) + # --long was added in git-1.5.5 + if describe_out is None: + raise NotThisMethod("'git describe' failed") + describe_out = describe_out.strip() + full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) + if full_out is None: + raise NotThisMethod("'git rev-parse' failed") + full_out = full_out.strip() + + pieces = {} + pieces["long"] = full_out + pieces["short"] = full_out[:7] # maybe improved later + pieces["error"] = None + + # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] + # TAG might have hyphens. + git_describe = describe_out + + # look for -dirty suffix + dirty = git_describe.endswith("-dirty") + pieces["dirty"] = dirty + if dirty: + git_describe = git_describe[:git_describe.rindex("-dirty")] + + # now we have TAG-NUM-gHEX or HEX + + if "-" in git_describe: + # TAG-NUM-gHEX + mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) + if not mo: + # unparseable. Maybe git-describe is misbehaving? 
+ pieces["error"] = ("unable to parse git-describe output: '%%s'" + %% describe_out) + return pieces + + # tag + full_tag = mo.group(1) + if not full_tag.startswith(tag_prefix): + if verbose: + fmt = "tag '%%s' doesn't start with prefix '%%s'" + print(fmt %% (full_tag, tag_prefix)) + pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" + %% (full_tag, tag_prefix)) + return pieces + pieces["closest-tag"] = full_tag[len(tag_prefix):] + + # distance: number of commits since tag + pieces["distance"] = int(mo.group(2)) + + # commit: short hex revision ID + pieces["short"] = mo.group(3) + + else: + # HEX: no tags + pieces["closest-tag"] = None + count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], + cwd=root) + pieces["distance"] = int(count_out) # total number of commits + + # commit date: see ISO-8601 comment in git_versions_from_keywords() + date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"], + cwd=root)[0].strip() + pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) + + return pieces + + +def plus_or_dot(pieces): + """Return a + if we don't already have one, else return a .""" + if "+" in pieces.get("closest-tag", ""): + return "." + return "+" + + +def render_pep440(pieces): + """Build up version string, with post-release "local version identifier". + + Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you + get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty + + Exceptions: + 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += plus_or_dot(pieces) + rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], + pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_pre(pieces): + """TAG[.post.devDISTANCE] -- No -dirty. + + Exceptions: + 1: no tags. 0.post.devDISTANCE + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"]: + rendered += ".post.dev%%d" %% pieces["distance"] + else: + # exception #1 + rendered = "0.post.dev%%d" %% pieces["distance"] + return rendered + + +def render_pep440_post(pieces): + """TAG[.postDISTANCE[.dev0]+gHEX] . + + The ".dev0" means dirty. Note that .dev0 sorts backwards + (a dirty tree will appear "older" than the corresponding clean one), + but you shouldn't be releasing software with -dirty anyways. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%%d" %% pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "g%%s" %% pieces["short"] + else: + # exception #1 + rendered = "0.post%%d" %% pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += "+g%%s" %% pieces["short"] + return rendered + + +def render_pep440_old(pieces): + """TAG[.postDISTANCE[.dev0]] . + + The ".dev0" means dirty. + + Eexceptions: + 1: no tags. 
0.postDISTANCE[.dev0] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%%d" %% pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + else: + # exception #1 + rendered = "0.post%%d" %% pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + return rendered + + +def render_git_describe(pieces): + """TAG[-DISTANCE-gHEX][-dirty]. + + Like 'git describe --tags --dirty --always'. + + Exceptions: + 1: no tags. HEX[-dirty] (note: no 'g' prefix) + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"]: + rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render_git_describe_long(pieces): + """TAG-DISTANCE-gHEX[-dirty]. + + Like 'git describe --tags --dirty --always -long'. + The distance/hash is unconditional. + + Exceptions: + 1: no tags. HEX[-dirty] (note: no 'g' prefix) + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render(pieces, style): + """Render the given version pieces into the requested style.""" + if pieces["error"]: + return {"version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"], + "date": None} + + if not style or style == "default": + style = "pep440" # the default + + if style == "pep440": + rendered = render_pep440(pieces) + elif style == "pep440-pre": + rendered = render_pep440_pre(pieces) + elif style == "pep440-post": + rendered = render_pep440_post(pieces) + elif style == "pep440-old": + rendered = render_pep440_old(pieces) + elif style == "git-describe": + rendered = render_git_describe(pieces) + elif style == "git-describe-long": + rendered = render_git_describe_long(pieces) + else: + raise ValueError("unknown style '%%s'" %% style) + + return {"version": rendered, "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], "error": None, + "date": pieces.get("date")} + + +def get_versions(): + """Get version information or return default if unable to do so.""" + # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have + # __file__, we can work backwards from there to the root. Some + # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which + # case we can only use expanded keywords. + + cfg = get_config() + verbose = cfg.verbose + + try: + return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, + verbose) + except NotThisMethod: + pass + + try: + root = os.path.realpath(__file__) + # versionfile_source is the relative path from the top of the source + # tree (where the .git directory might live) to this file. Invert + # this to find the root from __file__. 
+ for i in cfg.versionfile_source.split('/'): + root = os.path.dirname(root) + except NameError: + return {"version": "0+unknown", "full-revisionid": None, + "dirty": None, + "error": "unable to find root of source tree", + "date": None} + + try: + pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) + return render(pieces, cfg.style) + except NotThisMethod: + pass + + try: + if cfg.parentdir_prefix: + return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) + except NotThisMethod: + pass + + return {"version": "0+unknown", "full-revisionid": None, + "dirty": None, + "error": "unable to compute version", "date": None} +''' + + +@register_vcs_handler("git", "get_keywords") +def git_get_keywords(versionfile_abs): + """Extract version information from the given file.""" + # the code embedded in _version.py can just fetch the value of these + # keywords. When used from setup.py, we don't want to import _version.py, + # so we do it with a regexp instead. This function is not used from + # _version.py. + keywords = {} + try: + f = open(versionfile_abs, "r") + for line in f.readlines(): + if line.strip().startswith("git_refnames ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["refnames"] = mo.group(1) + if line.strip().startswith("git_full ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["full"] = mo.group(1) + if line.strip().startswith("git_date ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["date"] = mo.group(1) + f.close() + except EnvironmentError: + pass + # CJM: Nibabel hack to ensure we can git-archive off-release versions and + # revert to old X.Y.Zdev versions + githash + try: + rel = runpy.run_path(os.path.join(os.path.dirname(versionfile_abs), "info.py")) + keywords["fallback"] = rel["VERSION"] + except (FileNotFoundError, KeyError): + pass + return keywords + + +@register_vcs_handler("git", "keywords") +def git_versions_from_keywords(keywords, tag_prefix, verbose): + """Get version information from git keywords.""" + # CJM: Nibabel fix to avoid hitting unguarded dictionary lookup, better explanation + if "refnames" not in keywords: + raise NotThisMethod("Short version file found") + date = keywords.get("date") + if date is not None: + # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant + # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 + # -like" string, which we must then edit to make compliant), because + # it's been around since git-1.5.3, and it's too difficult to + # discover which version we're using, or to work around using an + # older one. + date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) + refnames = keywords["refnames"].strip() + if refnames.startswith("$Format"): + if verbose: + print("keywords are unexpanded, not using") + raise NotThisMethod("unexpanded keywords, not a git-archive tarball") + refs = set([r.strip() for r in refnames.strip("()").split(",")]) + # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of + # just "foo-1.0". If we see a "tag: " prefix, prefer those. + TAG = "tag: " + tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) + if not tags: + # Either we're using git < 1.8.3, or there really are no tags. We use + # a heuristic: assume all version tags have a digit. The old git %d + # expansion behaves like git log --decorate=short and strips out the + # refs/heads/ and refs/tags/ prefixes that would let us distinguish + # between branches and tags. 
By ignoring refnames without digits, we + # filter out many common branch names like "release" and + # "stabilization", as well as "HEAD" and "master". + tags = set([r for r in refs if re.search(r'\d', r)]) + if verbose: + print("discarding '%s', no digits" % ",".join(refs - tags)) + if verbose: + print("likely tags: %s" % ",".join(sorted(tags))) + for ref in sorted(tags): + # sorting will prefer e.g. "2.0" over "2.0rc1" + if ref.startswith(tag_prefix): + r = ref[len(tag_prefix):] + # CJM: Nibabel fix to filter out refs that exactly match prefix + # or that don't start with a number once the prefix is stripped + # (Mostly a concern when prefix is '') + if not re.match(r'\d', r): + continue + if verbose: + print("picking %s" % r) + return {"version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, "error": None, + "date": date} + # no suitable tags, so inspect ./info.py + if verbose: + print("no suitable tags, falling back to info.VERSION or 0+unknown") + return {"version": keywords.get("fallback", "0+unknown"), + "full-revisionid": keywords["full"].strip(), + "dirty": False, "error": "no suitable tags", "date": None} + + +@register_vcs_handler("git", "pieces_from_vcs") +def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): + """Get version from 'git describe' in the root of the source tree. + + This only gets called if the git-archive 'subst' keywords were *not* + expanded, and _version.py hasn't already been rewritten with a short + version string, meaning we're inside a checked out source tree. + """ + GITS = ["git"] + if sys.platform == "win32": + GITS = ["git.cmd", "git.exe"] + + out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, + hide_stderr=True) + if rc != 0: + if verbose: + print("Directory %s not under git control" % root) + raise NotThisMethod("'git rev-parse --git-dir' returned error") + + # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] + # if there isn't one, this yields HEX[-dirty] (no NUM) + describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", + "--always", "--long", + "--match", "%s*" % tag_prefix], + cwd=root) + # --long was added in git-1.5.5 + if describe_out is None: + raise NotThisMethod("'git describe' failed") + describe_out = describe_out.strip() + full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) + if full_out is None: + raise NotThisMethod("'git rev-parse' failed") + full_out = full_out.strip() + + pieces = {} + pieces["long"] = full_out + pieces["short"] = full_out[:7] # maybe improved later + pieces["error"] = None + + # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] + # TAG might have hyphens. + git_describe = describe_out + + # look for -dirty suffix + dirty = git_describe.endswith("-dirty") + pieces["dirty"] = dirty + if dirty: + git_describe = git_describe[:git_describe.rindex("-dirty")] + + # now we have TAG-NUM-gHEX or HEX + + if "-" in git_describe: + # TAG-NUM-gHEX + mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) + if not mo: + # unparseable. Maybe git-describe is misbehaving? 
+ pieces["error"] = ("unable to parse git-describe output: '%s'" + % describe_out) + return pieces + + # tag + full_tag = mo.group(1) + if not full_tag.startswith(tag_prefix): + if verbose: + fmt = "tag '%s' doesn't start with prefix '%s'" + print(fmt % (full_tag, tag_prefix)) + pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" + % (full_tag, tag_prefix)) + return pieces + pieces["closest-tag"] = full_tag[len(tag_prefix):] + + # distance: number of commits since tag + pieces["distance"] = int(mo.group(2)) + + # commit: short hex revision ID + pieces["short"] = mo.group(3) + + else: + # HEX: no tags + pieces["closest-tag"] = None + count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], + cwd=root) + pieces["distance"] = int(count_out) # total number of commits + + # commit date: see ISO-8601 comment in git_versions_from_keywords() + date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], + cwd=root)[0].strip() + pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) + + return pieces + + +def do_vcs_install(manifest_in, versionfile_source, ipy): + """Git-specific installation logic for Versioneer. + + For Git, this means creating/changing .gitattributes to mark _version.py + for export-subst keyword substitution. + """ + GITS = ["git"] + if sys.platform == "win32": + GITS = ["git.cmd", "git.exe"] + files = [manifest_in, versionfile_source] + if ipy: + files.append(ipy) + try: + me = __file__ + if me.endswith(".pyc") or me.endswith(".pyo"): + me = os.path.splitext(me)[0] + ".py" + versioneer_file = os.path.relpath(me) + except NameError: + versioneer_file = "versioneer.py" + files.append(versioneer_file) + present = False + try: + f = open(".gitattributes", "r") + for line in f.readlines(): + if line.strip().startswith(versionfile_source): + if "export-subst" in line.strip().split()[1:]: + present = True + f.close() + except EnvironmentError: + pass + if not present: + f = open(".gitattributes", "a+") + f.write("%s export-subst\n" % versionfile_source) + f.close() + files.append(".gitattributes") + run_command(GITS, ["add", "--"] + files) + + +def versions_from_parentdir(parentdir_prefix, root, verbose): + """Try to determine the version from the parent directory name. + + Source tarballs conventionally unpack into a directory that includes both + the project name and a version string. We will also support searching up + two directory levels for an appropriately named parent directory + """ + rootdirs = [] + + for i in range(3): + dirname = os.path.basename(root) + if dirname.startswith(parentdir_prefix): + return {"version": dirname[len(parentdir_prefix):], + "full-revisionid": None, + "dirty": False, "error": None, "date": None} + else: + rootdirs.append(root) + root = os.path.dirname(root) # up a level + + if verbose: + print("Tried directories %s but none started with prefix %s" % + (str(rootdirs), parentdir_prefix)) + raise NotThisMethod("rootdir doesn't start with parentdir_prefix") + + +SHORT_VERSION_PY = """ +# This file was generated by 'versioneer.py' (0.18) from +# revision-control system data, or from the parent directory name of an +# unpacked source archive. Distribution tarballs contain a pre-generated copy +# of this file. 
+ +import json + +version_json = ''' +%s +''' # END VERSION_JSON + + +def get_versions(): + return json.loads(version_json) +""" + + +def versions_from_file(filename): + """Try to determine the version from _version.py if present.""" + try: + with open(filename) as f: + contents = f.read() + except EnvironmentError: + raise NotThisMethod("unable to read _version.py") + mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", + contents, re.M | re.S) + if not mo: + mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON", + contents, re.M | re.S) + if not mo: + raise NotThisMethod("no version_json in _version.py") + return json.loads(mo.group(1)) + + +def write_to_version_file(filename, versions): + """Write the given version number to the given _version.py file.""" + os.unlink(filename) + contents = json.dumps(versions, sort_keys=True, + indent=1, separators=(",", ": ")) + with open(filename, "w") as f: + f.write(SHORT_VERSION_PY % contents) + + print("set %s to '%s'" % (filename, versions["version"])) + + +def plus_or_dot(pieces): + """Return a + if we don't already have one, else return a .""" + if "+" in pieces.get("closest-tag", ""): + return "." + return "+" + + +def render_pep440(pieces): + """Build up version string, with post-release "local version identifier". + + Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you + get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty + + Exceptions: + 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += plus_or_dot(pieces) + rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0+untagged.%d.g%s" % (pieces["distance"], + pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_pre(pieces): + """TAG[.post.devDISTANCE] -- No -dirty. + + Exceptions: + 1: no tags. 0.post.devDISTANCE + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"]: + rendered += ".post.dev%d" % pieces["distance"] + else: + # exception #1 + rendered = "0.post.dev%d" % pieces["distance"] + return rendered + + +def render_pep440_post(pieces): + """TAG[.postDISTANCE[.dev0]+gHEX] . + + The ".dev0" means dirty. Note that .dev0 sorts backwards + (a dirty tree will appear "older" than the corresponding clean one), + but you shouldn't be releasing software with -dirty anyways. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "g%s" % pieces["short"] + else: + # exception #1 + rendered = "0.post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += "+g%s" % pieces["short"] + return rendered + + +def render_pep440_old(pieces): + """TAG[.postDISTANCE[.dev0]] . + + The ".dev0" means dirty. + + Eexceptions: + 1: no tags. 
0.postDISTANCE[.dev0] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + else: + # exception #1 + rendered = "0.post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + return rendered + + +def render_git_describe(pieces): + """TAG[-DISTANCE-gHEX][-dirty]. + + Like 'git describe --tags --dirty --always'. + + Exceptions: + 1: no tags. HEX[-dirty] (note: no 'g' prefix) + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"]: + rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render_git_describe_long(pieces): + """TAG-DISTANCE-gHEX[-dirty]. + + Like 'git describe --tags --dirty --always -long'. + The distance/hash is unconditional. + + Exceptions: + 1: no tags. HEX[-dirty] (note: no 'g' prefix) + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render(pieces, style): + """Render the given version pieces into the requested style.""" + if pieces["error"]: + return {"version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"], + "date": None} + + if not style or style == "default": + style = "pep440" # the default + + if style == "pep440": + rendered = render_pep440(pieces) + elif style == "pep440-pre": + rendered = render_pep440_pre(pieces) + elif style == "pep440-post": + rendered = render_pep440_post(pieces) + elif style == "pep440-old": + rendered = render_pep440_old(pieces) + elif style == "git-describe": + rendered = render_git_describe(pieces) + elif style == "git-describe-long": + rendered = render_git_describe_long(pieces) + else: + raise ValueError("unknown style '%s'" % style) + + return {"version": rendered, "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], "error": None, + "date": pieces.get("date")} + + +class VersioneerBadRootError(Exception): + """The project root directory is unknown or missing key files.""" + + +def get_versions(verbose=False): + """Get the project version from whatever source is available. + + Returns dict with two keys: 'version' and 'full'. + """ + if "versioneer" in sys.modules: + # see the discussion in cmdclass.py:get_cmdclass() + del sys.modules["versioneer"] + + root = get_root() + cfg = get_config_from_root(root) + + assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg" + handlers = HANDLERS.get(cfg.VCS) + assert handlers, "unrecognized VCS '%s'" % cfg.VCS + verbose = verbose or cfg.verbose + assert cfg.versionfile_source is not None, \ + "please set versioneer.versionfile_source" + assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" + + versionfile_abs = os.path.join(root, cfg.versionfile_source) + + # extract version from first of: _version.py, VCS command (e.g. 'git + # describe'), parentdir. This is meant to work for developers using a + # source checkout, for users of a tarball created by 'setup.py sdist', + # and for users of a tarball/zipball created by 'git archive' or github's + # download-from-tag feature or the equivalent in other VCSes. 
+ + get_keywords_f = handlers.get("get_keywords") + from_keywords_f = handlers.get("keywords") + if get_keywords_f and from_keywords_f: + try: + keywords = get_keywords_f(versionfile_abs) + ver = from_keywords_f(keywords, cfg.tag_prefix, verbose) + if verbose: + print("got version from expanded keyword %s" % ver) + return ver + except NotThisMethod: + pass + + try: + ver = versions_from_file(versionfile_abs) + if verbose: + print("got version from file %s %s" % (versionfile_abs, ver)) + return ver + except NotThisMethod: + pass + + from_vcs_f = handlers.get("pieces_from_vcs") + if from_vcs_f: + try: + pieces = from_vcs_f(cfg.tag_prefix, root, verbose) + ver = render(pieces, cfg.style) + if verbose: + print("got version from VCS %s" % ver) + return ver + except NotThisMethod: + pass + + try: + if cfg.parentdir_prefix: + ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose) + if verbose: + print("got version from parentdir %s" % ver) + return ver + except NotThisMethod: + pass + + if verbose: + print("unable to compute version") + + return {"version": "0+unknown", "full-revisionid": None, + "dirty": None, "error": "unable to compute version", + "date": None} + + +def get_version(): + """Get the short version string for this project.""" + return get_versions()["version"] + + +def get_cmdclass(): + """Get the custom setuptools/distutils subclasses used by Versioneer.""" + if "versioneer" in sys.modules: + del sys.modules["versioneer"] + # this fixes the "python setup.py develop" case (also 'install' and + # 'easy_install .'), in which subdependencies of the main project are + # built (using setup.py bdist_egg) in the same python process. Assume + # a main project A and a dependency B, which use different versions + # of Versioneer. A's setup.py imports A's Versioneer, leaving it in + # sys.modules by the time B's setup.py is executed, causing B to run + # with the wrong versioneer. Setuptools wraps the sub-dep builds in a + # sandbox that restores sys.modules to it's pre-build state, so the + # parent is protected against the child's "import versioneer". By + # removing ourselves from sys.modules here, before the child build + # happens, we protect the child from the parent's versioneer too. + # Also see https://github.com/warner/python-versioneer/issues/52 + + cmds = {} + + # we add "version" to both distutils and setuptools + from distutils.core import Command + + class cmd_version(Command): + description = "report generated version string" + user_options = [] + boolean_options = [] + + def initialize_options(self): + pass + + def finalize_options(self): + pass + + def run(self): + vers = get_versions(verbose=True) + print("Version: %s" % vers["version"]) + print(" full-revisionid: %s" % vers.get("full-revisionid")) + print(" dirty: %s" % vers.get("dirty")) + print(" date: %s" % vers.get("date")) + if vers["error"]: + print(" error: %s" % vers["error"]) + cmds["version"] = cmd_version + + # we override "build_py" in both distutils and setuptools + # + # most invocation pathways end up running build_py: + # distutils/build -> build_py + # distutils/install -> distutils/build ->.. + # setuptools/bdist_wheel -> distutils/install ->.. + # setuptools/bdist_egg -> distutils/install_lib -> build_py + # setuptools/install -> bdist_egg ->.. + # setuptools/develop -> ? 
+ # pip install: + # copies source tree to a tempdir before running egg_info/etc + # if .git isn't copied too, 'git describe' will fail + # then does setup.py bdist_wheel, or sometimes setup.py install + # setup.py egg_info -> ? + + # we override different "build_py" commands for both environments + if "setuptools" in sys.modules: + from setuptools.command.build_py import build_py as _build_py + else: + from distutils.command.build_py import build_py as _build_py + + class cmd_build_py(_build_py): + def run(self): + root = get_root() + cfg = get_config_from_root(root) + versions = get_versions() + _build_py.run(self) + # now locate _version.py in the new build/ directory and replace + # it with an updated value + if cfg.versionfile_build: + target_versionfile = os.path.join(self.build_lib, + cfg.versionfile_build) + print("UPDATING %s" % target_versionfile) + write_to_version_file(target_versionfile, versions) + cmds["build_py"] = cmd_build_py + + if "cx_Freeze" in sys.modules: # cx_freeze enabled? + from cx_Freeze.dist import build_exe as _build_exe + # nczeczulin reports that py2exe won't like the pep440-style string + # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g. + # setup(console=[{ + # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION + # "product_version": versioneer.get_version(), + # ... + + class cmd_build_exe(_build_exe): + def run(self): + root = get_root() + cfg = get_config_from_root(root) + versions = get_versions() + target_versionfile = cfg.versionfile_source + print("UPDATING %s" % target_versionfile) + write_to_version_file(target_versionfile, versions) + + _build_exe.run(self) + os.unlink(target_versionfile) + with open(cfg.versionfile_source, "w") as f: + LONG = LONG_VERSION_PY[cfg.VCS] + f.write(LONG % + {"DOLLAR": "$", + "STYLE": cfg.style, + "TAG_PREFIX": cfg.tag_prefix, + "PARENTDIR_PREFIX": cfg.parentdir_prefix, + "VERSIONFILE_SOURCE": cfg.versionfile_source, + }) + cmds["build_exe"] = cmd_build_exe + del cmds["build_py"] + + if 'py2exe' in sys.modules: # py2exe enabled? 
+ try: + from py2exe.distutils_buildexe import py2exe as _py2exe # py3 + except ImportError: + from py2exe.build_exe import py2exe as _py2exe # py2 + + class cmd_py2exe(_py2exe): + def run(self): + root = get_root() + cfg = get_config_from_root(root) + versions = get_versions() + target_versionfile = cfg.versionfile_source + print("UPDATING %s" % target_versionfile) + write_to_version_file(target_versionfile, versions) + + _py2exe.run(self) + os.unlink(target_versionfile) + with open(cfg.versionfile_source, "w") as f: + LONG = LONG_VERSION_PY[cfg.VCS] + f.write(LONG % + {"DOLLAR": "$", + "STYLE": cfg.style, + "TAG_PREFIX": cfg.tag_prefix, + "PARENTDIR_PREFIX": cfg.parentdir_prefix, + "VERSIONFILE_SOURCE": cfg.versionfile_source, + }) + cmds["py2exe"] = cmd_py2exe + + # we override different "sdist" commands for both environments + if "setuptools" in sys.modules: + from setuptools.command.sdist import sdist as _sdist + else: + from distutils.command.sdist import sdist as _sdist + + class cmd_sdist(_sdist): + def run(self): + versions = get_versions() + self._versioneer_generated_versions = versions + # unless we update this, the command will keep using the old + # version + self.distribution.metadata.version = versions["version"] + return _sdist.run(self) + + def make_release_tree(self, base_dir, files): + root = get_root() + cfg = get_config_from_root(root) + _sdist.make_release_tree(self, base_dir, files) + # now locate _version.py in the new base_dir directory + # (remembering that it may be a hardlink) and replace it with an + # updated value + target_versionfile = os.path.join(base_dir, cfg.versionfile_source) + print("UPDATING %s" % target_versionfile) + write_to_version_file(target_versionfile, + self._versioneer_generated_versions) + cmds["sdist"] = cmd_sdist + + return cmds + + +CONFIG_ERROR = """ +setup.cfg is missing the necessary Versioneer configuration. You need +a section like: + + [versioneer] + VCS = git + style = pep440 + versionfile_source = src/myproject/_version.py + versionfile_build = myproject/_version.py + tag_prefix = + parentdir_prefix = myproject- + +You will also need to edit your setup.py to use the results: + + import versioneer + setup(version=versioneer.get_version(), + cmdclass=versioneer.get_cmdclass(), ...) + +Please read the docstring in ./versioneer.py for configuration instructions, +edit setup.cfg, and re-run the installer or 'python versioneer.py setup'. +""" + +SAMPLE_CONFIG = """ +# See the docstring in versioneer.py for instructions. Note that you must +# re-run 'versioneer.py setup' after changing this section, and commit the +# resulting files. 
+ +[versioneer] +#VCS = git +#style = pep440 +#versionfile_source = +#versionfile_build = +#tag_prefix = +#parentdir_prefix = + +""" + +INIT_PY_SNIPPET = """ +from ._version import get_versions +__version__ = get_versions()['version'] +del get_versions +""" + + +def do_setup(): + """Main VCS-independent setup function for installing Versioneer.""" + root = get_root() + try: + cfg = get_config_from_root(root) + except (EnvironmentError, configparser.NoSectionError, + configparser.NoOptionError) as e: + if isinstance(e, (EnvironmentError, configparser.NoSectionError)): + print("Adding sample versioneer config to setup.cfg", + file=sys.stderr) + with open(os.path.join(root, "setup.cfg"), "a") as f: + f.write(SAMPLE_CONFIG) + print(CONFIG_ERROR, file=sys.stderr) + return 1 + + print(" creating %s" % cfg.versionfile_source) + with open(cfg.versionfile_source, "w") as f: + LONG = LONG_VERSION_PY[cfg.VCS] + f.write(LONG % {"DOLLAR": "$", + "STYLE": cfg.style, + "TAG_PREFIX": cfg.tag_prefix, + "PARENTDIR_PREFIX": cfg.parentdir_prefix, + "VERSIONFILE_SOURCE": cfg.versionfile_source, + }) + + ipy = os.path.join(os.path.dirname(cfg.versionfile_source), + "__init__.py") + if os.path.exists(ipy): + try: + with open(ipy, "r") as f: + old = f.read() + except EnvironmentError: + old = "" + if INIT_PY_SNIPPET not in old: + print(" appending to %s" % ipy) + with open(ipy, "a") as f: + f.write(INIT_PY_SNIPPET) + else: + print(" %s unmodified" % ipy) + else: + print(" %s doesn't exist, ok" % ipy) + ipy = None + + # Make sure both the top-level "versioneer.py" and versionfile_source + # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so + # they'll be copied into source distributions. Pip won't be able to + # install the package without this. + manifest_in = os.path.join(root, "MANIFEST.in") + simple_includes = set() + try: + with open(manifest_in, "r") as f: + for line in f: + if line.startswith("include "): + for include in line.split()[1:]: + simple_includes.add(include) + except EnvironmentError: + pass + # That doesn't cover everything MANIFEST.in can do + # (http://docs.python.org/2/distutils/sourcedist.html#commands), so + # it might give some false negatives. Appending redundant 'include' + # lines is safe, though. + if "versioneer.py" not in simple_includes: + print(" appending 'versioneer.py' to MANIFEST.in") + with open(manifest_in, "a") as f: + f.write("include versioneer.py\n") + else: + print(" 'versioneer.py' already in MANIFEST.in") + if cfg.versionfile_source not in simple_includes: + print(" appending versionfile_source ('%s') to MANIFEST.in" % + cfg.versionfile_source) + with open(manifest_in, "a") as f: + f.write("include %s\n" % cfg.versionfile_source) + else: + print(" versionfile_source already in MANIFEST.in") + + # Make VCS-specific changes. For git, this means creating/changing + # .gitattributes to mark _version.py for export-subst keyword + # substitution. 
+ do_vcs_install(manifest_in, cfg.versionfile_source, ipy) + return 0 + + +def scan_setup_py(): + """Validate the contents of setup.py against Versioneer's expectations.""" + found = set() + setters = False + errors = 0 + with open("setup.py", "r") as f: + for line in f.readlines(): + if "import versioneer" in line: + found.add("import") + if "versioneer.get_cmdclass()" in line: + found.add("cmdclass") + if "versioneer.get_version()" in line: + found.add("get_version") + if "versioneer.VCS" in line: + setters = True + if "versioneer.versionfile_source" in line: + setters = True + if len(found) != 3: + print("") + print("Your setup.py appears to be missing some important items") + print("(but I might be wrong). Please make sure it has something") + print("roughly like the following:") + print("") + print(" import versioneer") + print(" setup( version=versioneer.get_version(),") + print(" cmdclass=versioneer.get_cmdclass(), ...)") + print("") + errors += 1 + if setters: + print("You should remove lines like 'versioneer.VCS = ' and") + print("'versioneer.versionfile_source = ' . This configuration") + print("now lives in setup.cfg, and should be removed from setup.py") + print("") + errors += 1 + return errors + + +if __name__ == "__main__": + cmd = sys.argv[1] + if cmd == "setup": + errors = do_setup() + errors += scan_setup_py() + if errors: + sys.exit(1)
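As a closing note, a minimal sketch of driving this module programmatically, equivalent to the `python versioneer.py setup` handling in the block above (assuming the vendored `versioneer.py` sits next to `setup.py`; this is not an official API):

    # equivalent to `python versioneer.py setup`
    import sys
    import versioneer

    errors = versioneer.do_setup()        # writes _version.py, updates __init__.py and MANIFEST.in
    errors += versioneer.scan_setup_py()  # checks that setup.py uses get_version()/get_cmdclass()
    if errors:
        sys.exit(1)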