diff --git a/.circle/codecov.sh b/.circle/codecov.sh new file mode 100644 index 0000000000..c71cf1c6f5 --- /dev/null +++ b/.circle/codecov.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# +# This script pull all coverage files into the $CIRCLE_TEST_REPORTS folder +# and sends data to codecov. +# + +# Setting # $ help set +set -e # Exit immediately if a command exits with a non-zero status. +set -u # Treat unset variables as an error when substituting. +set -x # Print command traces before executing command. + +mkdir -p ${CIRCLE_TEST_REPORTS}/ +for report in $( ls ~/scratch/*.xml ); do + rname=$( basename $report ) + cp ${report} ${CIRCLE_TEST_REPORTS}/${rname:: -4}_${CIRCLE_NODE_INDEX}.xml +done + +# Send coverage data to codecov.io +curl -so codecov.io https://codecov.io/bash +chmod 755 codecov.io + +find "${CIRCLE_TEST_REPORTS}/" -name 'coverage*.xml' -print0 | \ + xargs -0 -I file ./codecov.io -f file -t "${CODECOV_TOKEN}" -F unittests +find "${CIRCLE_TEST_REPORTS}/" -name 'smoketests*.xml' -print0 | \ + xargs -0 -I file ./codecov.io -f file -t "${CODECOV_TOKEN}" -F smoketests diff --git a/docker/files/tests.sh b/.circle/tests.sh similarity index 79% rename from docker/files/tests.sh rename to .circle/tests.sh index 3b43003294..602dddca8b 100644 --- a/docker/files/tests.sh +++ b/.circle/tests.sh @@ -1,9 +1,12 @@ #!/bin/bash +# +# Balance nipype testing workflows across CircleCI build nodes +# -set -o nounset -set -o xtrace - -export CODECOV_TOKEN=ac172a50-8e66-42e5-8822-5373fcf54686 +# Setting # $ help set +set -e # Exit immediately if a command exits with a non-zero status. +set -u # Treat unset variables as an error when substituting. +set -x # Print command traces before executing command. if [ "${CIRCLE_NODE_TOTAL:-}" != "4" ]; then echo "These tests were designed to be run at 4x parallelism." @@ -14,15 +17,15 @@ fi # They may need to be rebalanced in the future. 
case ${CIRCLE_NODE_INDEX} in 0) - docker run --rm -it -v $HOME/examples:/root/examples:ro -v $SCRATCH:/scratch -w /scratch nipype/nipype_test:py35 /usr/bin/run_examples.sh fmri_spm_dartel Linear /root/examples/ level1 && \ - docker run --rm -it -v $HOME/examples:/root/examples:ro -v $SCRATCH:/scratch -w /scratch nipype/nipype_test:py35 /usr/bin/run_examples.sh fmri_spm_dartel Linear /root/examples/ l2pipeline - ;; - 1) - docker run --rm -it -v $HOME/examples:/root/examples:ro -v $SCRATCH:/scratch -w /scratch nipype/nipype_test:py35 /usr/bin/run_examples.sh test_spm Linear /root/examples/ workflow3d && \ - docker run --rm -it -v $HOME/examples:/root/examples:ro -v $SCRATCH:/scratch -w /scratch nipype/nipype_test:py35 /usr/bin/run_examples.sh test_spm Linear /root/examples/ workflow4d && \ docker run --rm -it -e FSL_COURSE_DATA="/root/examples/nipype-fsl_course_data" -v $HOME/examples:/root/examples:ro -v $SCRATCH:/scratch -w /root/src/nipype nipype/nipype_test:py27 /usr/bin/run_pytests.sh py27 && \ docker run --rm -it -e FSL_COURSE_DATA="/root/examples/nipype-fsl_course_data" -v $HOME/examples:/root/examples:ro -v $SCRATCH:/scratch -w /root/src/nipype nipype/nipype_test:py35 /usr/bin/run_pytests.sh py35 && \ - docker run --rm -it -v $SCRATCH:/scratch -w /root/src/nipype/doc nipype/nipype_test:py35 /usr/bin/run_builddocs.sh + docker run --rm -it -v $SCRATCH:/scratch -w /root/src/nipype/doc nipype/nipype_test:py35 /usr/bin/run_builddocs.sh && \ + docker run --rm -it -v $HOME/examples:/root/examples:ro -v $SCRATCH:/scratch -w /scratch nipype/nipype_test:py35 /usr/bin/run_examples.sh test_spm Linear /root/examples/ workflow3d && \ + docker run --rm -it -v $HOME/examples:/root/examples:ro -v $SCRATCH:/scratch -w /scratch nipype/nipype_test:py35 /usr/bin/run_examples.sh test_spm Linear /root/examples/ workflow4d + ;; + 1) + docker run --rm -it -v $HOME/examples:/root/examples:ro -v $SCRATCH:/scratch -w /scratch nipype/nipype_test:py35 /usr/bin/run_examples.sh 
fmri_spm_dartel Linear /root/examples/ level1 && \ + docker run --rm -it -v $HOME/examples:/root/examples:ro -v $SCRATCH:/scratch -w /scratch nipype/nipype_test:py35 /usr/bin/run_examples.sh fmri_spm_dartel Linear /root/examples/ l2pipeline ;; 2) docker run --rm -it -e NIPYPE_NUMBER_OF_CPUS=4 -v $HOME/examples:/root/examples:ro -v $SCRATCH:/scratch -w /scratch nipype/nipype_test:py27 /usr/bin/run_examples.sh fmri_spm_nested MultiProc /root/examples/ level1 && \ @@ -34,14 +37,3 @@ case ${CIRCLE_NODE_INDEX} in docker run --rm -it -v $HOME/examples:/root/examples:ro -v $SCRATCH:/scratch -w /scratch nipype/nipype_test:py35 /usr/bin/run_examples.sh fmri_fsl_reuse Linear /root/examples/ level1_workflow ;; esac - -# Put the artifacts in place -bash docker/files/teardown.sh - -# Send coverage data to codecov.io -curl -so codecov.io https://codecov.io/bash -chmod 755 codecov.io -find "${CIRCLE_TEST_REPORTS}/pytest" -name 'coverage*.xml' -print0 | \ - xargs -0 -I file ./codecov.io -f file -t "${CODECOV_TOKEN}" -F unittests -find "${CIRCLE_TEST_REPORTS}/pytest" -name 'smoketests*.xml' -print0 | \ - xargs -0 -I file ./codecov.io -f file -t "${CODECOV_TOKEN}" -F smoketests diff --git a/.dockerignore b/.dockerignore index 381de568df..2140bfcb66 100644 --- a/.dockerignore +++ b/.dockerignore @@ -23,11 +23,13 @@ src/ # other docs/**/* docs/ +.circle/**/* +.circle/ +circle.yml .coverage .coveragerc codecov.yml rtd_requirements.txt -circle.yml Vagrantfile .travis.yml .noserc diff --git a/CHANGES b/CHANGES index c2b403b348..970422b3b4 100644 --- a/CHANGES +++ b/CHANGES @@ -1,6 +1,7 @@ Upcoming Release ===================== +* ENH: Added non-steady state detector for EPI data (https://github.com/nipy/nipype/pull/1839) * ENH: Enable new BBRegister init options for FSv6+ (https://github.com/nipy/nipype/pull/1811) * REF: Splits nipype.interfaces.utility into base, csv, and wrappers (https://github.com/nipy/nipype/pull/1828) * FIX: Makespec now runs with nipype in current 
directory (https://github.com/nipy/nipype/pull/1813) diff --git a/README.rst b/README.rst index 8eb7b3e0c0..da6dbd280d 100644 --- a/README.rst +++ b/README.rst @@ -78,8 +78,8 @@ Support and Communication ------------------------- If you have a problem or would like to ask a question about how to do something in Nipype please open an issue to -`NeuroStars.org `_ with a *nipype* tag. `NeuroStars.org `_ is a -platform similar to StackOverflow but dedicated to neuroinformatics. +`NeuroStars.org `_ with a *nipype* tag. `NeuroStars.org `_ is a +platform similar to StackOverflow but dedicated to neuroinformatics. To participate in the Nipype development related discussions please use the following mailing list:: @@ -117,16 +117,3 @@ Currently Nipype consists of the following files and directories: setup.py Script for building and installing NIPYPE. -License information -------------------- - -We use the 3-clause BSD license; the full license is in the file ``LICENSE`` in -the nipype distribution. - -There are interfaces to some GNU code but these are entirely optional. - -All trademarks referenced herein are property of their respective -holders. - -Copyright (c) 2009-2015, NIPY Developers -All rights reserved. diff --git a/circle.yml b/circle.yml index db67dc81a2..ac27d71a56 100644 --- a/circle.yml +++ b/circle.yml @@ -8,6 +8,7 @@ machine: DATA_NIPYPE_FSL_COURSE: "${OSF_NIPYPE_URL}/57f472cf9ad5a101f977ecfe" DATA_NIPYPE_FSL_FEEDS: "${OSF_NIPYPE_URL}/57f473066c613b01f113e7af" SCRATCH: "$HOME/scratch" + CODECOV_TOKEN: "ac172a50-8e66-42e5-8822-5373fcf54686" services: - docker @@ -32,34 +33,39 @@ dependencies: - if [[ ! 
-d ~/examples/feeds ]]; then wget --retry-connrefused --waitretry=5 --read-timeout=20 --timeout=15 -t 0 -q -O fsl-5.0.9-feeds.tar.gz "${DATA_NIPYPE_FSL_FEEDS}" && tar xzf fsl-5.0.9-feeds.tar.gz -C ~/examples/; fi - docker images - sed -i -E "s/(__version__ = )'[A-Za-z0-9.-]+'/\1'$CIRCLE_TAG'/" nipype/info.py - - e=1 && for i in {1..5}; do docker build -t nipype/nipype:latest --build-arg BUILD_DATE=`date -u +"%Y-%m-%dT%H:%M:%SZ"` --build-arg VCS_REF=`git rev-parse --short HEAD` --build-arg VERSION=$CIRCLE_TAG . && e=0 && break || sleep 15; done && [ "$e" -eq "0" ] : + - e=1 && for i in {1..5}; do docker build --rm=false -t nipype/nipype:latest --build-arg BUILD_DATE=`date -u +"%Y-%m-%dT%H:%M:%SZ"` --build-arg VCS_REF=`git rev-parse --short HEAD` --build-arg VERSION=$CIRCLE_TAG . && e=0 && break || sleep 15; done && [ "$e" -eq "0" ] : timeout: 21600 - - e=1 && for i in {1..5}; do docker build -f docker/Dockerfile_py27 -t nipype/nipype_test:py27 . && e=0 && break || sleep 15; done && [ "$e" -eq "0" ] : + - e=1 && for i in {1..5}; do docker build --rm=false -f docker/Dockerfile_py27 -t nipype/nipype_test:py27 . && e=0 && break || sleep 15; done && [ "$e" -eq "0" ] : timeout: 1600 - - e=1 && for i in {1..5}; do docker build -f docker/Dockerfile_py35 -t nipype/nipype_test:py35 . && e=0 && break || sleep 15; done && [ "$e" -eq "0" ] : + - e=1 && for i in {1..5}; do docker build --rm=false -f docker/Dockerfile_py35 -t nipype/nipype_test:py35 . 
&& e=0 && break || sleep 15; done && [ "$e" -eq "0" ] : timeout: 1600 - docker save -o $HOME/docker/cache.tar nipype/nipype:latest nipype/nipype_test:py27 nipype/nipype_test:py35 : timeout: 6000 test: override: - - bash docker/files/tests.sh : + - bash .circle/tests.sh : timeout: 7200 parallel: true + post: + # Send coverage data to codecov.io + - bash .circle/codecov.sh general: artifacts: - - "~/docs" - - "~/logs" + - "~/scratch/docs" + - "~/scratch/logs" deployment: production: tag: /.*/ commands: + # Deploy to docker hub - if [[ -n "$DOCKER_PASS" ]]; then docker login -e $DOCKER_EMAIL -u $DOCKER_USER -p $DOCKER_PASS && docker push nipype/nipype:latest; fi : timeout: 21600 - if [[ -n "$DOCKER_PASS" ]]; then docker login -e $DOCKER_EMAIL -u $DOCKER_USER -p $DOCKER_PASS && docker tag nipype/nipype nipype/nipype:$CIRCLE_TAG && docker push nipype/nipype:$CIRCLE_TAG; fi : timeout: 21600 + # Automatic deployment to Pypi: # - printf "[distutils]\nindex-servers =\n pypi\n\n[pypi]\nusername:$PYPI_USER\npassword:$PYPI_PASS\n" > ~/.pypirc # - python setup.py sdist upload -r pypi diff --git a/docker/files/teardown.sh b/docker/files/teardown.sh deleted file mode 100644 index 3712b7ad23..0000000000 --- a/docker/files/teardown.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -# -# This script puts all artifacts in place after the smoke tests -# -# -set -u -set -e - -mkdir -p ${CIRCLE_TEST_REPORTS}/pytest -mv ~/scratch/*.xml ${CIRCLE_TEST_REPORTS}/pytest -mkdir -p ~/docs -mv ~/scratch/docs/* ~/docs/ -mkdir -p ~/logs -mv ~/scratch/builddocs.log ~/logs/builddocs.log -mv ~/scratch/logs/* ~/logs/ diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py index bfa74ca6b9..09d7493264 100644 --- a/nipype/algorithms/confounds.py +++ b/nipype/algorithms/confounds.py @@ -579,6 +579,79 @@ def _list_outputs(self): outputs['detrended_file'] = op.abspath(self.inputs.detrended_file) return outputs + +class NonSteadyStateDetectorInputSpec(BaseInterfaceInputSpec): + in_file = 
File(exists=True, mandatory=True, desc='4D NIFTI EPI file') + + +class NonSteadyStateDetectorOutputSpec(TraitedSpec): + n_volumes_to_discard = traits.Int(desc='Number of non-steady state volumes' + 'detected in the beginning of the scan.') + + +class NonSteadyStateDetector(BaseInterface): + """ + Returns the number of non-steady state volumes detected at the beginning + of the scan. + """ + + input_spec = NonSteadyStateDetectorInputSpec + output_spec = NonSteadyStateDetectorOutputSpec + + def _run_interface(self, runtime): + in_nii = nb.load(self.inputs.in_file) + global_signal = in_nii.get_data()[:,:,:,:50].mean(axis=0).mean(axis=0).mean(axis=0) + + self._results = { + 'n_volumes_to_discard': _is_outlier(global_signal) + } + + return runtime + + def _list_outputs(self): + return self._results + +def _is_outlier(points, thresh=3.5): + """ + Returns the number of leading timepoints classified as outliers + (non-steady state volumes). + + Parameters: + ----------- + points : An numobservations by numdimensions array of observations + thresh : The modified z-score to use as a threshold. Observations with + a modified z-score (based on the median absolute deviation) greater + than this value will be classified as outliers. + + Returns: + -------- + n_discard : The number of initial observations classified as outliers. + + References: + ---------- + Boris Iglewicz and David Hoaglin (1993), "Volume 16: How to Detect and + Handle Outliers", The ASQC Basic References in Quality Control: + Statistical Techniques, Edward F. Mykytka, Ph.D., Editor. 
+ """ + if len(points.shape) == 1: + points = points[:, None] + median = np.median(points, axis=0) + diff = np.sum((points - median) ** 2, axis=-1) + diff = np.sqrt(diff) + med_abs_deviation = np.median(diff) + + modified_z_score = 0.6745 * diff / med_abs_deviation + + timepoints_to_discard = 0 + for i in range(len(modified_z_score)): + if modified_z_score[i] <= thresh: + break + else: + timepoints_to_discard += 1 + + return timepoints_to_discard + + def regress_poly(degree, data, remove_mean=True, axis=-1): ''' returns data with degree polynomial regressed out. Be default it is calculated along the last axis (usu. time). diff --git a/nipype/algorithms/tests/test_auto_CompCor.py b/nipype/algorithms/tests/test_auto_CompCor.py deleted file mode 100644 index 12cec2ebb0..0000000000 --- a/nipype/algorithms/tests/test_auto_CompCor.py +++ /dev/null @@ -1,37 +0,0 @@ -# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals -from ..confounds import CompCor - - -def test_CompCor_inputs(): - input_map = dict(components_file=dict(usedefault=True, - ), - header=dict(), - ignore_exception=dict(nohash=True, - usedefault=True, - ), - mask_file=dict(), - num_components=dict(usedefault=True, - ), - realigned_file=dict(mandatory=True, - ), - regress_poly_degree=dict(usedefault=True, - ), - use_regress_poly=dict(usedefault=True, - ), - ) - inputs = CompCor.input_spec() - - for key, metadata in list(input_map.items()): - for metakey, value in list(metadata.items()): - assert getattr(inputs.traits()[key], metakey) == value - - -def test_CompCor_outputs(): - output_map = dict(components_file=dict(), - ) - outputs = CompCor.output_spec() - - for key, metadata in list(output_map.items()): - for metakey, value in list(metadata.items()): - assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_ComputeDVARS.py b/nipype/algorithms/tests/test_auto_ComputeDVARS.py index 9a52898fc7..7c59f851d1 100644 --- 
a/nipype/algorithms/tests/test_auto_ComputeDVARS.py +++ b/nipype/algorithms/tests/test_auto_ComputeDVARS.py @@ -17,6 +17,8 @@ def test_ComputeDVARS_inputs(): ), in_mask=dict(mandatory=True, ), + intensity_normalization=dict(usedefault=True, + ), remove_zerovariance=dict(usedefault=True, ), save_all=dict(usedefault=True, diff --git a/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py b/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py index da1dec59d6..e230992eec 100644 --- a/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py +++ b/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py @@ -19,6 +19,8 @@ def test_FramewiseDisplacement_inputs(): ), out_file=dict(usedefault=True, ), + parameter_source=dict(mandatory=True, + ), radius=dict(usedefault=True, ), save_plot=dict(usedefault=True, diff --git a/nipype/algorithms/tests/test_auto_ErrorMap.py b/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py similarity index 58% rename from nipype/algorithms/tests/test_auto_ErrorMap.py rename to nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py index f3d19c5690..7b12363ee8 100644 --- a/nipype/algorithms/tests/test_auto_ErrorMap.py +++ b/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py @@ -1,34 +1,26 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from __future__ import unicode_literals -from ..metrics import ErrorMap +from ..confounds import NonSteadyStateDetector -def test_ErrorMap_inputs(): +def test_NonSteadyStateDetector_inputs(): input_map = dict(ignore_exception=dict(nohash=True, usedefault=True, ), - in_ref=dict(mandatory=True, + in_file=dict(mandatory=True, ), - in_tst=dict(mandatory=True, - ), - mask=dict(), - metric=dict(mandatory=True, - usedefault=True, - ), - out_map=dict(), ) - inputs = ErrorMap.input_spec() + inputs = NonSteadyStateDetector.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == 
value -def test_ErrorMap_outputs(): - output_map = dict(distance=dict(), - out_map=dict(), +def test_NonSteadyStateDetector_outputs(): + output_map = dict(n_volumes_to_discard=dict(), ) - outputs = ErrorMap.output_spec() + outputs = NonSteadyStateDetector.output_spec() for key, metadata in list(output_map.items()): for metakey, value in list(metadata.items()): diff --git a/nipype/algorithms/tests/test_auto_Overlap.py b/nipype/algorithms/tests/test_auto_Overlap.py deleted file mode 100644 index dcabbec296..0000000000 --- a/nipype/algorithms/tests/test_auto_Overlap.py +++ /dev/null @@ -1,47 +0,0 @@ -# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT -from __future__ import unicode_literals -from ..misc import Overlap - - -def test_Overlap_inputs(): - input_map = dict(bg_overlap=dict(mandatory=True, - usedefault=True, - ), - ignore_exception=dict(nohash=True, - usedefault=True, - ), - mask_volume=dict(), - out_file=dict(usedefault=True, - ), - vol_units=dict(mandatory=True, - usedefault=True, - ), - volume1=dict(mandatory=True, - ), - volume2=dict(mandatory=True, - ), - weighting=dict(usedefault=True, - ), - ) - inputs = Overlap.input_spec() - - for key, metadata in list(input_map.items()): - for metakey, value in list(metadata.items()): - assert getattr(inputs.traits()[key], metakey) == value - - -def test_Overlap_outputs(): - output_map = dict(dice=dict(), - diff_file=dict(), - jaccard=dict(), - labels=dict(), - roi_di=dict(), - roi_ji=dict(), - roi_voldiff=dict(), - volume_difference=dict(), - ) - outputs = Overlap.output_spec() - - for key, metadata in list(output_map.items()): - for metakey, value in list(metadata.items()): - assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_TSNR.py b/nipype/algorithms/tests/test_auto_TSNR.py deleted file mode 100644 index d906d39e3f..0000000000 --- a/nipype/algorithms/tests/test_auto_TSNR.py +++ /dev/null @@ -1,43 +0,0 @@ -# AUTO-GENERATED by tools/checkspecs.py - DO NOT 
EDIT -from __future__ import unicode_literals -from ..misc import TSNR - - -def test_TSNR_inputs(): - input_map = dict(detrended_file=dict(hash_files=False, - usedefault=True, - ), - ignore_exception=dict(nohash=True, - usedefault=True, - ), - in_file=dict(mandatory=True, - ), - mean_file=dict(hash_files=False, - usedefault=True, - ), - regress_poly=dict(), - stddev_file=dict(hash_files=False, - usedefault=True, - ), - tsnr_file=dict(hash_files=False, - usedefault=True, - ), - ) - inputs = TSNR.input_spec() - - for key, metadata in list(input_map.items()): - for metakey, value in list(metadata.items()): - assert getattr(inputs.traits()[key], metakey) == value - - -def test_TSNR_outputs(): - output_map = dict(detrended_file=dict(), - mean_file=dict(), - stddev_file=dict(), - tsnr_file=dict(), - ) - outputs = TSNR.output_spec() - - for key, metadata in list(output_map.items()): - for metakey, value in list(metadata.items()): - assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_confounds.py b/nipype/algorithms/tests/test_confounds.py index 8f9d31ed56..4eaa9dc073 100644 --- a/nipype/algorithms/tests/test_confounds.py +++ b/nipype/algorithms/tests/test_confounds.py @@ -6,7 +6,8 @@ import pytest from nipype.testing import example_data -from nipype.algorithms.confounds import FramewiseDisplacement, ComputeDVARS +from nipype.algorithms.confounds import FramewiseDisplacement, ComputeDVARS, \ + _is_outlier import numpy as np @@ -63,4 +64,12 @@ def test_dvars(tmpdir): assert (np.abs(dv1[:, 1] - ground_truth[:, 1]).sum() / len(dv1)) > 0.05 - assert (np.abs(dv1[:, 2] - ground_truth[:, 2]).sum() / len(dv1)) < 0.05 \ No newline at end of file + assert (np.abs(dv1[:, 2] - ground_truth[:, 2]).sum() / len(dv1)) < 0.05 + +def test_outliers(tmpdir): + np.random.seed(0) + in_data = np.random.randn(100) + in_data[0] += 10 + + assert _is_outlier(in_data) == 1 + diff --git a/nipype/info.py b/nipype/info.py index ef13ed6d62..b990d67a05 100644 
--- a/nipype/info.py +++ b/nipype/info.py @@ -143,7 +143,6 @@ def get_nipype_gitversion(): 'simplejson>=%s' % SIMPLEJSON_MIN_VERSION, 'prov>=%s' % PROV_MIN_VERSION, 'click>=%s' % CLICK_MIN_VERSION, - 'xvfbwrapper', 'funcsigs', 'configparser', 'pytest>=%s' % PYTEST_MIN_VERSION @@ -165,6 +164,7 @@ def get_nipype_gitversion(): 'fmri': ['nitime', 'nilearn', 'dipy', 'nipy', 'matplotlib'], 'profiler': ['psutil'], 'duecredit': ['duecredit'], + 'xvfbwrapper': ['xvfbwrapper'], # 'mesh': ['mayavi'] # Enable when it works } @@ -172,4 +172,3 @@ def get_nipype_gitversion(): EXTRA_REQUIRES['all'] = [val for _, val in list(EXTRA_REQUIRES.items())] STATUS = 'stable' - diff --git a/nipype/interfaces/ants/segmentation.py b/nipype/interfaces/ants/segmentation.py index d71ec104de..dfb8997372 100644 --- a/nipype/interfaces/ants/segmentation.py +++ b/nipype/interfaces/ants/segmentation.py @@ -1129,12 +1129,30 @@ def _format_arg(self, opt, spec, val): retval = '' if not isdefined(self.inputs.out_label_fusion): retval = '-o {0}'.format(self.inputs.out_intensity_fusion_name_format) + elif opt == 'atlas_image': + atlas_image_cmd = " ".join( + ['-g [{0}]'.format(", ".join("'%s'" % fn for fn in ai)) + for ai in self.inputs.atlas_image] + ) + retval = atlas_image_cmd + elif opt == 'target_image': + target_image_cmd = " ".join( + ['-t [{0}]'.format(", ".join("'%s'" % fn for fn in ai)) + for ai in self.inputs.target_image] + ) + retval = target_image_cmd + elif opt == 'atlas_segmentation_image': + assert len(val) == len(self.inputs.atlas_image), "Number of specified " \ + "segmentations should be identical to the number of atlas image " \ + "sets {0}!={1}".format(len(val), len(self.inputs.atlas_image)) + + atlas_segmentation_image_cmd = " ".join( + ['-l {0}'.format(fn) for fn in self.inputs.atlas_segmentation_image] + ) + retval = atlas_segmentation_image_cmd else: - if opt == 'atlas_segmentation_image': - assert len(val) == len(self.inputs.atlas_image), "Number of specified " \ - "segmentations 
should be identical to the number of atlas image " \ - "sets {0}!={1}".format(len(val), len(self.inputs.atlas_image)) - return super(ANTSCommand, self)._format_arg(opt, spec, val) + + return super(AntsJointFusion, self)._format_arg(opt, spec, val) return retval def _list_outputs(self): diff --git a/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py b/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py index 4dd522a69b..f7aafb27be 100644 --- a/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py +++ b/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py @@ -11,7 +11,6 @@ def test_CreateJacobianDeterminantImage_inputs(): position=1, ), doLogJacobian=dict(argstr='%d', - mandatory=False, position=3, ), environ=dict(nohash=True, @@ -35,7 +34,6 @@ def test_CreateJacobianDeterminantImage_inputs(): terminal_output=dict(nohash=True, ), useGeometric=dict(argstr='%d', - mandatory=False, position=4, ), ) diff --git a/nipype/interfaces/ants/utils.py b/nipype/interfaces/ants/utils.py index ade303898c..b88ca57e14 100644 --- a/nipype/interfaces/ants/utils.py +++ b/nipype/interfaces/ants/utils.py @@ -145,9 +145,9 @@ class CreateJacobianDeterminantImageInputSpec(ANTSCommandInputSpec): outputImage = File(argstr='%s', mandatory=True, position=2, desc='output filename') - doLogJacobian = traits.Enum(0, 1, argstr='%d', mandatory=False, position=3, + doLogJacobian = traits.Enum(0, 1, argstr='%d', position=3, desc='return the log jacobian') - useGeometric = traits.Enum(0, 1, argstr='%d', mandatory=False, position=4, + useGeometric = traits.Enum(0, 1, argstr='%d', position=4, desc='return the geometric jacobian') class CreateJacobianDeterminantImageOutputSpec(TraitedSpec): diff --git a/nipype/interfaces/base.py b/nipype/interfaces/base.py index b46712e701..62ea7851ac 100644 --- a/nipype/interfaces/base.py +++ b/nipype/interfaces/base.py @@ -1844,7 +1844,7 @@ def _filename_from_source(self, name, chain=None): 
# special treatment for files try: _, base, source_ext = split_filename(source) - except AttributeError: + except (AttributeError, TypeError): base = source else: if name in chain: diff --git a/nipype/interfaces/freesurfer/preprocess.py b/nipype/interfaces/freesurfer/preprocess.py index 02ae960c5b..9c25f93e43 100644 --- a/nipype/interfaces/freesurfer/preprocess.py +++ b/nipype/interfaces/freesurfer/preprocess.py @@ -632,10 +632,42 @@ class ReconAllInputSpec(CommandLineInputSpec): desc="Number of processors to use in parallel") parallel = traits.Bool(argstr="-parallel", desc="Enable parallel execution") + hires = traits.Bool(argstr="-hires", min_ver='6.0.0', + desc="Conform to minimum voxel size (for voxels < 1mm)") + expert = File(exists=True, argstr='-expert %s', + desc="Set parameters using expert file") subjects_dir = Directory(exists=True, argstr='-sd %s', hash_files=False, desc='path to subjects directory', genfile=True) flags = traits.Str(argstr='%s', desc='additional parameters') + # Expert options + talairach = traits.Str(desc="Flags to pass to talairach commands", xor=['expert']) + mri_normalize = traits.Str(desc="Flags to pass to mri_normalize commands", xor=['expert']) + mri_watershed = traits.Str(desc="Flags to pass to mri_watershed commands", xor=['expert']) + mri_em_register = traits.Str(desc="Flags to pass to mri_em_register commands", xor=['expert']) + mri_ca_normalize = traits.Str(desc="Flags to pass to mri_ca_normalize commands", xor=['expert']) + mri_ca_register = traits.Str(desc="Flags to pass to mri_ca_register commands", xor=['expert']) + mri_remove_neck = traits.Str(desc="Flags to pass to mri_remove_neck commands", xor=['expert']) + mri_ca_label = traits.Str(desc="Flags to pass to mri_ca_label commands", xor=['expert']) + mri_segstats = traits.Str(desc="Flags to pass to mri_segstats commands", xor=['expert']) + mri_mask = traits.Str(desc="Flags to pass to mri_mask commands", xor=['expert']) + mri_segment = traits.Str(desc="Flags to pass to 
mri_segment commands", xor=['expert']) + mri_edit_wm_with_aseg = traits.Str(desc="Flags to pass to mri_edit_wm_with_aseg commands", xor=['expert']) + mri_pretess = traits.Str(desc="Flags to pass to mri_pretess commands", xor=['expert']) + mri_fill = traits.Str(desc="Flags to pass to mri_fill commands", xor=['expert']) + mri_tessellate = traits.Str(desc="Flags to pass to mri_tessellate commands", xor=['expert']) + mris_smooth = traits.Str(desc="Flags to pass to mri_smooth commands", xor=['expert']) + mris_inflate = traits.Str(desc="Flags to pass to mri_inflate commands", xor=['expert']) + mris_sphere = traits.Str(desc="Flags to pass to mris_sphere commands", xor=['expert']) + mris_fix_topology = traits.Str(desc="Flags to pass to mris_fix_topology commands", xor=['expert']) + mris_make_surfaces = traits.Str(desc="Flags to pass to mris_make_surfaces commands", xor=['expert']) + mris_surf2vol = traits.Str(desc="Flags to pass to mris_surf2vol commands", xor=['expert']) + mris_register = traits.Str(desc="Flags to pass to mris_register commands", xor=['expert']) + mrisp_paint = traits.Str(desc="Flags to pass to mrisp_paint commands", xor=['expert']) + mris_ca_label = traits.Str(desc="Flags to pass to mris_ca_label commands", xor=['expert']) + mris_anatomical_stats = traits.Str(desc="Flags to pass to mris_anatomical_stats commands", xor=['expert']) + mri_aparc2aseg = traits.Str(desc="Flags to pass to mri_aparc2aseg commands", xor=['expert']) + class ReconAllOutputSpec(FreeSurferSource.output_spec): subjects_dir = Directory(exists=True, desc='Freesurfer subjects directory.') @@ -851,6 +883,16 @@ class ReconAll(CommandLine): _steps = _autorecon1_steps + _autorecon2_steps + _autorecon3_steps + _binaries = ['talairach', 'mri_normalize', 'mri_watershed', + 'mri_em_register', 'mri_ca_normalize', 'mri_ca_register', + 'mri_remove_neck', 'mri_ca_label', 'mri_segstats', + 'mri_mask', 'mri_segment', 'mri_edit_wm_with_aseg', + 'mri_pretess', 'mri_fill', 'mri_tessellate', 
'mris_smooth', + 'mris_inflate', 'mris_sphere', 'mris_fix_topology', + 'mris_make_surfaces', 'mris_surf2vol', 'mris_register', + 'mrisp_paint', 'mris_ca_label', 'mris_anatomical_stats', + 'mri_aparc2aseg'] + def _gen_subjects_dir(self): return os.getcwd() @@ -900,6 +942,11 @@ def _format_arg(self, name, trait_spec, value): @property def cmdline(self): cmd = super(ReconAll, self).cmdline + + # Adds '-expert' flag if expert flags are passed + # Mutually exclusive with 'expert' input parameter + cmd += self._prep_expert_file() + if not self._is_resuming(): return cmd subjects_dir = self.inputs.subjects_dir @@ -933,6 +980,24 @@ def cmdline(self): iflogger.info('resume recon-all : %s' % cmd) return cmd + def _prep_expert_file(self): + if isdefined(self.inputs.expert): + return '' + + lines = [] + for binary in self._binaries: + args = getattr(self.inputs, binary) + if isdefined(args): + lines.append('{} {}\n'.format(binary, args)) + + if lines == []: + return '' + + expert_fname = os.path.abspath('expert.opts') + with open(expert_fname, 'w') as fobj: + fobj.write(''.join(lines)) + return ' -expert {}'.format(expert_fname) + class BBRegisterInputSpec(FSTraitedSpec): subject_id = traits.Str(argstr='--s %s', @@ -1503,11 +1568,11 @@ class MNIBiasCorrectionInputSpec(FSTraitedSpec): # mandatory in_file = File(exists=True, mandatory=True, argstr="--i %s", desc="input volume. Input can be any format accepted by mri_convert.") + # optional out_file = File(argstr="--o %s", name_source=['in_file'], name_template='%s_output', hash_files=False, keep_extension=True, desc="output volume. Output can be any format accepted by mri_convert. " + "If the output format is COR, then the directory must exist.") - # optional iterations = traits.Int(4, argstr="--n %d", desc="Number of iterations to run nu_correct. Default is 4. 
This is the number of times " + "that nu_correct is repeated (ie, using the output from the previous run as the input for " + @@ -1528,7 +1593,7 @@ class MNIBiasCorrectionInputSpec(FSTraitedSpec): desc="Shrink parameter for finer sampling (default is 4)") class MNIBiasCorrectionOutputSpec(TraitedSpec): - out_file = File(desc="output volume") + out_file = File(exists=True, desc="output volume") class MNIBiasCorrection(FSCommand): @@ -1563,11 +1628,6 @@ class MNIBiasCorrection(FSCommand): input_spec = MNIBiasCorrectionInputSpec output_spec = MNIBiasCorrectionOutputSpec - def _list_outputs(self): - outputs = self._outputs().get() - outputs["out_file"] = os.path.abspath(self.inputs.out_file) - return outputs - class WatershedSkullStripInputSpec(FSTraitedSpec): # required diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py b/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py index f86d934d7a..b0e89e9a3f 100644 --- a/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py +++ b/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py @@ -18,13 +18,68 @@ def test_ReconAll_inputs(): environ=dict(nohash=True, usedefault=True, ), + expert=dict(argstr='-expert %s', + ), flags=dict(argstr='%s', ), hemi=dict(argstr='-hemi %s', ), + hires=dict(argstr='-hires', + min_ver='6.0.0', + ), ignore_exception=dict(nohash=True, usedefault=True, ), + mri_aparc2aseg=dict(xor=['expert'], + ), + mri_ca_label=dict(xor=['expert'], + ), + mri_ca_normalize=dict(xor=['expert'], + ), + mri_ca_register=dict(xor=['expert'], + ), + mri_edit_wm_with_aseg=dict(xor=['expert'], + ), + mri_em_register=dict(xor=['expert'], + ), + mri_fill=dict(xor=['expert'], + ), + mri_mask=dict(xor=['expert'], + ), + mri_normalize=dict(xor=['expert'], + ), + mri_pretess=dict(xor=['expert'], + ), + mri_remove_neck=dict(xor=['expert'], + ), + mri_segment=dict(xor=['expert'], + ), + mri_segstats=dict(xor=['expert'], + ), + mri_tessellate=dict(xor=['expert'], + ), + mri_watershed=dict(xor=['expert'], 
+ ), + mris_anatomical_stats=dict(xor=['expert'], + ), + mris_ca_label=dict(xor=['expert'], + ), + mris_fix_topology=dict(xor=['expert'], + ), + mris_inflate=dict(xor=['expert'], + ), + mris_make_surfaces=dict(xor=['expert'], + ), + mris_register=dict(xor=['expert'], + ), + mris_smooth=dict(xor=['expert'], + ), + mris_sphere=dict(xor=['expert'], + ), + mris_surf2vol=dict(xor=['expert'], + ), + mrisp_paint=dict(xor=['expert'], + ), openmp=dict(argstr='-openmp %d', ), parallel=dict(argstr='-parallel', @@ -36,6 +91,8 @@ def test_ReconAll_inputs(): genfile=True, hash_files=False, ), + talairach=dict(xor=['expert'], + ), terminal_output=dict(nohash=True, ), use_T2=dict(argstr='-T2pial', diff --git a/nipype/interfaces/freesurfer/tests/test_preprocess.py b/nipype/interfaces/freesurfer/tests/test_preprocess.py index da6ba65fc3..deb475a6b4 100644 --- a/nipype/interfaces/freesurfer/tests/test_preprocess.py +++ b/nipype/interfaces/freesurfer/tests/test_preprocess.py @@ -85,3 +85,36 @@ def test_synthesizeflash(create_files_in_directory): syn2 = freesurfer.SynthesizeFLASH(t1_image=filelist[0], pd_image=filelist[1], flip_angle=20, te=5, tr=25) assert syn2.cmdline == ('mri_synthesize 25.00 20.00 5.000 %s %s %s' % (filelist[0], filelist[1], os.path.join(outdir, 'synth-flash_20.mgz'))) + +@pytest.mark.skipif(freesurfer.no_freesurfer(), reason="freesurfer is not installed") +def test_mandatory_outvol(create_files_in_directory): + filelist, outdir = create_files_in_directory + mni = freesurfer.MNIBiasCorrection() + + # make sure command gets called + assert mni.cmd == "mri_nu_correct.mni" + + # test raising error with mandatory args absent + with pytest.raises(ValueError): mni.cmdline + + # test with minimal args + mni.inputs.in_file = filelist[0] + base, ext = os.path.splitext(os.path.basename(filelist[0])) + if ext == '.gz': + base, ext2 = os.path.splitext(base) + ext = ext2 + ext + + assert mni.cmdline == ( + 'mri_nu_correct.mni --i %s --o %s_output%s' % (filelist[0], base, ext)) 
+ + # test with custom outfile + mni.inputs.out_file = 'new_corrected_file.mgz' + assert mni.cmdline == ('mri_nu_correct.mni --i %s --o new_corrected_file.mgz' + % (filelist[0])) + + # constructor based tests + mni2 = freesurfer.MNIBiasCorrection(in_file=filelist[0], + out_file='bias_corrected_output', + iterations=4) + assert mni2.cmdline == ('mri_nu_correct.mni --i %s --n 4 --o bias_corrected_output' + % filelist[0]) diff --git a/nipype/interfaces/fsl/__init__.py b/nipype/interfaces/fsl/__init__.py index 58b7321416..b0a8b5a88e 100644 --- a/nipype/interfaces/fsl/__init__.py +++ b/nipype/interfaces/fsl/__init__.py @@ -8,7 +8,7 @@ """ from .base import (FSLCommand, Info, check_fsl, no_fsl, no_fsl_course_data) -from .preprocess import (FAST, FLIRT, ApplyXfm, ApplyXFM, BET, MCFLIRT, FNIRT, +from .preprocess import (FAST, FLIRT, ApplyXFM, BET, MCFLIRT, FNIRT, ApplyWarp, SliceTimer, SUSAN, PRELUDE, FUGUE, FIRST) from .model import (Level1Design, FEAT, FEATModel, FILMGLS, FEATRegister, FLAMEO, ContrastMgr, MultipleRegressDesign, L2Model, SMM, diff --git a/nipype/interfaces/fsl/model.py b/nipype/interfaces/fsl/model.py index 5ede5fa1f8..8a00b44a76 100644 --- a/nipype/interfaces/fsl/model.py +++ b/nipype/interfaces/fsl/model.py @@ -49,7 +49,6 @@ class Level1DesignInputSpec(BaseInterfaceInputSpec): "{'dgamma': {'derivs': True}}")) orthogonalization = traits.Dict(traits.Int, traits.Dict(traits.Int, traits.Either(traits.Bool,traits.Int)), - mandatory=False, desc=("which regressors to make orthogonal e.g., " "{1: {0:0,1:0,2:0}, 2: {0:1,1:1,2:0}} to make the second " "regressor in a 2-regressor model orthogonal to the first."), diff --git a/nipype/interfaces/fsl/preprocess.py b/nipype/interfaces/fsl/preprocess.py index eaab2a830e..808358a371 100644 --- a/nipype/interfaces/fsl/preprocess.py +++ b/nipype/interfaces/fsl/preprocess.py @@ -118,15 +118,17 @@ class BET(FSLCommand): """Use FSL BET command for skull stripping. For complete details, see the `BET Documentation. 
- `_ + `_ Examples -------- >>> from nipype.interfaces import fsl - >>> from nipype.testing import example_data >>> btr = fsl.BET() - >>> btr.inputs.in_file = example_data('structural.nii') + >>> btr.inputs.in_file = 'structural.nii' >>> btr.inputs.frac = 0.7 + >>> btr.inputs.out_file = 'brain_anat.nii' + >>> btr.cmdline # doctest: +ALLOW_UNICODE + 'bet structural.nii brain_anat.nii -f 0.70' >>> res = btr.run() # doctest: +SKIP """ @@ -275,7 +277,7 @@ class FASTOutputSpec(TraitedSpec): mixeltype = File(desc="path/name of mixeltype volume file _mixeltype") - partial_volume_map = File(desc="path/name of partial volume file _pveseg") + partial_volume_map = File(desc='path/name of partial volume file _pveseg') partial_volume_files = OutputMultiPath(File( desc='path/name of partial volumes files one for each class, _pve_x')) @@ -288,18 +290,17 @@ class FAST(FSLCommand): """ Use FSL FAST for segmenting and bias correction. For complete details, see the `FAST Documentation. - `_ + `_ Examples -------- >>> from nipype.interfaces import fsl - >>> from nipype.testing import example_data - - Assign options through the ``inputs`` attribute: - >>> fastr = fsl.FAST() - >>> fastr.inputs.in_files = example_data('structural.nii') - >>> out = fastr.run() #doctest: +SKIP + >>> fastr.inputs.in_files = 'structural.nii' + >>> fastr.inputs.out_basename = 'fast_' + >>> fastr.cmdline # doctest: +ALLOW_UNICODE + 'fast -o fast_ -S 1 structural.nii' + >>> out = fastr.run() # doctest: +SKIP """ _cmd = 'fast' @@ -308,12 +309,12 @@ class FAST(FSLCommand): def _format_arg(self, name, spec, value): # first do what should be done in general - formated = super(FAST, self)._format_arg(name, spec, value) + formatted = super(FAST, self)._format_arg(name, spec, value) if name == 'in_files': # FAST needs the -S parameter value to correspond to the number # of input images, otherwise it will ignore all but the first - formated = "-S %d %s" % (len(value), formated) - return formated + formatted = "-S %d 
%s" % (len(value), formatted) + return formatted def _list_outputs(self): outputs = self.output_spec().get() @@ -526,7 +527,7 @@ class FLIRT(FSLCommand): """Use FSL FLIRT for coregistration. For complete details, see the `FLIRT Documentation. - `_ + `_ To print out the command line help, use: fsl.FLIRT().inputs_help() @@ -665,14 +666,18 @@ class MCFLIRT(FSLCommand): """Use FSL MCFLIRT to do within-modality motion correction. For complete details, see the `MCFLIRT Documentation. - `_ + `_ Examples -------- >>> from nipype.interfaces import fsl - >>> from nipype.testing import example_data - >>> mcflt = fsl.MCFLIRT(in_file=example_data('functional.nii'), cost='mutualinfo') - >>> res = mcflt.run() # doctest: +SKIP + >>> mcflt = fsl.MCFLIRT() + >>> mcflt.inputs.in_file = 'functional.nii' + >>> mcflt.inputs.cost = 'mutualinfo' + >>> mcflt.inputs.out_file = 'moco.nii' + >>> mcflt.cmdline # doctest: +ALLOW_UNICODE + 'mcflirt -in functional.nii -cost mutualinfo -out moco.nii' + >>> res = mcflt.run() # doctest: +SKIP """ _cmd = 'mcflirt' @@ -751,7 +756,7 @@ class FNIRTInputSpec(FSLCommandInputSpec): inwarp_file = File(exists=True, argstr='--inwarp=%s', desc='name of file containing initial non-linear warps') in_intensitymap_file = traits.List(File(exists=True), argstr='--intin=%s', - copyfiles=False, minlen=1, maxlen=2, + copyfile=False, minlen=1, maxlen=2, desc=('name of file/files containing ' 'initial intensity mapping ' 'usually generated by previous ' @@ -918,6 +923,9 @@ class FNIRTOutputSpec(TraitedSpec): class FNIRT(FSLCommand): """Use FSL FNIRT for non-linear registration. + For complete details, see the `MCFLIRT Documentation. + `_ + Examples -------- >>> from nipype.interfaces import fsl @@ -1218,6 +1226,9 @@ class SUSANOutputSpec(TraitedSpec): class SUSAN(FSLCommand): """ use FSL SUSAN to perform smoothing + For complete details, see the `MCFLIRT Documentation. 
+ `_ + Examples -------- diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyXfm.py b/nipype/interfaces/fsl/tests/test_auto_ApplyXfm.py index 3186317333..cf67b12b2b 100644 --- a/nipype/interfaces/fsl/tests/test_auto_ApplyXfm.py +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyXfm.py @@ -1,9 +1,9 @@ # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from __future__ import unicode_literals -from ..preprocess import ApplyXfm +from ..preprocess import ApplyXFM -def test_ApplyXfm_inputs(): +def test_ApplyXFM_inputs(): input_map = dict(angle_rep=dict(argstr='-anglerep %s', ), apply_isoxfm=dict(argstr='-applyisoxfm %f', @@ -145,19 +145,19 @@ def test_ApplyXfm_inputs(): min_ver='5.0.0', ), ) - inputs = ApplyXfm.input_spec() + inputs = ApplyXFM.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): assert getattr(inputs.traits()[key], metakey) == value -def test_ApplyXfm_outputs(): +def test_ApplyXFM_outputs(): output_map = dict(out_file=dict(), out_log=dict(), out_matrix_file=dict(), ) - outputs = ApplyXfm.output_spec() + outputs = ApplyXFM.output_spec() for key, metadata in list(output_map.items()): for metakey, value in list(metadata.items()): diff --git a/nipype/interfaces/fsl/tests/test_auto_FNIRT.py b/nipype/interfaces/fsl/tests/test_auto_FNIRT.py index 7a93e351ed..8e4cf47fc3 100644 --- a/nipype/interfaces/fsl/tests/test_auto_FNIRT.py +++ b/nipype/interfaces/fsl/tests/test_auto_FNIRT.py @@ -48,7 +48,7 @@ def test_FNIRT_inputs(): sep=',', ), in_intensitymap_file=dict(argstr='--intin=%s', - copyfiles=False, + copyfile=False, ), inmask_file=dict(argstr='--inmask=%s', ), diff --git a/nipype/interfaces/fsl/tests/test_auto_Level1Design.py b/nipype/interfaces/fsl/tests/test_auto_Level1Design.py index cc6402fb1a..f5fcfe4093 100644 --- a/nipype/interfaces/fsl/tests/test_auto_Level1Design.py +++ b/nipype/interfaces/fsl/tests/test_auto_Level1Design.py @@ -14,8 +14,7 @@ def test_Level1Design_inputs(): ), 
model_serial_correlations=dict(mandatory=True, ), - orthogonalization=dict(mandatory=False, - ), + orthogonalization=dict(), session_info=dict(mandatory=True, ), ) diff --git a/nipype/interfaces/fsl/tests/test_maths.py b/nipype/interfaces/fsl/tests/test_maths.py index e2c6c93bb7..408a74712f 100644 --- a/nipype/interfaces/fsl/tests/test_maths.py +++ b/nipype/interfaces/fsl/tests/test_maths.py @@ -12,7 +12,7 @@ from nipype.interfaces.fsl import no_fsl import pytest -from nipype.testing.fixtures import create_files_in_directory_plus_output_type +from nipype.testing.fixtures import create_files_in_directory_plus_output_type @pytest.mark.skipif(no_fsl(), reason="fsl is not installed") diff --git a/nipype/interfaces/fsl/tests/test_preprocess.py b/nipype/interfaces/fsl/tests/test_preprocess.py index f9242c4b26..e2ef8962a7 100644 --- a/nipype/interfaces/fsl/tests/test_preprocess.py +++ b/nipype/interfaces/fsl/tests/test_preprocess.py @@ -569,10 +569,3 @@ def test_first_genfname(): value = first._gen_fname(name='original_segmentations') expected_value = os.path.abspath('segment_all_none_origsegs.nii.gz') assert value == expected_value - - -@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") -def test_deprecation(): - interface = fsl.ApplyXfm() - assert isinstance(interface, fsl.ApplyXFM) - diff --git a/nipype/interfaces/tests/test_base.py b/nipype/interfaces/tests/test_base.py index b4d45c47f4..e27779ce02 100644 --- a/nipype/interfaces/tests/test_base.py +++ b/nipype/interfaces/tests/test_base.py @@ -209,7 +209,8 @@ class spec2(nib.CommandLineInputSpec): position=2) doo = nib.File(exists=True, argstr="%s", position=1) goo = traits.Int(argstr="%d", position=4) - poo = nib.File(name_source=['goo'], hash_files=False, argstr="%s", position=3) + poo = nib.File(name_source=['goo'], hash_files=False, argstr="%s", + position=3) class TestName(nib.CommandLine): _cmd = "mycommand" @@ -218,6 +219,7 @@ class TestName(nib.CommandLine): testobj.inputs.doo = tmp_infile 
testobj.inputs.goo = 99 assert '%s_generated' % nme in testobj.cmdline + assert '%d_generated' % testobj.inputs.goo in testobj.cmdline testobj.inputs.moo = "my_%s_template" assert 'my_%s_template' % nme in testobj.cmdline diff --git a/nipype/pipeline/engine/tests/test_engine.py b/nipype/pipeline/engine/tests/test_engine.py index 32271799b8..43345bb8c6 100644 --- a/nipype/pipeline/engine/tests/test_engine.py +++ b/nipype/pipeline/engine/tests/test_engine.py @@ -326,14 +326,14 @@ def test_doubleconnect(): flow1 = pe.Workflow(name='test') flow1.connect(a, 'a', b, 'a') x = lambda: flow1.connect(a, 'b', b, 'a') - with pytest.raises(Exception) as excinfo: + with pytest.raises(Exception) as excinfo: x() assert "Trying to connect" in str(excinfo.value) c = pe.Node(IdentityInterface(fields=['a', 'b']), name='c') flow1 = pe.Workflow(name='test2') x = lambda: flow1.connect([(a, c, [('b', 'b')]), (b, c, [('a', 'b')])]) - with pytest.raises(Exception) as excinfo: + with pytest.raises(Exception) as excinfo: x() assert "Trying to connect" in str(excinfo.value) @@ -481,7 +481,7 @@ def func1(in1): name='n1') n2.inputs.in1 = [[1, [2]], 3, [4, 5]] - with pytest.raises(Exception) as excinfo: + with pytest.raises(Exception) as excinfo: n2.run() assert "can only concatenate list" in str(excinfo.value) @@ -518,7 +518,7 @@ def func2(a): # create dummy distributed plugin class from nipype.pipeline.plugins.base import DistributedPluginBase - # create a custom exception + # create a custom exception class EngineTestException(Exception): pass @@ -529,7 +529,7 @@ def _submit_job(self, node, updatehash=False): # check if a proper exception is raised with pytest.raises(EngineTestException) as excinfo: w1.run(plugin=RaiseError()) - assert 'Submit called' == str(excinfo.value) + assert 'Submit called' == str(excinfo.value) # rerun to ensure we have outputs w1.run(plugin='Linear') @@ -539,7 +539,7 @@ def _submit_job(self, node, updatehash=False): 'crashdump_dir': wd} w1.run(plugin=RaiseError()) - + 
def test_old_config(tmpdir): wd = str(tmpdir) @@ -607,7 +607,7 @@ def func1(in1): w1.config['execution'].update(**{'stop_on_first_rerun': True}) w1.run() - + def test_parameterize_dirs_false(tmpdir): from ....interfaces.utility import IdentityInterface @@ -665,7 +665,7 @@ def func1(in1): # test running the workflow on serial conditions w1.run(plugin='MultiProc') - + def test_write_graph_runs(tmpdir): os.chdir(str(tmpdir)) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index ca5cd66f07..6d27e56aa0 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -31,6 +31,7 @@ related_filetype_sets = [ ('.hdr', '.img', '.mat'), + ('.nii', '.mat'), ('.BRIK', '.HEAD'), ] diff --git a/nipype/utils/tests/test_filemanip.py b/nipype/utils/tests/test_filemanip.py index 644a055f4b..eba0794e52 100644 --- a/nipype/utils/tests/test_filemanip.py +++ b/nipype/utils/tests/test_filemanip.py @@ -306,7 +306,7 @@ def touch(fname): else: assert False, "Should raise OSError on missing dependency" - shutil.rmtree(tmpdir) + shutil.rmtree(tmpdir) def test_json(): @@ -324,7 +324,7 @@ def test_json(): ('/path/test.hdr', 3, ['/path/test.hdr', '/path/test.img', '/path/test.mat']), ('/path/test.BRIK', 2, ['/path/test.BRIK', '/path/test.HEAD']), ('/path/test.HEAD', 2, ['/path/test.BRIK', '/path/test.HEAD']), - ('/path/foo.nii', 1, []) + ('/path/foo.nii', 2, ['/path/foo.nii', '/path/foo.mat']) ]) def test_related_files(file, length, expected_files): related_files = get_related_files(file) diff --git a/nipype/workflows/dmri/fsl/utils.py b/nipype/workflows/dmri/fsl/utils.py index 9c0ba6ab32..d2ea118a9a 100644 --- a/nipype/workflows/dmri/fsl/utils.py +++ b/nipype/workflows/dmri/fsl/utils.py @@ -96,7 +96,6 @@ def dwi_flirt(name='DWICoregistration', excl_nodiff=False, dilate = pe.Node(fsl.maths.MathsCommand( nan2zeros=True, args='-kernel sphere 5 -dilM'), name='MskDilate') split = pe.Node(fsl.Split(dimension='t'), name='SplitDWIs') - pick_ref = pe.Node(niu.Select(), 
name='Pick_b0') n4 = pe.Node(ants.N4BiasFieldCorrection(dimension=3), name='Bias') enhb0 = pe.Node(niu.Function( input_names=['in_file', 'in_mask', 'clip_limit'], diff --git a/requirements.txt b/requirements.txt index f16073919e..6953af5507 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,7 +8,6 @@ future>=0.15.2 simplejson>=3.8.0 prov>=1.4.0 click>=6.6.0 -xvfbwrapper psutil funcsigs configparser diff --git a/rtd_requirements.txt b/rtd_requirements.txt index 43acce5e1f..1f60cd351f 100644 --- a/rtd_requirements.txt +++ b/rtd_requirements.txt @@ -9,7 +9,6 @@ pytest-cov future>=0.15.2 simplejson>=3.8.0 prov>=1.4.0 -xvfbwrapper psutil funcsigs configparser diff --git a/setup.py b/setup.py index 4bf982902f..79941f0625 100755 --- a/setup.py +++ b/setup.py @@ -97,6 +97,9 @@ def main(): pjoin('testing', 'data', 'dicomdir', '*'), pjoin('testing', 'data', 'bedpostxout', '*'), pjoin('testing', 'data', 'tbss_dir', '*'), + pjoin('testing', 'data', 'brukerdir', '*'), + pjoin('testing', 'data', 'brukerdir', 'pdata', '*'), + pjoin('testing', 'data', 'brukerdir', 'pdata', '1', '*'), pjoin('workflows', 'data', '*'), pjoin('pipeline', 'engine', 'report_template.html'), pjoin('external', 'd3.js'), @@ -130,7 +133,7 @@ def main(): install_requires=ldict['REQUIRES'], setup_requires=['future', 'configparser'], provides=ldict['PROVIDES'], - packages=find_packages(exclude=['*.tests']), + packages=find_packages(), package_data={'nipype': testdatafiles}, scripts=glob('bin/*'), cmdclass={'build_py': BuildWithCommitInfoCommand},