Skip to content

WIP logging and validations in CompCor, SignalExtraction, ApplyTopUp #1676

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 31 commits into from
Nov 9, 2016
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
31 commits
Select commit Hold shift + click to select a range
73b9a0e
debug tcompcor, add log messages and tests
Oct 6, 2016
e811b96
save outputs to cwd
Oct 6, 2016
0c7c649
explicitly check + error out if mask file and func file don't match in…
Oct 7, 2016
8313923
python3, pep8
Oct 7, 2016
974a42b
use abs paths; +appropriate way to test this
Oct 7, 2016
741405f
make transforms arg in ApplyTransforms opt
Oct 8, 2016
d38562c
allow ANTS ApplyTransforms to use identity transform
Oct 8, 2016
a512faf
traits mandatory=False is incorrect; ignore unicode in doctests
Oct 8, 2016
6448ee8
test specs auto
Oct 8, 2016
e98bd95
Add more informative error msg
Oct 8, 2016
4f27943
less mysterious error messages
Oct 10, 2016
b04d4e7
better test
Oct 10, 2016
7c7dcd2
check and error if input to fsl ApplyTopUp is not 4 dimensional
Oct 11, 2016
8cfa00f
don't load the whole thing into memory
Oct 11, 2016
dd8dfb4
make specs
Oct 12, 2016
a0a31c4
merge with master
Oct 12, 2016
7f84dff
Merge branch 'master' of http://github.com/nipy/nipype into debugging
Oct 12, 2016
b3187f3
pull from nipy/nipype master
Oct 15, 2016
bba591b
add headers to outputs of compcor, framewise displacement + test fix
Oct 17, 2016
8a660dc
Merge http://github.com/nipy/nipype into debugging
Oct 17, 2016
89b9856
revert 4d validation, fix input spec desc
Oct 18, 2016
a42439f
chdir back to original dir before deleting tempdir
Oct 18, 2016
b85fd5f
fix up test (e.g. pep8)
Oct 18, 2016
35a0bb3
Merge http://github.com/nipy/nipype into debugging
Oct 19, 2016
4f80b2a
use from io import open
Oct 24, 2016
c217a23
revert identity transform
Oct 24, 2016
cd5385e
Merge http://github.com/nipy/nipype into debugging
Oct 24, 2016
9239f7b
try longer timeout
Oct 25, 2016
d5e1a0b
specify tab delimiter
Oct 26, 2016
b48395d
don't let divide by zero errors pass by
Oct 28, 2016
fb3c550
don't calculate var/stddev twice
Oct 31, 2016
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion circle.yml
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ test:
- docker run -v /etc/localtime:/etc/localtime:ro -e FSL_COURSE_DATA="/root/examples/nipype-fsl_course_data" -v ~/examples:/root/examples:ro -v ~/scratch:/scratch -w /root/src/nipype nipype/nipype_test:py35 /usr/bin/run_nosetests.sh py35 :
timeout: 2600
- docker run -v /etc/localtime:/etc/localtime:ro -e FSL_COURSE_DATA="/root/examples/nipype-fsl_course_data" -v ~/examples:/root/examples:ro -v ~/scratch:/scratch -w /root/src/nipype nipype/nipype_test:py27 /usr/bin/run_nosetests.sh py27 :
timeout: 2600
timeout: 5200
- docker run -v /etc/localtime:/etc/localtime:ro -v ~/examples:/root/examples:ro -v ~/scratch:/scratch -w /scratch nipype/nipype_test:py35 /usr/bin/run_examples.sh test_spm Linear /root/examples/ workflow3d :
timeout: 1600
- docker run -v /etc/localtime:/etc/localtime:ro -v ~/examples:/root/examples:ro -v ~/scratch:/scratch -w /scratch nipype/nipype_test:py35 /usr/bin/run_examples.sh test_spm Linear /root/examples/ workflow4d :
Expand Down
88 changes: 63 additions & 25 deletions nipype/algorithms/confounds.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@
class ComputeDVARSInputSpec(BaseInterfaceInputSpec):
in_file = File(exists=True, mandatory=True, desc='functional data, after HMC')
in_mask = File(exists=True, mandatory=True, desc='a brain mask')
remove_zerovariance = traits.Bool(False, usedefault=True,
remove_zerovariance = traits.Bool(True, usedefault=True,
desc='remove voxels with zero variance')
save_std = traits.Bool(True, usedefault=True,
desc='save standardized DVARS')
Expand Down Expand Up @@ -255,7 +255,7 @@ def _run_interface(self, runtime):
'out_file': op.abspath(self.inputs.out_file),
'fd_average': float(fd_res.mean())
}
np.savetxt(self.inputs.out_file, fd_res)
np.savetxt(self.inputs.out_file, fd_res, header='framewise_displacement')

if self.inputs.save_plot:
tr = None
Expand Down Expand Up @@ -291,6 +291,8 @@ class CompCorInputSpec(BaseInterfaceInputSpec):
'pre-component extraction')
regress_poly_degree = traits.Range(low=1, default=1, usedefault=True,
desc='the degree polynomial to use')
header = traits.Str(desc='the desired header for the output tsv file (one column).'
'If undefined, will default to "CompCor"')

class CompCorOutputSpec(TraitedSpec):
components_file = File(exists=True,
Expand Down Expand Up @@ -329,6 +331,13 @@ class CompCor(BaseInterface):
def _run_interface(self, runtime):
imgseries = nb.load(self.inputs.realigned_file).get_data()
mask = nb.load(self.inputs.mask_file).get_data()

if imgseries.shape[:3] != mask.shape:
raise ValueError('Inputs for CompCor, func {} and mask {}, do not have matching '
'spatial dimensions ({} and {}, respectively)'
.format(self.inputs.realigned_file, self.inputs.mask_file,
imgseries.shape[:3], mask.shape))

voxel_timecourses = imgseries[mask > 0]
# Zero-out any bad values
voxel_timecourses[np.isnan(np.sum(voxel_timecourses, axis=1)), :] = 0
Expand All @@ -352,7 +361,10 @@ def _run_interface(self, runtime):
u, _, _ = linalg.svd(M, full_matrices=False)
components = u[:, :self.inputs.num_components]
components_file = os.path.join(os.getcwd(), self.inputs.components_file)
np.savetxt(components_file, components, fmt=b"%.10f")

self._set_header()
np.savetxt(components_file, components, fmt=b"%.10f", delimiter='\t',
header=self._make_headers(components.shape[1]))
return runtime

def _list_outputs(self):
Expand All @@ -367,6 +379,26 @@ def _compute_tSTD(self, M, x):
stdM[np.isnan(stdM)] = x
return stdM

def _set_header(self, header='CompCor'):
    # Default the user-facing ``header`` input trait to *header* when the
    # caller left it undefined; subclasses call this with their own label
    # (e.g. 'aCompCor', 'tCompCor') so the output TSV columns are named
    # after the variant that produced them.
    self.inputs.header = self.inputs.header if isdefined(self.inputs.header) else header

def _make_headers(self, num_col):
    # Build the tab-separated header row for the components file: one
    # "<header><index>" label per component column, e.g.
    # "CompCor0\tCompCor1\t..." (passed to np.savetxt's ``header=``).
    headers = []
    for i in range(num_col):
        headers.append(self.inputs.header + str(i))
    return '\t'.join(headers)


class ACompCor(CompCor):
    ''' Anatomical compcor; for input/output, see CompCor.
    If the mask provided is an anatomical mask, CompCor == ACompCor '''

    def __init__(self, *args, **kwargs):
        ''' exactly the same as compcor except the header '''
        super(ACompCor, self).__init__(*args, **kwargs)
        # Only difference from the base class: label output columns 'aCompCor'.
        self._set_header('aCompCor')


class TCompCorInputSpec(CompCorInputSpec):
# and all the fields in CompCorInputSpec
percentile_threshold = traits.Range(low=0., high=1., value=.02,
Expand Down Expand Up @@ -401,6 +433,11 @@ class TCompCor(CompCor):
def _run_interface(self, runtime):
imgseries = nb.load(self.inputs.realigned_file).get_data()

if imgseries.ndim != 4:
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why are you using the numpy array .ndim property here, while in your modification of ApplyTopup you changed the same check to use the nibabel header object? Which one is the appropriate option? If both are equivalent, I prefer this one (numpy.ndim) over your change to ApplyTopup.

Copy link
Contributor Author

@berleant berleant Oct 12, 2016

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The difference is that using numpy requires loading the data into numpy memory, which is expensive. In fact, in ApplyTopUp, it was erroring out. Here, in TCompCor we are loading data into numpy memory anyway, so it is not an additional cost.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks! That's very sound.

raise ValueError('tCompCor expected a 4-D nifti file. Input {} has {} dimensions '
'(shape {})'
.format(self.inputs.realigned_file, imgseries.ndim, imgseries.shape))

# From the paper:
# "For each voxel time series, the temporal standard deviation is
# defined as the standard deviation of the time series after the removal
Expand All @@ -419,18 +456,19 @@ def _run_interface(self, runtime):
threshold_index = int(num_voxels * (1. - self.inputs.percentile_threshold))
threshold_std = sortSTD[threshold_index]
mask = tSTD >= threshold_std
mask = mask.astype(int)
mask = mask.astype(int).T

# save mask
mask_file = 'mask.nii'
mask_file = os.path.abspath('mask.nii')
nb.nifti1.save(nb.Nifti1Image(mask, np.eye(4)), mask_file)
IFLOG.debug('tCompcor computed and saved mask of shape {} to mask_file {}'
.format(mask.shape, mask_file))
self.inputs.mask_file = mask_file
self._set_header('tCompCor')

super(TCompCor, self)._run_interface(runtime)
return runtime

ACompCor = CompCor

class TSNRInputSpec(BaseInterfaceInputSpec):
in_file = InputMultiPath(File(exists=True), mandatory=True,
desc='realigned 4D file or a list of 3D files')
Expand Down Expand Up @@ -512,6 +550,8 @@ def regress_poly(degree, data, remove_mean=True, axis=-1):
If remove_mean is True (default), the data is demeaned (i.e. degree 0).
If remove_mean is false, the data is not.
'''
IFLOG.debug('Performing polynomial regression on data of shape ' + str(data.shape))

datashape = data.shape
timepoints = datashape[axis]

Expand Down Expand Up @@ -570,6 +610,7 @@ def compute_dvars(in_file, in_mask, remove_zerovariance=False):
import numpy as np
import nibabel as nb
from nitime.algorithms import AR_est_YW
import warnings

func = nb.load(in_file).get_data().astype(np.float32)
mask = nb.load(in_mask).get_data().astype(np.uint8)
Expand All @@ -585,7 +626,7 @@ def compute_dvars(in_file, in_mask, remove_zerovariance=False):

if remove_zerovariance:
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We took a closer look on this (w. @chrisfilo) and I realized that you were 100% right about this. Sorry about that.

Since this remove_zerovariance input does not make any sense, let's get rid of it (including the zero_remove function that is more readable inline).

Again, my apologies for not looking at this more thoroughly before.

# Remove zero-variance voxels across time axis
mask = zero_variance(func, mask)
mask = zero_remove(func_sd, mask)

idx = np.where(mask > 0)
mfunc = func[idx[0], idx[1], idx[2], :]
Expand All @@ -609,31 +650,28 @@ def compute_dvars(in_file, in_mask, remove_zerovariance=False):
# standardization
dvars_stdz = dvars_nstd / diff_sd_mean

# voxelwise standardization
diff_vx_stdz = func_diff / np.array([diff_sdhat] * func_diff.shape[-1]).T
dvars_vx_stdz = diff_vx_stdz.std(axis=0, ddof=1)
with warnings.catch_warnings(): # catch, e.g., divide by zero errors
warnings.filterwarnings('error')

# voxelwise standardization
diff_vx_stdz = func_diff / np.array([diff_sdhat] * func_diff.shape[-1]).T
dvars_vx_stdz = diff_vx_stdz.std(axis=0, ddof=1)

return (dvars_stdz, dvars_nstd, dvars_vx_stdz)

def zero_variance(func, mask):
def zero_remove(data, mask):
"""
Mask out voxels with zero variance across t-axis
Modify inputted mask to also mask out zero values

:param numpy.ndarray func: input fMRI dataset, after motion correction
:param numpy.ndarray mask: 3D brain mask
:return: the 3D mask of voxels with nonzero variance across :math:`t`.
:param numpy.ndarray data: e.g. voxelwise stddev of fMRI dataset, after motion correction
:param numpy.ndarray mask: brain mask (same dimensions as data)
:return: the mask with any additional zero voxels removed (same dimensions as inputs)
:rtype: numpy.ndarray

"""
idx = np.where(mask > 0)
func = func[idx[0], idx[1], idx[2], :]
tvariance = func.var(axis=1)
tv_mask = np.zeros_like(tvariance, dtype=np.uint8)
tv_mask[tvariance > 0] = 1

newmask = np.zeros_like(mask, dtype=np.uint8)
newmask[idx] = tv_mask
return newmask
new_mask = mask.copy()
new_mask[data == 0] = 0
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think we can just do this directly on line 627

return new_mask

def plot_confound(tseries, figsize, name, units=None,
series_tr=None, normalize=False):
Expand Down
64 changes: 45 additions & 19 deletions nipype/algorithms/tests/test_compcor.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
import nibabel as nb
import numpy as np

from ...testing import assert_equal, assert_true, utils
from ...testing import assert_equal, assert_true, utils, assert_in
from ..confounds import CompCor, TCompCor, ACompCor

class TestCompCor(unittest.TestCase):
Expand Down Expand Up @@ -38,25 +38,20 @@ def test_compcor(self):
['0.4206466244', '-0.3361270124'],
['-0.1246655485', '-0.1235705610']]

ccresult = self.run_cc(CompCor(realigned_file=self.realigned_file,
mask_file=self.mask_file),
expected_components)
self.run_cc(CompCor(realigned_file=self.realigned_file, mask_file=self.mask_file),
expected_components)

accresult = self.run_cc(ACompCor(realigned_file=self.realigned_file,
mask_file=self.mask_file,
components_file='acc_components_file'),
expected_components)

assert_equal(os.path.getsize(ccresult.outputs.components_file),
os.path.getsize(accresult.outputs.components_file))
self.run_cc(ACompCor(realigned_file=self.realigned_file, mask_file=self.mask_file,
components_file='acc_components_file'),
expected_components, 'aCompCor')

def test_tcompcor(self):
ccinterface = TCompCor(realigned_file=self.realigned_file, percentile_threshold=0.75)
self.run_cc(ccinterface, [['-0.1114536190', '-0.4632908609'],
['0.4566907310', '0.6983205193'],
['-0.7132557407', '0.1340170559'],
['0.5022537643', '-0.5098322262'],
['-0.1342351356', '0.1407855119']])
['-0.1342351356', '0.1407855119']], 'tCompCor')

def test_tcompcor_no_percentile(self):
ccinterface = TCompCor(realigned_file=self.realigned_file)
Expand All @@ -74,7 +69,29 @@ def test_compcor_no_regress_poly(self):
['-0.5367548139', '0.0059943226'],
['-0.0520809054', '0.2940637551']])

def run_cc(self, ccinterface, expected_components):
def test_tcompcor_asymmetric_dim(self):
    # Regression check: TCompCor's saved mask must keep the input's
    # spatial (first three) dimensions for a non-cubic volume, i.e. the
    # mask is not transposed or reshaped on the way out.
    asymmetric_shape = (2, 3, 4, 5)
    asymmetric_data = utils.save_toy_nii(np.zeros(asymmetric_shape), 'asymmetric.nii')

    TCompCor(realigned_file=asymmetric_data).run()
    self.assertEqual(nb.load('mask.nii').get_data().shape, asymmetric_shape[:3])

def test_compcor_bad_input_shapes(self):
    # CompCor must raise a ValueError mentioning "dimensions" when the
    # functional image's spatial shape disagrees with the mask's shape,
    # whether the functional volume is smaller or larger than the mask.
    shape_less_than = (1, 2, 2, 5) # dim 0 is < dim 0 of self.mask_file (2)
    shape_more_than = (3, 3, 3, 5) # dim 0 is > dim 0 of self.mask_file (2)

    for data_shape in (shape_less_than, shape_more_than):
        data_file = utils.save_toy_nii(np.zeros(data_shape), 'temp.nii')
        interface = CompCor(realigned_file=data_file, mask_file=self.mask_file)
        self.assertRaisesRegexp(ValueError, "dimensions", interface.run)

def test_tcompcor_bad_input_dim(self):
    # TCompCor requires a 4-D time series; a 3-D input must raise a
    # ValueError whose message contains "4-D".
    bad_dims = (2, 2, 2)
    data_file = utils.save_toy_nii(np.zeros(bad_dims), 'temp.nii')
    interface = TCompCor(realigned_file=data_file)
    self.assertRaisesRegexp(ValueError, '4-D', interface.run)

def run_cc(self, ccinterface, expected_components, expected_header='CompCor'):
# run
ccresult = ccinterface.run()

Expand All @@ -86,12 +103,21 @@ def run_cc(self, ccinterface, expected_components):
assert_equal(ccinterface.inputs.num_components, 6)

with open(ccresult.outputs.components_file, 'r') as components_file:
components_data = [line.split() for line in components_file]
num_got_components = len(components_data)
assert_true(num_got_components == ccinterface.inputs.num_components
or num_got_components == self.fake_data.shape[3])
first_two = [row[:2] for row in components_data]
assert_equal(first_two, expected_components)
expected_n_components = min(ccinterface.inputs.num_components, self.fake_data.shape[3])

components_data = [line.split('\t') for line in components_file]

header = components_data.pop(0) # the first item will be '#', we can throw it out
expected_header = [expected_header + str(i) for i in range(expected_n_components)]
for i, heading in enumerate(header):
assert_in(expected_header[i], heading)

num_got_timepoints = len(components_data)
assert_equal(num_got_timepoints, self.fake_data.shape[3])
for index, timepoint in enumerate(components_data):
assert_true(len(timepoint) == ccinterface.inputs.num_components
or len(timepoint) == self.fake_data.shape[3])
assert_equal(timepoint[:2], expected_components[index])
return ccresult

def tearDown(self):
Expand Down
16 changes: 15 additions & 1 deletion nipype/algorithms/tests/test_confounds.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,9 @@
from tempfile import mkdtemp
from shutil import rmtree

from nipype.testing import (assert_equal, example_data, skipif, assert_true)
from io import open

from nipype.testing import (assert_equal, example_data, skipif, assert_true, assert_in)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

👍

from nipype.algorithms.confounds import FramewiseDisplacement, ComputeDVARS
import numpy as np

Expand All @@ -24,8 +26,14 @@ def test_fd():
out_file=tempdir + '/fd.txt')
res = fdisplacement.run()

with open(res.outputs.out_file) as all_lines:
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please add from io import open in the beginning to ensure python 2-3 compatibility (http://python-future.org/compatible_idioms.html#file-io-with-open)

for line in all_lines:
yield assert_in, 'framewise_displacement', line
break

yield assert_true, np.allclose(ground_truth, np.loadtxt(res.outputs.out_file), atol=.16)
yield assert_true, np.abs(ground_truth.mean() - res.outputs.fd_average) < 1e-2

rmtree(tempdir)

@skipif(nonitime)
Expand All @@ -35,8 +43,14 @@ def test_dvars():
dvars = ComputeDVARS(in_file=example_data('ds003_sub-01_mc.nii.gz'),
in_mask=example_data('ds003_sub-01_mc_brainmask.nii.gz'),
save_all=True)

origdir = os.getcwd()
os.chdir(tempdir)

res = dvars.run()

dv1 = np.loadtxt(res.outputs.out_std)
yield assert_equal, (np.abs(dv1 - ground_truth).sum()/ len(dv1)) < 0.05, True

os.chdir(origdir)
rmtree(tempdir)
9 changes: 4 additions & 5 deletions nipype/interfaces/ants/resampling.py
Original file line number Diff line number Diff line change
Expand Up @@ -246,9 +246,10 @@ class ApplyTransformsInputSpec(ANTSCommandInputSpec):
interpolation_parameters = traits.Either(traits.Tuple(traits.Int()), # BSpline (order)
traits.Tuple(traits.Float(), # Gaussian/MultiLabel (sigma, alpha)
traits.Float())
)
transforms = InputMultiPath(
File(exists=True), argstr='%s', mandatory=True, desc='transform files: will be applied in reverse order. For example, the last specified transform will be applied first')
)
transforms = InputMultiPath(File(exists=True), argstr='%s', mandatory=True,
desc='transform files: will be applied in reverse order. For '
'example, the last specified transform will be applied first.')
invert_transform_flags = InputMultiPath(traits.Bool())
default_value = traits.Float(0.0, argstr='--default-value %g', usedefault=True)
print_out_composite_warp_file = traits.Bool(False, requires=["output_image"],
Expand Down Expand Up @@ -296,8 +297,6 @@ class ApplyTransforms(ANTSCommand):
'antsApplyTransforms --default-value 0 --dimensionality 3 --input moving1.nii --interpolation BSpline[ 5 ] \
--output deformed_moving1.nii --reference-image fixed1.nii --transform [ ants_Warp.nii.gz, 0 ] \
--transform [ trans.mat, 0 ]'


"""
_cmd = 'antsApplyTransforms'
input_spec = ApplyTransformsInputSpec
Expand Down
2 changes: 1 addition & 1 deletion nipype/interfaces/fsl/epi.py
Original file line number Diff line number Diff line change
Expand Up @@ -307,7 +307,7 @@ def _overload_extension(self, value, name=None):

class ApplyTOPUPInputSpec(FSLCommandInputSpec):
in_files = InputMultiPath(File(exists=True), mandatory=True,
desc='name of 4D file with images',
desc='name of file with images',
argstr='--imain=%s', sep=',')
encoding_file = File(exists=True, mandatory=True,
desc='name of text file with PE directions/times',
Expand Down
Loading