Skip to content

DVARS fix & improvements #1827

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 12 commits into from
Feb 23, 2017
2 changes: 2 additions & 0 deletions CHANGES
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
Upcoming release 0.13
=====================

* ENH: DVARS includes intensity normalization feature - turned on by default (https://github.com/nipy/nipype/pull/1827)
* FIX: DVARS is correctly using sum of squares instead of standard deviation (https://github.com/nipy/nipype/pull/1827)
* FIX: Issues in Docker image permissions, and docker documentation (https://github.com/nipy/nipype/pull/1825)
* ENH: Revised all Dockerfiles and automated deployment to Docker Hub
from CircleCI (https://github.com/nipy/nipype/pull/1815)
Expand Down
66 changes: 34 additions & 32 deletions nipype/algorithms/confounds.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,16 @@ class ComputeDVARSInputSpec(BaseInterfaceInputSpec):
desc='output figure size')
figformat = traits.Enum('png', 'pdf', 'svg', usedefault=True,
desc='output format for figures')
intensity_normalization = traits.Float(1000.0, usedefault=True,
desc='Divide value in each voxel at each timepoint '
'by the median calculated across all voxels'
'and timepoints within the mask (if specified)'
'and then multiply by the value specified by'
'this parameter. By using the default (1000)' \
'output DVARS will be expressed in ' \
'x10 % BOLD units compatible with Power et al.' \
'2012. Set this to 0 to disable intensity' \
'normalization altogether.')



Expand Down Expand Up @@ -128,7 +138,8 @@ def _gen_fname(self, suffix, ext=None):

def _run_interface(self, runtime):
dvars = compute_dvars(self.inputs.in_file, self.inputs.in_mask,
remove_zerovariance=self.inputs.remove_zerovariance)
remove_zerovariance=self.inputs.remove_zerovariance,
intensity_normalization=self.inputs.intensity_normalization)

(self._results['avg_std'],
self._results['avg_nstd'],
Expand Down Expand Up @@ -595,7 +606,8 @@ def regress_poly(degree, data, remove_mean=True, axis=-1):
# Back to original shape
return regressed_data.reshape(datashape)

def compute_dvars(in_file, in_mask, remove_zerovariance=False):
def compute_dvars(in_file, in_mask, remove_zerovariance=False,
intensity_normalization=1000):
"""
Compute the :abbr:`DVARS (D referring to temporal
derivative of timecourses, VARS referring to RMS variance over voxels)`
Expand Down Expand Up @@ -636,59 +648,49 @@ def compute_dvars(in_file, in_mask, remove_zerovariance=False):
raise RuntimeError(
"Input fMRI dataset should be 4-dimensional")

# Robust standard deviation
func_sd = (np.percentile(func, 75, axis=3) -
np.percentile(func, 25, axis=3)) / 1.349
func_sd[mask <= 0] = 0

if remove_zerovariance:
# Remove zero-variance voxels across time axis
mask = zero_remove(func_sd, mask)

idx = np.where(mask > 0)
mfunc = func[idx[0], idx[1], idx[2], :]

# Demean
mfunc = regress_poly(0, mfunc, remove_mean=True).astype(np.float32)
if intensity_normalization != 0:
mfunc = (mfunc / np.median(mfunc)) * intensity_normalization

# Robust standard deviation (we are using "lower" interpolation
# because this is what FSL is doing
func_sd = (np.percentile(mfunc, 75, axis=1, interpolation="lower") -
np.percentile(mfunc, 25, axis=1, interpolation="lower")) / 1.349

if remove_zerovariance:
mfunc = mfunc[func_sd != 0, :]
func_sd = func_sd[func_sd != 0]

# Compute (non-robust) estimate of lag-1 autocorrelation
ar1 = np.apply_along_axis(AR_est_YW, 1, mfunc, 1)[:, 0]
ar1 = np.apply_along_axis(AR_est_YW, 1,
regress_poly(0, mfunc, remove_mean=True).astype(
np.float32), 1)[:, 0]

# Compute (predicted) standard deviation of temporal difference time series
diff_sdhat = np.squeeze(np.sqrt(((1 - ar1) * 2).tolist())) * func_sd[mask > 0].reshape(-1)
diff_sdhat = np.squeeze(np.sqrt(((1 - ar1) * 2).tolist())) * func_sd
diff_sd_mean = diff_sdhat.mean()

# Compute temporal difference time series
func_diff = np.diff(mfunc, axis=1)

# DVARS (no standardization)
dvars_nstd = func_diff.std(axis=0)
dvars_nstd = np.sqrt(np.square(func_diff).mean(axis=0))

# standardization
dvars_stdz = dvars_nstd / diff_sd_mean

with warnings.catch_warnings(): # catch, e.g., divide by zero errors
with warnings.catch_warnings(): # catch, e.g., divide by zero errors
warnings.filterwarnings('error')

# voxelwise standardization
diff_vx_stdz = func_diff / np.array([diff_sdhat] * func_diff.shape[-1]).T
dvars_vx_stdz = diff_vx_stdz.std(axis=0, ddof=1)
diff_vx_stdz = np.square(
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

**2 would be more readable to me than np.square, but it is just cosmetic. Maybe you prefer np.square :)

func_diff / np.array([diff_sdhat] * func_diff.shape[-1]).T)
dvars_vx_stdz = np.sqrt(diff_vx_stdz.mean(axis=0))

return (dvars_stdz, dvars_nstd, dvars_vx_stdz)

def zero_remove(data, mask):
    """
    Return a copy of the brain mask with zero-valued voxels masked out.

    Any voxel whose value in ``data`` is exactly zero (e.g. a voxel with
    zero temporal standard deviation after motion correction) is removed
    from the mask. The input arrays are not modified.

    :param numpy.ndarray data: e.g. voxelwise stddev of an fMRI dataset, after motion correction
    :param numpy.ndarray mask: brain mask (same dimensions as data)
    :return: the mask with any additional zero voxels removed (same dimensions as inputs)
    :rtype: numpy.ndarray

    """
    # Work on a copy so the caller's mask is left untouched.
    updated_mask = mask.copy()
    zero_voxels = (data == 0)
    updated_mask[zero_voxels] = 0
    return updated_mask

def plot_confound(tseries, figsize, name, units=None,
series_tr=None, normalize=False):
Expand Down
24 changes: 21 additions & 3 deletions nipype/algorithms/tests/test_confounds.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,9 +39,27 @@ def test_dvars(tmpdir):
ground_truth = np.loadtxt(example_data('ds003_sub-01_mc.DVARS'))
dvars = ComputeDVARS(in_file=example_data('ds003_sub-01_mc.nii.gz'),
in_mask=example_data('ds003_sub-01_mc_brainmask.nii.gz'),
save_all=True)
save_all=True,
intensity_normalization=0)
os.chdir(str(tmpdir))
res = dvars.run()

dv1 = np.loadtxt(res.outputs.out_std)
assert (np.abs(dv1 - ground_truth).sum()/ len(dv1)) < 0.05
dv1 = np.loadtxt(res.outputs.out_all, skiprows=1)
assert (np.abs(dv1[:, 0] - ground_truth[:, 0]).sum()/ len(dv1)) < 0.05

assert (np.abs(dv1[:, 1] - ground_truth[:, 1]).sum() / len(dv1)) < 0.05

assert (np.abs(dv1[:, 2] - ground_truth[:, 2]).sum() / len(dv1)) < 0.05

dvars = ComputeDVARS(in_file=example_data('ds003_sub-01_mc.nii.gz'),
in_mask=example_data(
'ds003_sub-01_mc_brainmask.nii.gz'),
save_all=True)
res = dvars.run()

dv1 = np.loadtxt(res.outputs.out_all, skiprows=1)
assert (np.abs(dv1[:, 0] - ground_truth[:, 0]).sum() / len(dv1)) < 0.05

assert (np.abs(dv1[:, 1] - ground_truth[:, 1]).sum() / len(dv1)) > 0.05

assert (np.abs(dv1[:, 2] - ground_truth[:, 2]).sum() / len(dv1)) < 0.05
38 changes: 19 additions & 19 deletions nipype/testing/data/ds003_sub-01_mc.DVARS
Original file line number Diff line number Diff line change
@@ -1,19 +1,19 @@
1.54062
1.31972
0.921541
1.26107
0.99986
0.929237
0.715096
1.05153
1.29109
0.700641
0.844657
0.884972
0.807096
0.881976
0.843652
0.780457
1.05401
1.32161
0.686738
2.02915 5.2016 1.74221
1.54871 3.97002 1.18108
0.921419 2.362 0.784497
1.26058 3.23142 0.734119
1.00079 2.56548 0.787452
0.929074 2.38163 0.828835
0.741207 1.90004 0.746263
1.07913 2.7663 0.779829
1.2969 3.32452 0.73856
0.767387 1.96715 0.772047
0.847059 2.17138 0.774103
0.984061 2.52258 0.88097
0.852897 2.18635 0.794655
0.927778 2.3783 0.756786
0.857544 2.19826 0.796125
0.780098 1.99973 0.731265
1.05496 2.70434 0.788584
1.32099 3.38628 0.831803
0.691529 1.77269 0.738788