Skip to content

Commit 0ac8143

Browse files
authored
Merge pull request #1827 from chrisfilo/fix/dvars
DVARS fix & improvements
2 parents c234a01 + 473d0dd commit 0ac8143

File tree

4 files changed

+76
-54
lines changed

4 files changed

+76
-54
lines changed

CHANGES

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -1,6 +1,8 @@
11
Upcoming release 0.13
22
=====================
33

4+
* ENH: DVARS includes intensity normalization feature - turned on by default (https://github.com/nipy/nipype/pull/1827)
5+
* FIX: DVARS is correctly using sum of squares instead of standard deviation (https://github.com/nipy/nipype/pull/1827)
46
* ENH: Refactoring of nipype.interfaces.utility (https://github.com/nipy/nipype/pull/1828)
57
* FIX: CircleCI were failing silently. Some fixes to tests (https://github.com/nipy/nipype/pull/1833)
68
* FIX: Issues in Docker image permissions, and docker documentation (https://github.com/nipy/nipype/pull/1825)

nipype/algorithms/confounds.py

Lines changed: 34 additions & 32 deletions
Original file line number | Diff line number | Diff line change
@@ -52,6 +52,16 @@ class ComputeDVARSInputSpec(BaseInterfaceInputSpec):
5252
desc='output figure size')
5353
figformat = traits.Enum('png', 'pdf', 'svg', usedefault=True,
5454
desc='output format for figures')
55+
intensity_normalization = traits.Float(1000.0, usedefault=True,
56+
desc='Divide value in each voxel at each timepoint '
57+
'by the median calculated across all voxels'
58+
'and timepoints within the mask (if specified)'
59+
'and then multiply by the value specified by'
60+
'this parameter. By using the default (1000)' \
61+
'output DVARS will be expressed in ' \
62+
'x10 % BOLD units compatible with Power et al.' \
63+
'2012. Set this to 0 to disable intensity' \
64+
'normalization altogether.')
5565

5666

5767

@@ -128,7 +138,8 @@ def _gen_fname(self, suffix, ext=None):
128138

129139
def _run_interface(self, runtime):
130140
dvars = compute_dvars(self.inputs.in_file, self.inputs.in_mask,
131-
remove_zerovariance=self.inputs.remove_zerovariance)
141+
remove_zerovariance=self.inputs.remove_zerovariance,
142+
intensity_normalization=self.inputs.intensity_normalization)
132143

133144
(self._results['avg_std'],
134145
self._results['avg_nstd'],
@@ -595,7 +606,8 @@ def regress_poly(degree, data, remove_mean=True, axis=-1):
595606
# Back to original shape
596607
return regressed_data.reshape(datashape)
597608

598-
def compute_dvars(in_file, in_mask, remove_zerovariance=False):
609+
def compute_dvars(in_file, in_mask, remove_zerovariance=False,
610+
intensity_normalization=1000):
599611
"""
600612
Compute the :abbr:`DVARS (D referring to temporal
601613
derivative of timecourses, VARS referring to RMS variance over voxels)`
@@ -636,59 +648,49 @@ def compute_dvars(in_file, in_mask, remove_zerovariance=False):
636648
raise RuntimeError(
637649
"Input fMRI dataset should be 4-dimensional")
638650

639-
# Robust standard deviation
640-
func_sd = (np.percentile(func, 75, axis=3) -
641-
np.percentile(func, 25, axis=3)) / 1.349
642-
func_sd[mask <= 0] = 0
643-
644-
if remove_zerovariance:
645-
# Remove zero-variance voxels across time axis
646-
mask = zero_remove(func_sd, mask)
647-
648651
idx = np.where(mask > 0)
649652
mfunc = func[idx[0], idx[1], idx[2], :]
650653

651-
# Demean
652-
mfunc = regress_poly(0, mfunc, remove_mean=True).astype(np.float32)
654+
if intensity_normalization != 0:
655+
mfunc = (mfunc / np.median(mfunc)) * intensity_normalization
656+
657+
# Robust standard deviation (we are using "lower" interpolation
658+
# because this is what FSL is doing
659+
func_sd = (np.percentile(mfunc, 75, axis=1, interpolation="lower") -
660+
np.percentile(mfunc, 25, axis=1, interpolation="lower")) / 1.349
661+
662+
if remove_zerovariance:
663+
mfunc = mfunc[func_sd != 0, :]
664+
func_sd = func_sd[func_sd != 0]
653665

654666
# Compute (non-robust) estimate of lag-1 autocorrelation
655-
ar1 = np.apply_along_axis(AR_est_YW, 1, mfunc, 1)[:, 0]
667+
ar1 = np.apply_along_axis(AR_est_YW, 1,
668+
regress_poly(0, mfunc, remove_mean=True).astype(
669+
np.float32), 1)[:, 0]
656670

657671
# Compute (predicted) standard deviation of temporal difference time series
658-
diff_sdhat = np.squeeze(np.sqrt(((1 - ar1) * 2).tolist())) * func_sd[mask > 0].reshape(-1)
672+
diff_sdhat = np.squeeze(np.sqrt(((1 - ar1) * 2).tolist())) * func_sd
659673
diff_sd_mean = diff_sdhat.mean()
660674

661675
# Compute temporal difference time series
662676
func_diff = np.diff(mfunc, axis=1)
663677

664678
# DVARS (no standardization)
665-
dvars_nstd = func_diff.std(axis=0)
679+
dvars_nstd = np.sqrt(np.square(func_diff).mean(axis=0))
666680

667681
# standardization
668682
dvars_stdz = dvars_nstd / diff_sd_mean
669683

670-
with warnings.catch_warnings(): # catch, e.g., divide by zero errors
684+
with warnings.catch_warnings(): # catch, e.g., divide by zero errors
671685
warnings.filterwarnings('error')
672686

673687
# voxelwise standardization
674-
diff_vx_stdz = func_diff / np.array([diff_sdhat] * func_diff.shape[-1]).T
675-
dvars_vx_stdz = diff_vx_stdz.std(axis=0, ddof=1)
688+
diff_vx_stdz = np.square(
689+
func_diff / np.array([diff_sdhat] * func_diff.shape[-1]).T)
690+
dvars_vx_stdz = np.sqrt(diff_vx_stdz.mean(axis=0))
676691

677692
return (dvars_stdz, dvars_nstd, dvars_vx_stdz)
678693

679-
def zero_remove(data, mask):
680-
"""
681-
Modify inputted mask to also mask out zero values
682-
683-
:param numpy.ndarray data: e.g. voxelwise stddev of fMRI dataset, after motion correction
684-
:param numpy.ndarray mask: brain mask (same dimensions as data)
685-
:return: the mask with any additional zero voxels removed (same dimensions as inputs)
686-
:rtype: numpy.ndarray
687-
688-
"""
689-
new_mask = mask.copy()
690-
new_mask[data == 0] = 0
691-
return new_mask
692694

693695
def plot_confound(tseries, figsize, name, units=None,
694696
series_tr=None, normalize=False):

nipype/algorithms/tests/test_confounds.py

Lines changed: 21 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -39,9 +39,27 @@ def test_dvars(tmpdir):
3939
ground_truth = np.loadtxt(example_data('ds003_sub-01_mc.DVARS'))
4040
dvars = ComputeDVARS(in_file=example_data('ds003_sub-01_mc.nii.gz'),
4141
in_mask=example_data('ds003_sub-01_mc_brainmask.nii.gz'),
42-
save_all=True)
42+
save_all=True,
43+
intensity_normalization=0)
4344
os.chdir(str(tmpdir))
4445
res = dvars.run()
4546

46-
dv1 = np.loadtxt(res.outputs.out_std)
47-
assert (np.abs(dv1 - ground_truth).sum()/ len(dv1)) < 0.05
47+
dv1 = np.loadtxt(res.outputs.out_all, skiprows=1)
48+
assert (np.abs(dv1[:, 0] - ground_truth[:, 0]).sum()/ len(dv1)) < 0.05
49+
50+
assert (np.abs(dv1[:, 1] - ground_truth[:, 1]).sum() / len(dv1)) < 0.05
51+
52+
assert (np.abs(dv1[:, 2] - ground_truth[:, 2]).sum() / len(dv1)) < 0.05
53+
54+
dvars = ComputeDVARS(in_file=example_data('ds003_sub-01_mc.nii.gz'),
55+
in_mask=example_data(
56+
'ds003_sub-01_mc_brainmask.nii.gz'),
57+
save_all=True)
58+
res = dvars.run()
59+
60+
dv1 = np.loadtxt(res.outputs.out_all, skiprows=1)
61+
assert (np.abs(dv1[:, 0] - ground_truth[:, 0]).sum() / len(dv1)) < 0.05
62+
63+
assert (np.abs(dv1[:, 1] - ground_truth[:, 1]).sum() / len(dv1)) > 0.05
64+
65+
assert (np.abs(dv1[:, 2] - ground_truth[:, 2]).sum() / len(dv1)) < 0.05
Lines changed: 19 additions & 19 deletions
Original file line number | Diff line number | Diff line change
@@ -1,19 +1,19 @@
1-
1.54062
2-
1.31972
3-
0.921541
4-
1.26107
5-
0.99986
6-
0.929237
7-
0.715096
8-
1.05153
9-
1.29109
10-
0.700641
11-
0.844657
12-
0.884972
13-
0.807096
14-
0.881976
15-
0.843652
16-
0.780457
17-
1.05401
18-
1.32161
19-
0.686738
1+
2.02915 5.2016 1.74221
2+
1.54871 3.97002 1.18108
3+
0.921419 2.362 0.784497
4+
1.26058 3.23142 0.734119
5+
1.00079 2.56548 0.787452
6+
0.929074 2.38163 0.828835
7+
0.741207 1.90004 0.746263
8+
1.07913 2.7663 0.779829
9+
1.2969 3.32452 0.73856
10+
0.767387 1.96715 0.772047
11+
0.847059 2.17138 0.774103
12+
0.984061 2.52258 0.88097
13+
0.852897 2.18635 0.794655
14+
0.927778 2.3783 0.756786
15+
0.857544 2.19826 0.796125
16+
0.780098 1.99973 0.731265
17+
1.05496 2.70434 0.788584
18+
1.32099 3.38628 0.831803
19+
0.691529 1.77269 0.738788

0 commit comments

Comments (0)