Commit 8b2ecf7

Merge pull request #1796 from oesteban/fix/1795
Do not open nifti files with mmap if numpy < 1.12.0
2 parents: 5f1c984 + 20e10f9

38 files changed: +250, -184 lines
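Every `nibabel.load` call in the example scripts below is now routed through a shared `NUMPY_MMAP` flag imported from `nipype.utils`, so memory mapping is only requested when the installed numpy supports it (>= 1.12.0). As a rough sketch of the idea — the exact definition inside `nipype.utils` is an assumption here, not part of this diff — the flag and its use look roughly like this:

    # Assumed sketch of the NUMPY_MMAP compatibility flag (not copied from nipype.utils).
    from distutils.version import LooseVersion

    import numpy as np
    import nibabel as nb

    # Memory-mapping through nb.load() is only enabled on numpy >= 1.12.0;
    # older numpy falls back to reading the whole file into memory.
    NUMPY_MMAP = LooseVersion(np.__version__) >= LooseVersion('1.12.0')

    # 'example.nii' is a placeholder path; same call pattern as the examples below.
    nii = nb.load('example.nii', mmap=NUMPY_MMAP)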

.travis.yml

Lines changed: 19 additions & 16 deletions
@@ -1,5 +1,6 @@
 cache:
-- apt
+  apt: true
+
 language: python
 python:
 - 2.7
@@ -10,11 +11,7 @@ env:
 - INSTALL_DEB_DEPENDECIES=false NIPYPE_EXTRAS="doc,tests,fmri,profiler"
 - INSTALL_DEB_DEPENDECIES=true NIPYPE_EXTRAS="doc,tests,fmri,profiler,duecredit"
 before_install:
-- function bef_inst {
-  wget http://repo.continuum.io/miniconda/Miniconda${TRAVIS_PYTHON_VERSION:0:1}-latest-Linux-x86_64.sh
-  -O /home/travis/.cache/miniconda.sh &&
-  bash /home/travis/.cache/miniconda.sh -b -p /home/travis/miniconda &&
-  export PATH=/home/travis/miniconda/bin:$PATH &&
+- function apt_inst {
   if $INSTALL_DEB_DEPENDECIES; then sudo rm -rf /dev/shm; fi &&
   if $INSTALL_DEB_DEPENDECIES; then sudo ln -s /run/shm /dev/shm; fi &&
   bash <(wget -q -O- http://neuro.debian.net/_files/neurodebian-travis.sh) &&
@@ -26,18 +23,24 @@ before_install:
   source /etc/fsl/fsl.sh;
   source /etc/afni/afni.sh;
   export FSLOUTPUTTYPE=NIFTI_GZ; fi }
-- travis_retry bef_inst
-install:
-# Add install of vtk and mayavi to test mesh (disabled): conda install -y vtk mayavi &&
-- function inst {
+- function conda_inst {
+  export CONDA_HOME=$HOME/conda &&
+  wget https://repo.continuum.io/miniconda/Miniconda${TRAVIS_PYTHON_VERSION:0:1}-latest-Linux-x86_64.sh
+  -O /home/travis/.cache/conda.sh &&
+  bash /home/travis/.cache/conda.sh -b -p ${CONDA_HOME} &&
+  export PATH=${CONDA_HOME}/bin:$PATH &&
+  hash -r &&
+  conda config --set always_yes yes --set changeps1 no &&
+  conda update -q conda &&
+  conda install python=${TRAVIS_PYTHON_VERSION} &&
   conda config --add channels conda-forge &&
-  conda update --yes conda &&
-  conda update --all -y python=$TRAVIS_PYTHON_VERSION &&
   conda install -y nipype icu &&
-  rm -r /home/travis/miniconda/lib/python${TRAVIS_PYTHON_VERSION}/site-packages/nipype* &&
-  pip install -r requirements.txt &&
-  pip install -e .[$NIPYPE_EXTRAS]; }
-- travis_retry inst
+  rm -r ${CONDA_HOME}/lib/python${TRAVIS_PYTHON_VERSION}/site-packages/nipype*; }
+# Add install of vtk and mayavi to test mesh (disabled): conda install -y vtk mayavi
+- travis_retry apt_inst
+- travis_retry conda_inst
+install:
+- travis_retry pip install -e .[$NIPYPE_EXTRAS]
 script:
 - py.test --doctest-modules nipype
 deploy:

examples/dmri_camino_dti.py

Lines changed: 7 additions & 4 deletions
@@ -18,14 +18,14 @@
 Import necessary modules from nipype.
 """
 
+import os # system functions
 import nipype.interfaces.io as nio # Data i/o
 import nipype.interfaces.utility as util # utility
 import nipype.pipeline.engine as pe # pypeline engine
 import nipype.interfaces.camino as camino
 import nipype.interfaces.fsl as fsl
 import nipype.interfaces.camino2trackvis as cam2trk
 import nipype.algorithms.misc as misc
-import os # system functions
 
 """
 We use the following functions to scrape the voxel and data dimensions of the input images. This allows the
@@ -36,27 +36,30 @@
 
 def get_vox_dims(volume):
     import nibabel as nb
+    from nipype.utils import NUMPY_MMAP
     if isinstance(volume, list):
         volume = volume[0]
-    nii = nb.load(volume)
+    nii = nb.load(volume, mmap=NUMPY_MMAP)
     hdr = nii.header
     voxdims = hdr.get_zooms()
     return [float(voxdims[0]), float(voxdims[1]), float(voxdims[2])]
 
 
 def get_data_dims(volume):
     import nibabel as nb
+    from nipype.utils import NUMPY_MMAP
     if isinstance(volume, list):
         volume = volume[0]
-    nii = nb.load(volume)
+    nii = nb.load(volume, mmap=NUMPY_MMAP)
     hdr = nii.header
     datadims = hdr.get_data_shape()
     return [int(datadims[0]), int(datadims[1]), int(datadims[2])]
 
 
 def get_affine(volume):
     import nibabel as nb
-    nii = nb.load(volume)
+    from nipype.utils import NUMPY_MMAP
+    nii = nb.load(volume, mmap=NUMPY_MMAP)
     return nii.affine
 
 subject_list = ['subj1']

examples/dmri_connectivity.py

Lines changed: 10 additions & 7 deletions
@@ -47,6 +47,10 @@
 First, we import the necessary modules from nipype.
 """
 
+import inspect
+
+import os.path as op # system functions
+import cmp # connectome mapper
 import nipype.interfaces.io as nio # Data i/o
 import nipype.interfaces.utility as util # utility
 import nipype.pipeline.engine as pe # pypeline engine
@@ -56,10 +60,6 @@
 import nipype.interfaces.freesurfer as fs # freesurfer
 import nipype.interfaces.cmtk as cmtk
 import nipype.algorithms.misc as misc
-import inspect
-
-import os.path as op # system functions
-import cmp # connectome mapper
 
 """
 We define the following functions to scrape the voxel and data dimensions of the input images. This allows the
@@ -74,27 +74,30 @@
 
 def get_vox_dims(volume):
     import nibabel as nb
+    from nipype.utils import NUMPY_MMAP
     if isinstance(volume, list):
         volume = volume[0]
-    nii = nb.load(volume)
+    nii = nb.load(volume, mmap=NUMPY_MMAP)
     hdr = nii.header
     voxdims = hdr.get_zooms()
     return [float(voxdims[0]), float(voxdims[1]), float(voxdims[2])]
 
 
 def get_data_dims(volume):
     import nibabel as nb
+    from nipype.utils import NUMPY_MMAP
     if isinstance(volume, list):
         volume = volume[0]
-    nii = nb.load(volume)
+    nii = nb.load(volume, mmap=NUMPY_MMAP)
     hdr = nii.header
     datadims = hdr.get_data_shape()
     return [int(datadims[0]), int(datadims[1]), int(datadims[2])]
 
 
 def get_affine(volume):
     import nibabel as nb
-    nii = nb.load(volume)
+    from nipype.utils import NUMPY_MMAP
+    nii = nb.load(volume, mmap=NUMPY_MMAP)
     return nii.affine
 
 
examples/fmri_ants_openfmri.py

Lines changed: 3 additions & 1 deletion
@@ -36,6 +36,8 @@
 from nipype.workflows.fmri.fsl import (create_featreg_preproc,
                                        create_modelfit_workflow,
                                        create_fixed_effects_flow)
+from nipype.utils import NUMPY_MMAP
+
 
 config.enable_provenance()
 version = 0
@@ -68,7 +70,7 @@ def median(in_files):
     """
     average = None
     for idx, filename in enumerate(filename_to_list(in_files)):
-        img = nb.load(filename)
+        img = nb.load(filename, mmap=NUMPY_MMAP)
         data = np.median(img.get_data(), axis=3)
         if average is None:
             average = data

examples/fmri_fsl.py

Lines changed: 2 additions & 1 deletion
@@ -106,10 +106,11 @@ def pickfirst(files):
 
 def getmiddlevolume(func):
     from nibabel import load
+    from nipype.utils import NUMPY_MMAP
     funcfile = func
     if isinstance(func, list):
         funcfile = func[0]
-    _, _, _, timepoints = load(funcfile).shape
+    _, _, _, timepoints = load(funcfile, mmap=NUMPY_MMAP).shape
     return int(timepoints / 2) - 1
 
 preproc.connect(inputnode, ('func', getmiddlevolume), extract_ref, 't_min')

examples/fmri_spm_auditory.py

Lines changed: 3 additions & 1 deletion
@@ -29,6 +29,7 @@
 import nipype.algorithms.modelgen as model # model specification
 import os # system functions
 
+
 """
 
 Preliminaries
@@ -120,9 +121,10 @@
 
 def get_vox_dims(volume):
     import nibabel as nb
+    from nipype.utils import NUMPY_MMAP
     if isinstance(volume, list):
         volume = volume[0]
-    nii = nb.load(volume)
+    nii = nb.load(volume, mmap=NUMPY_MMAP)
     hdr = nii.header
     voxdims = hdr.get_zooms()
     return [float(voxdims[0]), float(voxdims[1]), float(voxdims[2])]

examples/fmri_spm_face.py

Lines changed: 3 additions & 2 deletions
@@ -20,13 +20,13 @@
 from __future__ import division
 from builtins import range
 
+import os # system functions
 import nipype.interfaces.io as nio # Data i/o
 import nipype.interfaces.spm as spm # spm
 import nipype.interfaces.matlab as mlab # how to run matlab
 import nipype.interfaces.utility as util # utility
 import nipype.pipeline.engine as pe # pypeline engine
 import nipype.algorithms.modelgen as model # model specification
-import os # system functions
 
 """
 
@@ -114,9 +114,10 @@
 
 def get_vox_dims(volume):
     import nibabel as nb
+    from nipype.utils import NUMPY_MMAP
     if isinstance(volume, list):
         volume = volume[0]
-    nii = nb.load(volume)
+    nii = nb.load(volume, mmap=NUMPY_MMAP)
     hdr = nii.header
     voxdims = hdr.get_zooms()
     return [float(voxdims[0]), float(voxdims[1]), float(voxdims[2])]

examples/rsfmri_vol_surface_preprocessing.py

Lines changed: 14 additions & 8 deletions
@@ -76,6 +76,7 @@
 import scipy as sp
 import nibabel as nb
 
+
 imports = ['import os',
            'import nibabel as nb',
            'import numpy as np',
@@ -117,9 +118,10 @@ def median(in_files):
     """
     import numpy as np
    import nibabel as nb
+    from nipype.utils import NUMPY_MMAP
     average = None
     for idx, filename in enumerate(filename_to_list(in_files)):
-        img = nb.load(filename)
+        img = nb.load(filename, mmap=NUMPY_MMAP)
         data = np.median(img.get_data(), axis=3)
         if average is None:
             average = data
@@ -145,11 +147,12 @@ def bandpass_filter(files, lowpass_freq, highpass_freq, fs):
     from nipype.utils.filemanip import split_filename, list_to_filename
     import numpy as np
     import nibabel as nb
+    from nipype.utils import NUMPY_MMAP
     out_files = []
     for filename in filename_to_list(files):
         path, name, ext = split_filename(filename)
         out_file = os.path.join(os.getcwd(), name + '_bp' + ext)
-        img = nb.load(filename)
+        img = nb.load(filename, mmap=NUMPY_MMAP)
         timepoints = img.shape[-1]
         F = np.zeros((timepoints))
         lowidx = int(timepoints / 2) + 1
@@ -260,11 +263,12 @@ def extract_noise_components(realigned_file, mask_file, num_components=5,
     from scipy.linalg.decomp_svd import svd
     import numpy as np
     import nibabel as nb
+    from nipype.utils import NUMPY_MMAP
     import os
-    imgseries = nb.load(realigned_file)
+    imgseries = nb.load(realigned_file, mmap=NUMPY_MMAP)
     components = None
     for filename in filename_to_list(mask_file):
-        mask = nb.load(filename).get_data()
+        mask = nb.load(filename, mmap=NUMPY_MMAP).get_data()
         if len(np.nonzero(mask > 0)[0]) == 0:
             continue
         voxel_timecourses = imgseries.get_data()[mask > 0]
@@ -329,10 +333,11 @@ def extract_subrois(timeseries_file, label_file, indices):
     """
     from nipype.utils.filemanip import split_filename
     import nibabel as nb
+    from nipype.utils import NUMPY_MMAP
     import os
-    img = nb.load(timeseries_file)
+    img = nb.load(timeseries_file, mmap=NUMPY_MMAP)
     data = img.get_data()
-    roiimg = nb.load(label_file)
+    roiimg = nb.load(label_file, mmap=NUMPY_MMAP)
     rois = roiimg.get_data()
     prefix = split_filename(timeseries_file)[1]
     out_ts_file = os.path.join(os.getcwd(), '%s_subcortical_ts.txt' % prefix)
@@ -352,8 +357,9 @@ def combine_hemi(left, right):
     """
     import os
     import numpy as np
-    lh_data = nb.load(left).get_data()
-    rh_data = nb.load(right).get_data()
+    from nipype.utils import NUMPY_MMAP
+    lh_data = nb.load(left, mmap=NUMPY_MMAP).get_data()
+    rh_data = nb.load(right, mmap=NUMPY_MMAP).get_data()
 
     indices = np.vstack((1000000 + np.arange(0, lh_data.shape[0])[:, None],
                          2000000 + np.arange(0, rh_data.shape[0])[:, None]))

examples/rsfmri_vol_surface_preprocessing_nipy.py

Lines changed: 7 additions & 6 deletions
@@ -75,6 +75,7 @@
 import numpy as np
 import scipy as sp
 import nibabel as nb
+from nipype.utils import NUMPY_MMAP
 
 imports = ['import os',
            'import nibabel as nb',
@@ -116,7 +117,7 @@ def median(in_files):
     """
     average = None
     for idx, filename in enumerate(filename_to_list(in_files)):
-        img = nb.load(filename)
+        img = nb.load(filename, mmap=NUMPY_MMAP)
         data = np.median(img.get_data(), axis=3)
         if average is None:
             average = data
@@ -143,7 +144,7 @@ def bandpass_filter(files, lowpass_freq, highpass_freq, fs):
     for filename in filename_to_list(files):
         path, name, ext = split_filename(filename)
         out_file = os.path.join(os.getcwd(), name + '_bp' + ext)
-        img = nb.load(filename)
+        img = nb.load(filename, mmap=NUMPY_MMAP)
         timepoints = img.shape[-1]
         F = np.zeros((timepoints))
         lowidx = int(timepoints / 2) + 1
@@ -268,9 +269,9 @@ def extract_subrois(timeseries_file, label_file, indices):
     The first four columns are: freesurfer index, i, j, k positions in the
     label file
     """
-    img = nb.load(timeseries_file)
+    img = nb.load(timeseries_file, mmap=NUMPY_MMAP)
     data = img.get_data()
-    roiimg = nb.load(label_file)
+    roiimg = nb.load(label_file, mmap=NUMPY_MMAP)
     rois = roiimg.get_data()
     prefix = split_filename(timeseries_file)[1]
     out_ts_file = os.path.join(os.getcwd(), '%s_subcortical_ts.txt' % prefix)
@@ -288,8 +289,8 @@ def extract_subrois(timeseries_file, label_file, indices):
 def combine_hemi(left, right):
     """Combine left and right hemisphere time series into a single text file
     """
-    lh_data = nb.load(left).get_data()
-    rh_data = nb.load(right).get_data()
+    lh_data = nb.load(left, mmap=NUMPY_MMAP).get_data()
+    rh_data = nb.load(right, mmap=NUMPY_MMAP).get_data()
 
     indices = np.vstack((1000000 + np.arange(0, lh_data.shape[0])[:, None],
                          2000000 + np.arange(0, rh_data.shape[0])[:, None]))
