diff --git a/.travis.yml b/.travis.yml index b6e69d09ba..f6f07e513c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -96,7 +96,7 @@ before_install: - virtualenv --python=python venv - source venv/bin/activate - python --version # just to check - - pip install -U pip wheel # needed at one point + - pip install -U pip "setuptools>=27.0" wheel - retry pip install nose flake8 mock # always - pip install $EXTRA_PIP_FLAGS $DEPENDS $OPTIONAL_DEPENDS - if [ "${COVERAGE}" == "1" ]; then diff --git a/appveyor.yml b/appveyor.yml index 772bfa142d..93438cfc0f 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -22,6 +22,7 @@ install: - SET PATH=%PYTHON%;%PYTHON%\Scripts;%PATH% # Install the dependencies of the project. + - python -m pip install --upgrade pip setuptools wheel - pip install numpy scipy matplotlib nose h5py mock pydicom - pip install . - SET NIBABEL_DATA_DIR=%CD%\nibabel-data diff --git a/nibabel/cifti2/tests/test_cifti2.py b/nibabel/cifti2/tests/test_cifti2.py index 9a2dcec728..ce71b92bcc 100644 --- a/nibabel/cifti2/tests/test_cifti2.py +++ b/nibabel/cifti2/tests/test_cifti2.py @@ -58,7 +58,7 @@ def test_cifti2_metadata(): assert_equal(md.data, dict(metadata_test)) assert_equal(list(iter(md)), list(iter(collections.OrderedDict(metadata_test)))) - + md.update({'a': 'aval', 'b': 'bval'}) assert_equal(md.data, dict(metadata_test)) @@ -310,7 +310,7 @@ def test_matrix(): assert_raises(ci.Cifti2HeaderError, m.insert, 0, mim_none) assert_equal(m.mapped_indices, []) - + h = ci.Cifti2Header(matrix=m) assert_equal(m.mapped_indices, []) m.insert(0, mim_0) diff --git a/nibabel/externals/netcdf.py b/nibabel/externals/netcdf.py index 24b17706b8..e485533cd7 100644 --- a/nibabel/externals/netcdf.py +++ b/nibabel/externals/netcdf.py @@ -37,7 +37,7 @@ import numpy as np # noqa from ..py3k import asbytes, asstr -from numpy import fromstring, ndarray, dtype, empty, array, asarray +from numpy import frombuffer, ndarray, dtype, empty, array, asarray from numpy import little_endian as 
LITTLE_ENDIAN from functools import reduce @@ -519,7 +519,7 @@ def _read(self): if not magic == b'CDF': raise TypeError("Error: %s is not a valid NetCDF 3 file" % self.filename) - self.__dict__['version_byte'] = fromstring(self.fp.read(1), '>b')[0] + self.__dict__['version_byte'] = frombuffer(self.fp.read(1), '>b')[0] # Read file headers and set data. self._read_numrecs() @@ -608,7 +608,7 @@ def _read_var_array(self): # Calculate size to avoid problems with vsize (above) a_size = reduce(mul, shape, 1) * size if self.file_bytes >= 0 and begin_ + a_size > self.file_bytes: - data = fromstring(b'\x00'*a_size, dtype=dtype_) + data = frombuffer(b'\x00'*a_size, dtype=dtype_) elif self.use_mmap: mm = mmap(self.fp.fileno(), begin_+a_size, access=ACCESS_READ) data = ndarray.__new__(ndarray, shape, dtype=dtype_, @@ -622,7 +622,7 @@ def _read_var_array(self): buf = self.fp.read(a_size) if len(buf) < a_size: buf = b'\x00'*a_size - data = fromstring(buf, dtype=dtype_) + data = frombuffer(buf, dtype=dtype_) data.shape = shape self.fp.seek(pos) @@ -644,7 +644,7 @@ def _read_var_array(self): else: pos = self.fp.tell() self.fp.seek(begin) - rec_array = fromstring(self.fp.read(self._recs*self._recsize), dtype=dtypes) + rec_array = frombuffer(self.fp.read(self._recs*self._recsize), dtype=dtypes) rec_array.shape = (self._recs,) self.fp.seek(pos) @@ -687,7 +687,7 @@ def _read_values(self): self.fp.read(-count % 4) # read padding if typecode is not 'c': - values = fromstring(values, dtype='>%s' % typecode) + values = frombuffer(values, dtype='>%s' % typecode) if values.shape == (1,): values = values[0] else: @@ -705,14 +705,14 @@ def _pack_int(self, value): _pack_int32 = _pack_int def _unpack_int(self): - return int(fromstring(self.fp.read(4), '>i')[0]) + return int(frombuffer(self.fp.read(4), '>i')[0]) _unpack_int32 = _unpack_int def _pack_int64(self, value): self.fp.write(array(value, '>q').tostring()) def _unpack_int64(self): - return fromstring(self.fp.read(8), '>q')[0] + return 
frombuffer(self.fp.read(8), '>q')[0] def _pack_string(self, s): count = len(s) diff --git a/nibabel/gifti/parse_gifti_fast.py b/nibabel/gifti/parse_gifti_fast.py index 4cdbd3d768..de02f4c76b 100644 --- a/nibabel/gifti/parse_gifti_fast.py +++ b/nibabel/gifti/parse_gifti_fast.py @@ -47,7 +47,7 @@ def read_data_block(encoding, endian, ordering, datatype, shape, data): dec = base64.b64decode(data.encode('ascii')) dt = data_type_codes.type[datatype] sh = tuple(shape) - newarr = np.fromstring(dec, dtype=dt) + newarr = np.frombuffer(dec, dtype=dt) if len(newarr.shape) != len(sh): newarr = newarr.reshape(sh, order=ord) @@ -59,7 +59,7 @@ def read_data_block(encoding, endian, ordering, datatype, shape, data): zdec = zlib.decompress(dec) dt = data_type_codes.type[datatype] sh = tuple(shape) - newarr = np.fromstring(zdec, dtype=dt) + newarr = np.frombuffer(zdec, dtype=dt) if len(newarr.shape) != len(sh): newarr = newarr.reshape(sh, order=ord) diff --git a/nibabel/info.py b/nibabel/info.py index 204412c5d2..56cdcb2c80 100644 --- a/nibabel/info.py +++ b/nibabel/info.py @@ -209,4 +209,5 @@ def cmp_pkg_version(version_str, pkg_version_str=__version__): ISRELEASE = _version_extra == '' VERSION = __version__ PROVIDES = ["nibabel", 'nisext'] -REQUIRES = ["numpy (>=%s)" % NUMPY_MIN_VERSION] +REQUIRES = ["numpy>=%s" % NUMPY_MIN_VERSION, + 'bz2file; python_version < "3.0"'] diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 60ff818e57..84cfed956a 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -579,7 +579,7 @@ def from_fileobj(klass, fileobj, size, byteswap): # otherwise there should be a full extension header if not len(ext_def) == 8: raise HeaderDataError('failed to read extension header') - ext_def = np.fromstring(ext_def, dtype=np.int32) + ext_def = np.frombuffer(ext_def, dtype=np.int32) if byteswap: ext_def = ext_def.byteswap() # be extra verbose diff --git a/nibabel/openers.py b/nibabel/openers.py index f64ab23b37..e551404561 100644 --- a/nibabel/openers.py +++ 
b/nibabel/openers.py @@ -9,7 +9,11 @@ """ Context manager openers for various fileobject types """ -import bz2 +import sys +if sys.version_info[0] < 3: + from bz2file import BZ2File +else: + from bz2 import BZ2File import gzip import sys import warnings @@ -127,7 +131,7 @@ class Opener(object): for \*args """ gz_def = (_gzip_open, ('mode', 'compresslevel', 'keep_open')) - bz2_def = (bz2.BZ2File, ('mode', 'buffering', 'compresslevel')) + bz2_def = (BZ2File, ('mode', 'buffering', 'compresslevel')) compress_ext_map = { '.gz': gz_def, '.bz2': bz2_def, @@ -209,6 +213,9 @@ def fileno(self): def read(self, *args, **kwargs): return self.fobj.read(*args, **kwargs) + def readinto(self, *args, **kwargs): + return self.fobj.readinto(*args, **kwargs) + def write(self, *args, **kwargs): return self.fobj.write(*args, **kwargs) diff --git a/nibabel/streamlines/tck.py b/nibabel/streamlines/tck.py index 1ba2d76625..31f0be0ab5 100644 --- a/nibabel/streamlines/tck.py +++ b/nibabel/streamlines/tck.py @@ -405,18 +405,21 @@ def _read(cls, fileobj, header, buffer_size=4): n_streams = 0 while not eof: + buff = bytearray(buffer_size) + n_read = f.readinto(buff) + eof = n_read != buffer_size + if eof: + buff = buff[:n_read] - bytes_read = f.read(buffer_size) - buffs.append(bytes_read) - eof = len(bytes_read) != buffer_size + buffs.append(buff) # Make sure we've read enough to find a streamline delimiter. - if fiber_marker not in bytes_read: + if fiber_marker not in buff: # If we've read the whole file, then fail. if eof: # Could have minimal buffering, and have read only the # EOF delimiter - buffs = [b''.join(buffs)] + buffs = [bytearray().join(buffs)] if not buffs[0] == eof_marker: raise DataError( "Cannot find a streamline delimiter. This file" @@ -425,15 +428,13 @@ def _read(cls, fileobj, header, buffer_size=4): # Otherwise read a bit more. 
continue - all_parts = b''.join(buffs).split(fiber_marker) + all_parts = bytearray().join(buffs).split(fiber_marker) point_parts, buffs = all_parts[:-1], all_parts[-1:] point_parts = [p for p in point_parts if p != b''] for point_part in point_parts: # Read floats. pts = np.frombuffer(point_part, dtype=dtype) - # Enforce ability to write to underlying bytes object - pts.flags.writeable = True # Convert data to little-endian if needed. yield pts.astype(' 0: - # setup_egg imports setuptools setup, thus monkeypatching distutils. - import setup_egg # noqa - -from distutils.core import setup +from setuptools import setup # Commit hash writing, and dependency checking from nisext.sexts import (get_comrec_build, package_check, install_scripts_bat, @@ -77,8 +70,8 @@ def main(**extra_args): author_email=INFO.AUTHOR_EMAIL, platforms=INFO.PLATFORMS, version=INFO.VERSION, - requires=INFO.REQUIRES, provides=INFO.PROVIDES, + install_requires=INFO.REQUIRES, packages = ['nibabel', 'nibabel.externals', 'nibabel.externals.tests', @@ -127,4 +120,6 @@ def main(**extra_args): if __name__ == "__main__": + # Do not use nisext's dynamically updated install_requires + extra_setuptools_args.pop('install_requires', None) main(**extra_setuptools_args)