From 7eaec0afcf6ad7ccecc181e16b57139d840c5e92 Mon Sep 17 00:00:00 2001 From: bluss Date: Sun, 13 Mar 2016 18:09:42 +0100 Subject: [PATCH 01/17] Add .is_contiguous and .as_slice_no_order() --- src/dimension.rs | 47 ++++++++++++++++++++------------------------ src/impl_methods.rs | 48 +++++++++++++++++++++++++++++++++++++++++++++ src/lib.rs | 2 +- tests/array.rs | 24 +++++++++++++++++++++++ tests/dimension.rs | 13 ++++++++++++ 5 files changed, 107 insertions(+), 27 deletions(-) diff --git a/src/dimension.rs b/src/dimension.rs index f7c768bc7..c07fa83d3 100644 --- a/src/dimension.rs +++ b/src/dimension.rs @@ -8,6 +8,7 @@ use std::cmp::Ordering; use std::fmt::Debug; use std::slice; +use itertools::free::enumerate; use super::{Si, Ix, Ixs}; use super::zipsl; @@ -25,24 +26,6 @@ fn stride_is_positive(stride: Ix) -> bool { (stride as Ixs) > 0 } -/// Return the axis ordering corresponding to the fastest variation -/// -/// Assumes that no stride value appears twice. This cannot yield the correct -/// result the strides are not positive. -fn fastest_varying_order(strides: &D) -> D { - let mut sorted = strides.clone(); - sorted.slice_mut().sort(); - let mut res = strides.clone(); - for (index, &val) in strides.slice().iter().enumerate() { - let sorted_ind = sorted.slice() - .iter() - .position(|&x| x == val) - .unwrap(); // cannot panic by construction - res.slice_mut()[sorted_ind] = index; - } - res -} - /// Check whether the given `dim` and `stride` lead to overlapping indices /// /// There is overlap if, when iterating through the dimensions in the order @@ -51,7 +34,7 @@ fn fastest_varying_order(strides: &D) -> D { /// /// The current implementation assumes strides to be positive pub fn dim_stride_overlap(dim: &D, strides: &D) -> bool { - let order = fastest_varying_order(strides); + let order = strides._fastest_varying_stride_order(); let mut prev_offset = 1; for &index in order.slice().iter() { @@ -335,6 +318,21 @@ pub unsafe trait Dimension : Clone + Eq + Debug + Send + Sync { offset } + /// Return the axis ordering corresponding to the fastest variation + /// (in ascending order). + /// + /// Assumes that no stride value appears twice. This cannot yield the correct + /// result the strides are not positive. 
+ #[doc(hidden)] + fn _fastest_varying_stride_order(&self) -> Self { + let mut indices = self.clone(); + for (i, elt) in enumerate(indices.slice_mut()) { + *elt = i; + } + let strides = self.slice(); + indices.slice_mut().sort_by_key(|&i| strides[i]); + indices + } } /// Implementation-specific extensions to `Dimension` @@ -484,6 +482,10 @@ unsafe impl Dimension for (Ix, Ix) { (self.1, 1) } + fn _fastest_varying_stride_order(&self) -> Self { + if self.0 as Ixs <= self.1 as Ixs { (0, 1) } else { (1, 0) } + } + #[inline] fn first_index(&self) -> Option<(Ix, Ix)> { let (m, n) = *self; @@ -742,13 +744,6 @@ mod test { use super::Dimension; use error::StrideError; - #[test] - fn fastest_varying_order() { - let strides = (2, 8, 4, 1); - let order = super::fastest_varying_order(&strides); - assert_eq!(order.slice(), &[3, 0, 2, 1]); - } - #[test] fn slice_indexing_uncommon_strides() { let v: Vec<_> = (0..12).collect(); diff --git a/src/impl_methods.rs b/src/impl_methods.rs index fa9e00227..d52a81f57 100644 --- a/src/impl_methods.rs +++ b/src/impl_methods.rs @@ -561,6 +561,27 @@ impl ArrayBase where S: Data, D: Dimension true } + fn is_contiguous(&self) -> bool { + let defaults = self.dim.default_strides(); + if self.ndim() == 0 || self.strides == defaults { + return true; + } + let order = self.strides._fastest_varying_stride_order(); + let strides = self.strides.slice(); + + // if any stride is negative + let dim = self.dim.slice(); + let mut cstride = 1; + for &i in order.slice() { + // a dimension of length 1 can have unequal strides + if dim[i] != 1 && strides[i] != cstride { + return false; + } + cstride *= dim[i]; + } + true + } + #[inline(always)] pub fn as_ptr(&self) -> *const A { self.ptr @@ -601,6 +622,33 @@ impl ArrayBase where S: Data, D: Dimension } } + /// Return the array’s data as a slice if it is contiguous, + /// return `None` otherwise. + pub fn as_slice_no_order(&self) -> Option<&[A]> { + if self.is_contiguous() { + unsafe { + Some(slice::from_raw_parts(self.ptr, self.len())) + } + } else { + None + } + } + + /// Return the array’s data as a slice if it is contiguous, + /// return `None` otherwise. + pub fn as_slice_mut_no_order(&mut self) -> Option<&mut [A]> + where S: DataMut + { + if self.is_contiguous() { + self.ensure_unique(); + unsafe { + Some(slice::from_raw_parts_mut(self.ptr, self.len())) + } + } else { + None + } + } + /// Transform the array into `shape`; any shape with the same number of /// elements is accepted. 
/// diff --git a/src/lib.rs b/src/lib.rs index 33f97b07d..43c938c6c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -506,7 +506,7 @@ impl ArrayBase where S: DataMut, F: FnMut(&mut A) { - if let Some(slc) = self.as_slice_mut() { + if let Some(slc) = self.as_slice_mut_no_order() { for elt in slc { f(elt); } diff --git a/tests/array.rs b/tests/array.rs index 9bdefd236..e931013a4 100644 --- a/tests/array.rs +++ b/tests/array.rs @@ -761,3 +761,27 @@ fn test_f_order() { let dupf = &f + &f; assert_eq!(dupc, dupf); } + +#[test] +fn test_contiguous() { + let c = arr3(&[[[1, 2, 3], + [4, 5, 6]], + [[4, 5, 6], + [7, 7, 7]]]); + assert!(c.is_standard_layout()); + assert!(c.as_slice_no_order().is_some()); + let v = c.slice(s![.., 0..1, ..]); + assert!(!v.is_standard_layout()); + assert!(!v.as_slice_no_order().is_some()); + + let v = c.slice(s![1..2, .., ..]); + assert!(v.is_standard_layout()); + assert!(v.as_slice_no_order().is_some()); + let v = v.reversed_axes(); + assert!(!v.is_standard_layout()); + assert!(v.as_slice_no_order().is_some()); + let mut v = v.reversed_axes(); + v.swap_axes(1, 2); + assert!(!v.is_standard_layout()); + assert!(v.as_slice_no_order().is_some()); +} diff --git a/tests/dimension.rs b/tests/dimension.rs index 0f423eb6f..58ee998fb 100644 --- a/tests/dimension.rs +++ b/tests/dimension.rs @@ -6,6 +6,7 @@ use ndarray::{ RemoveAxis, arr2, Axis, + Dimension, }; #[test] @@ -40,3 +41,15 @@ fn dyn_dimension() let z = OwnedArray::::zeros(dim.clone()); assert_eq!(z.shape(), &dim[..]); } + +#[test] +fn fastest_varying_order() { + let strides = (2, 8, 4, 1); + let order = strides._fastest_varying_stride_order(); + assert_eq!(order.slice(), &[3, 0, 2, 1]); + + assert_eq!((1, 3)._fastest_varying_stride_order(), (0, 1)); + assert_eq!((7, 2)._fastest_varying_stride_order(), (1, 0)); + assert_eq!((6, 1, 3)._fastest_varying_stride_order(), (1, 2, 0)); +} + From e3e5fd0954f9b4aeb0b3b86588a963c8d33c99af Mon Sep 17 00:00:00 2001 From: bluss Date: Sun, 13 Mar 2016 18:09:42 +0100 Subject: [PATCH 02/17] Use as_slice_no_order in scalar_sum --- src/impl_numeric.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/impl_numeric.rs b/src/impl_numeric.rs index 98486fa92..171a3d0a6 100644 --- a/src/impl_numeric.rs +++ b/src/impl_numeric.rs @@ -62,7 +62,7 @@ impl ArrayBase pub fn scalar_sum(&self) -> A where A: Clone + Add + libnum::Zero, { - if let Some(slc) = self.as_slice() { + if let Some(slc) = self.as_slice_no_order() { return numeric_util::unrolled_sum(slc); } let mut sum = A::zero(); From ab1c8724880d2589bda3aa34757a65270ccc5adc Mon Sep 17 00:00:00 2001 From: bluss Date: Sun, 13 Mar 2016 18:09:42 +0100 Subject: [PATCH 03/17] Add fast exit in is standard layout / is contiguous for 1d arrays --- src/impl_methods.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/impl_methods.rs b/src/impl_methods.rs index d52a81f57..ecc4c24c4 100644 --- a/src/impl_methods.rs +++ b/src/impl_methods.rs @@ -550,6 +550,7 @@ impl ArrayBase where S: Data, D: Dimension if self.strides == defaults { return true; } + if self.ndim() == 1 { return false; } // check all dimensions -- a dimension of length 1 can have unequal strides for (&dim, (&s, &ds)) in zipsl(self.dim.slice(), zipsl(self.strides(), defaults.slice())) @@ -563,13 +564,14 @@ impl ArrayBase where S: Data, D: Dimension fn is_contiguous(&self) -> bool { let defaults = self.dim.default_strides(); - if self.ndim() == 0 || self.strides == defaults { + if self.strides == defaults { return true; } + if self.ndim() == 1 { return 
false; } let order = self.strides._fastest_varying_stride_order(); let strides = self.strides.slice(); - // if any stride is negative + // FIXME: Negative strides let dim = self.dim.slice(); let mut cstride = 1; for &i in order.slice() { From db68593bcdefa4532e12f0fc8c61f9009aa0e405 Mon Sep 17 00:00:00 2001 From: bluss Date: Sun, 13 Mar 2016 18:09:42 +0100 Subject: [PATCH 04/17] Add _fastest_varying_stride_order specialization for 3d case --- src/dimension.rs | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/src/dimension.rs b/src/dimension.rs index c07fa83d3..00ab5c61c 100644 --- a/src/dimension.rs +++ b/src/dimension.rs @@ -565,6 +565,27 @@ unsafe impl Dimension for (Ix, Ix, Ix) { let (s, t, u) = *strides; stride_offset(i, s) + stride_offset(j, t) + stride_offset(k, u) } + + fn _fastest_varying_stride_order(&self) -> Self { + let mut stride = *self; + let mut order = (0, 1, 2); + macro_rules! swap { + ($stride:expr, $order:expr, $x:expr, $y:expr) => { + if $stride[$x] > $stride[$y] { + $stride.swap($x, $y); + $order.swap($x, $y); + } + } + } + { + let order = order.slice_mut(); + let strides = stride.slice_mut(); + swap![strides, order, 0, 1]; + swap![strides, order, 0, 2]; + swap![strides, order, 1, 2]; + } + order + } } macro_rules! large_dim { From e698f60476cdbee3799c12d3047ca6b0769ae412 Mon Sep 17 00:00:00 2001 From: bluss Date: Sun, 13 Mar 2016 18:09:42 +0100 Subject: [PATCH 05/17] Add Send + Sync to NdFloat This is a non-breaking change because these traits have no methods (no naming conflicts can occur), and the only legal NdFloat impls are f32 and f64, so no impls can break. --- src/linalg.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/linalg.rs b/src/linalg.rs index a4c57e257..672f38d2d 100644 --- a/src/linalg.rs +++ b/src/linalg.rs @@ -54,7 +54,7 @@ impl LinalgScalar for T pub trait NdFloat : Float + fmt::Display + fmt::Debug + fmt::LowerExp + fmt::UpperExp + - ScalarOperand + LinalgScalar + ScalarOperand + LinalgScalar + Send + Sync { } /// Floating-point element types `f32` and `f64`. 
@@ -70,7 +70,7 @@ pub trait NdFloat : Float + AddAssign + SubAssign + MulAssign + DivAssign + RemAssign + fmt::Display + fmt::Debug + fmt::LowerExp + fmt::UpperExp + - ScalarOperand + LinalgScalar + ScalarOperand + LinalgScalar + Send + Sync { } impl NdFloat for f32 { } From fc0041eaa9225fcf1981df60ca85c4db692c735b Mon Sep 17 00:00:00 2001 From: bluss Date: Sun, 13 Mar 2016 18:09:42 +0100 Subject: [PATCH 06/17] Update Display for ShapeError --- src/error.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/error.rs b/src/error.rs index e5ff22b92..16fec770c 100644 --- a/src/error.rs +++ b/src/error.rs @@ -87,13 +87,13 @@ impl Error for ShapeError { impl fmt::Display for ShapeError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.description().fmt(f) + write!(f, "ShapeError/{:?}: {}", self.kind(), self.description()) } } impl fmt::Debug for ShapeError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "ShapeError {:?}: {}", self.kind(), self.description()) + write!(f, "ShapeError/{:?}: {}", self.kind(), self.description()) } } From 8c2c835bcfe39f2890882af4afa8282b59a35fa4 Mon Sep 17 00:00:00 2001 From: bluss Date: Sun, 13 Mar 2016 18:09:42 +0100 Subject: [PATCH 07/17] Add tests for from_vec_dim_stride --- tests/array.rs | 87 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) diff --git a/tests/array.rs b/tests/array.rs index e931013a4..7160d7a21 100644 --- a/tests/array.rs +++ b/tests/array.rs @@ -12,6 +12,7 @@ use ndarray::{arr0, arr1, arr2, arr3, aview1, aview2, aview_mut1, + Dimension, }; use ndarray::Indexes; use ndarray::Axis; @@ -424,6 +425,92 @@ fn owned_array_with_stride() { assert_eq!(a.strides(), &[1, 4, 2]); } +macro_rules! assert_matches { + ($value:expr, $pat:pat) => { + match $value { + $pat => {} + ref err => panic!("assertion failed: `{}` matches `{}` found: {:?}", + stringify!($value), stringify!($pat), err), + } + } +} + +#[test] +fn from_vec_dim_stride_empty_1d() { + let empty: [f32; 0] = []; + assert_matches!(OwnedArray::from_vec_dim_stride(0, 1, empty.to_vec()), + Ok(_)); +} + +#[test] +fn from_vec_dim_stride_0d() { + let empty: [f32; 0] = []; + let one = [1.]; + let two = [1., 2.]; + // too few elements + assert_matches!(OwnedArray::from_vec_dim_stride((), (), empty.to_vec()), Err(_)); + // exact number of elements + assert_matches!(OwnedArray::from_vec_dim_stride((), (), one.to_vec()), Ok(_)); + // too many are ok + assert_matches!(OwnedArray::from_vec_dim_stride((), (), two.to_vec()), Ok(_)); +} + +#[test] +fn from_vec_dim_stride_2d_1() { + let two = [1., 2.]; + let d = (2, 1); + let s = d.default_strides(); + assert_matches!(OwnedArray::from_vec_dim_stride(d, s, two.to_vec()), Ok(_)); +} + +#[test] +fn from_vec_dim_stride_2d_2() { + let two = [1., 2.]; + let d = (1, 2); + let s = d.default_strides(); + assert_matches!(OwnedArray::from_vec_dim_stride(d, s, two.to_vec()), Ok(_)); +} + +#[test] +fn from_vec_dim_stride_2d_3() { + let a = arr3(&[[[1]], + [[2]], + [[3]]]); + let d = a.dim(); + let s = d.default_strides(); + assert_matches!(OwnedArray::from_vec_dim_stride(d, s, a.as_slice().unwrap().to_vec()), Ok(_)); +} + +#[test] +fn from_vec_dim_stride_2d_4() { + let a = arr3(&[[[1]], + [[2]], + [[3]]]); + let d = a.dim(); + let s = d.fortran_strides(); + assert_matches!(OwnedArray::from_vec_dim_stride(d, s, a.as_slice().unwrap().to_vec()), Ok(_)); +} + +#[test] +fn from_vec_dim_stride_2d_5() { + let a = arr3(&[[[1, 2, 3]]]); + let d = a.dim(); + let s = d.fortran_strides(); + 
assert_matches!(OwnedArray::from_vec_dim_stride(d, s, a.as_slice().unwrap().to_vec()), Ok(_)); +} + +#[test] +fn from_vec_dim_stride_2d_rejects() { + let two = [1., 2.]; + let d = (2, 2); + let s = (1, 0); + assert_matches!(OwnedArray::from_vec_dim_stride(d, s, two.to_vec()), Err(_)); + + let d = (2, 2); + let s = (0, 1); + assert_matches!(OwnedArray::from_vec_dim_stride(d, s, two.to_vec()), Err(_)); +} + #[test] fn views() { let a = RcArray::from_vec(vec![1, 2, 3, 4]).reshape((2, 2)); From c8e0528b75f6455840bae38d24c68e00d3798066 Mon Sep 17 00:00:00 2001 From: bluss Date: Sun, 13 Mar 2016 18:09:42 +0100 Subject: [PATCH 08/17] Fix dim_stride_overlap for axes with length 1 --- src/dimension.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/dimension.rs b/src/dimension.rs index 00ab5c61c..b56220677 100644 --- a/src/dimension.rs +++ b/src/dimension.rs @@ -36,13 +36,17 @@ fn stride_is_positive(stride: Ix) -> bool { pub fn dim_stride_overlap(dim: &D, strides: &D) -> bool { let order = strides._fastest_varying_stride_order(); + let dim = dim.slice(); + let strides = strides.slice(); let mut prev_offset = 1; - for &index in order.slice().iter() { - let s = strides.slice()[index]; - if (s as isize) < prev_offset { + for &index in order.slice() { + let d = dim[index]; + let s = strides[index]; + // any stride is ok if dimension is 1 + if d != 1 && (s as isize) < prev_offset { return true; } - prev_offset = stride_offset(dim.slice()[index], s); + prev_offset = stride_offset(d, s); } false } From 051ed25af4ef58da176117eaf9966afc2ac5e075 Mon Sep 17 00:00:00 2001 From: bluss Date: Sun, 13 Mar 2016 18:09:42 +0100 Subject: [PATCH 09/17] Add more from_vec_dim_stride tests --- tests/array.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tests/array.rs b/tests/array.rs index 7160d7a21..feb4d8aeb 100644 --- a/tests/array.rs +++ b/tests/array.rs @@ -499,6 +499,18 @@ fn from_vec_dim_stride_2d_5() { assert_matches!(OwnedArray::from_vec_dim_stride(d, s, a.as_slice().unwrap().to_vec()), Ok(_)); } +#[test] +fn from_vec_dim_stride_2d_6() { + let a = [1., 2., 3., 4., 5., 6.]; + let d = (2, 1, 1); + let s = (2, 2, 1); + assert_matches!(OwnedArray::from_vec_dim_stride(d, s, a.to_vec()), Ok(_)); + + let d = (1, 2, 1); + let s = (2, 2, 1); + assert_matches!(OwnedArray::from_vec_dim_stride(d, s, a.to_vec()), Ok(_)); +} + #[test] fn from_vec_dim_stride_2d_rejects() { let two = [1., 2.]; From 1a095f511ea11e6c21a02370f51d9340897c0140 Mon Sep 17 00:00:00 2001 From: bluss Date: Sun, 13 Mar 2016 18:09:42 +0100 Subject: [PATCH 10/17] Fix can_index_slice for arrays with no elements --- src/dimension.rs | 59 +++++++++++++++++++++++++----------------------- tests/array.rs | 19 ++++++++++++++++ 2 files changed, 50 insertions(+), 28 deletions(-) diff --git a/src/dimension.rs b/src/dimension.rs index b56220677..c66e53c19 100644 --- a/src/dimension.rs +++ b/src/dimension.rs @@ -20,12 +20,6 @@ pub fn stride_offset(n: Ix, stride: Ix) -> isize { (n as isize) * ((stride as Ixs) as isize) } -/// Check whether `stride` is strictly positive -#[inline] -fn stride_is_positive(stride: Ix) -> bool { - (stride as Ixs) > 0 -} - /// Check whether the given `dim` and `stride` lead to overlapping indices /// /// There is overlap if, when iterating through the dimensions in the order @@ -61,33 +55,42 @@ pub fn dim_stride_overlap(dim: &D, strides: &D) -> bool { pub fn can_index_slice(data: &[A], dim: &D, strides: &D) -> Result<(), ShapeError> { - if 
strides.slice().iter().cloned().all(stride_is_positive) { - if dim.size_checked().is_none() { + // check lengths of axes. + let len = match dim.size_checked() { + Some(l) => l, + None => return Err(from_kind(ErrorKind::OutOfBounds)), + }; + // check if strides are strictly positive (zero ok for len 0) + for &s in strides.slice() { + let s = s as Ixs; + if s < 1 && (len != 0 || s < 0) { + return Err(from_kind(ErrorKind::Unsupported)); + } + } + if len == 0 { + return Ok(()); + } + // check that the maximum index is in bounds + let mut last_index = dim.clone(); + for mut index in last_index.slice_mut().iter_mut() { + *index -= 1; + } + if let Some(offset) = stride_offset_checked_arithmetic(dim, + strides, + &last_index) + { + // offset is guaranteed to be positive so no issue converting + // to usize here + if (offset as usize) >= data.len() { return Err(from_kind(ErrorKind::OutOfBounds)); } - let mut last_index = dim.clone(); - for mut index in last_index.slice_mut().iter_mut() { - *index -= 1; - } - if let Some(offset) = stride_offset_checked_arithmetic(dim, - strides, - &last_index) - { - // offset is guaranteed to be positive so no issue converting - // to usize here - if (offset as usize) >= data.len() { - return Err(from_kind(ErrorKind::OutOfBounds)); - } - if dim_stride_overlap(dim, strides) { - return Err(from_kind(ErrorKind::Unsupported)); - } - } else { - return Err(from_kind(ErrorKind::OutOfBounds)); + if dim_stride_overlap(dim, strides) { + return Err(from_kind(ErrorKind::Unsupported)); } - Ok(()) } else { - return Err(from_kind(ErrorKind::Unsupported)); + return Err(from_kind(ErrorKind::OutOfBounds)); } + Ok(()) } /// Return stride offset for this dimension and index. diff --git a/tests/array.rs b/tests/array.rs index feb4d8aeb..279d06f59 100644 --- a/tests/array.rs +++ b/tests/array.rs @@ -511,6 +511,25 @@ fn from_vec_dim_stride_2d_6() { assert_matches!(OwnedArray::from_vec_dim_stride(d, s, a.to_vec()), Ok(_)); } +#[test] +fn from_vec_dim_stride_2d_7() { + // empty arrays can have 0 strides + let a: [f32; 0] = []; + // [[]] shape=[4, 0], strides=[0, 1] + let d = (4, 0); + let s = (0, 1); + assert_matches!(OwnedArray::from_vec_dim_stride(d, s, a.to_vec()), Ok(_)); +} + +#[test] +fn from_vec_dim_stride_2d_8() { + // strides must be strictly positive (nonzero) + let a = [1.]; + let d = (1, 1); + let s = (0, 1); + assert_matches!(OwnedArray::from_vec_dim_stride(d, s, a.to_vec()), Err(_)); +} + #[test] fn from_vec_dim_stride_2d_rejects() { let two = [1., 2.]; From 1efd0829cde668da5172f73d6c4ffd7203840b26 Mon Sep 17 00:00:00 2001 From: bluss Date: Sun, 13 Mar 2016 18:09:42 +0100 Subject: [PATCH 11/17] Fix _fastest_varying_stride_order to always be a stable sort order This adds a test that ensures order indices are unique even when strides are equal. 
--- src/dimension.rs | 3 ++- tests/dimension.rs | 6 ++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/dimension.rs b/src/dimension.rs index c66e53c19..c81881f6b 100644 --- a/src/dimension.rs +++ b/src/dimension.rs @@ -585,10 +585,11 @@ unsafe impl Dimension for (Ix, Ix, Ix) { } } { + // stable sorting network for 3 elements let order = order.slice_mut(); let strides = stride.slice_mut(); + swap![strides, order, 1, 2]; swap![strides, order, 0, 1]; - swap![strides, order, 0, 2]; swap![strides, order, 1, 2]; } order diff --git a/tests/dimension.rs b/tests/dimension.rs index 58ee998fb..cbc843f3e 100644 --- a/tests/dimension.rs +++ b/tests/dimension.rs @@ -51,5 +51,11 @@ fn fastest_varying_order() { assert_eq!((1, 3)._fastest_varying_stride_order(), (0, 1)); assert_eq!((7, 2)._fastest_varying_stride_order(), (1, 0)); assert_eq!((6, 1, 3)._fastest_varying_stride_order(), (1, 2, 0)); + + // it's important that it produces distinct indices. Prefer the stable order + // where 0 is before 1 when they are equal. + assert_eq!((2, 2)._fastest_varying_stride_order(), (0, 1)); + assert_eq!((2, 2, 1)._fastest_varying_stride_order(), (2, 0, 1)); + assert_eq!((2, 2, 3, 1, 2)._fastest_varying_stride_order(), (3, 0, 1, 4, 2)); } From b28bf52796ad1f01909b4a3805243e96495a4cca Mon Sep 17 00:00:00 2001 From: bluss Date: Sun, 13 Mar 2016 18:27:56 +0100 Subject: [PATCH 12/17] Add benchmarks for .assign_scalar(&0.) Should be memset but is not in current rust nightly. --- benches/bench1.rs | 33 +++++++++++++++++++++++++-------- 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/benches/bench1.rs b/benches/bench1.rs index 382ec94fb..6afc442b9 100644 --- a/benches/bench1.rs +++ b/benches/bench1.rs @@ -373,12 +373,12 @@ fn muladd_2d_f32_blas(bench: &mut test::Bencher) } #[bench] -fn assign_scalar_2d_large(bench: &mut test::Bencher) +fn assign_scalar_2d_corder(bench: &mut test::Bencher) { let a = OwnedArray::zeros((64, 64)); let mut a = black_box(a); let s = 3.; - bench.iter(|| a.assign_scalar(&s)) + bench.iter(move || a.assign_scalar(&s)) } #[bench] @@ -388,26 +388,43 @@ fn assign_scalar_2d_cutout(bench: &mut test::Bencher) let a = a.slice_mut(s![1..-1, 1..-1]); let mut a = black_box(a); let s = 3.; - bench.iter(|| a.assign_scalar(&s)) + bench.iter(move || a.assign_scalar(&s)) } #[bench] -fn assign_scalar_2d_transposed_large(bench: &mut test::Bencher) +fn assign_scalar_2d_forder(bench: &mut test::Bencher) { let mut a = OwnedArray::zeros((64, 64)); a.swap_axes(0, 1); let mut a = black_box(a); let s = 3.; - bench.iter(|| a.assign_scalar(&s)) + bench.iter(move || a.assign_scalar(&s)) } #[bench] -fn assign_scalar_2d_raw_large(bench: &mut test::Bencher) +fn assign_zero_2d_corder(bench: &mut test::Bencher) { let a = OwnedArray::zeros((64, 64)); let mut a = black_box(a); - let s = 3.; - bench.iter(|| for elt in a.raw_data_mut() { *elt = s; }); + bench.iter(|| a.assign_scalar(&0.)) +} + +#[bench] +fn assign_zero_2d_cutout(bench: &mut test::Bencher) +{ + let mut a = OwnedArray::zeros((66, 66)); + let a = a.slice_mut(s![1..-1, 1..-1]); + let mut a = black_box(a); + bench.iter(|| a.assign_scalar(&0.)) +} + +#[bench] +fn assign_zero_2d_forder(bench: &mut test::Bencher) +{ + let mut a = OwnedArray::zeros((64, 64)); + a.swap_axes(0, 1); + let mut a = black_box(a); + bench.iter(|| a.assign_scalar(&0.)) } #[bench] From 87caeccf1ea063b68d617f0ea5020a1afa347442 Mon Sep 17 00:00:00 2001 From: bluss Date: Sun, 13 Mar 2016 18:33:09 +0100 Subject: [PATCH 13/17] Rename as_slice_no_order to 
as_slice_memory_order --- src/impl_methods.rs | 20 ++++++++++++++------ src/impl_numeric.rs | 2 +- src/lib.rs | 2 +- tests/array.rs | 10 +++++----- 4 files changed, 21 insertions(+), 13 deletions(-) diff --git a/src/impl_methods.rs b/src/impl_methods.rs index ecc4c24c4..1bd4769b1 100644 --- a/src/impl_methods.rs +++ b/src/impl_methods.rs @@ -597,8 +597,11 @@ impl ArrayBase where S: Data, D: Dimension self.ptr } - /// Return the array’s data as a slice, if it is contiguous and - /// the element order corresponds to the memory order. Return `None` otherwise. + /// Return the array’s data as a slice, if it is contiguous and in standard order. + /// Return `None` otherwise. + /// + /// If this function returns `Some(_)`, then the element order in the slice + /// corresponds to the logical order of the array’s elements. pub fn as_slice(&self) -> Option<&[A]> { if self.is_standard_layout() { unsafe { @@ -609,8 +612,8 @@ impl ArrayBase where S: Data, D: Dimension } } - /// Return the array’s data as a slice, if it is contiguous and - /// the element order corresponds to the memory order. Return `None` otherwise. + /// Return the array’s data as a slice, if it is contiguous and in standard order. + /// Return `None` otherwise. pub fn as_slice_mut(&mut self) -> Option<&mut [A]> where S: DataMut { @@ -626,7 +629,12 @@ impl ArrayBase where S: Data, D: Dimension /// Return the array’s data as a slice if it is contiguous, /// return `None` otherwise. - pub fn as_slice_no_order(&self) -> Option<&[A]> { + /// + /// If this function returns `Some(_)`, then the elements in the slice + /// have whatever order the elements have in memory. + /// + /// Implementation notes: Does not yet support negatively strided arrays. + pub fn as_slice_memory_order(&self) -> Option<&[A]> { if self.is_contiguous() { unsafe { Some(slice::from_raw_parts(self.ptr, self.len())) @@ -638,7 +646,7 @@ impl ArrayBase where S: Data, D: Dimension /// Return the array’s data as a slice if it is contiguous, /// return `None` otherwise. 
- pub fn as_slice_mut_no_order(&mut self) -> Option<&mut [A]> + pub fn as_slice_memory_order_mut(&mut self) -> Option<&mut [A]> where S: DataMut { if self.is_contiguous() { diff --git a/src/impl_numeric.rs b/src/impl_numeric.rs index 171a3d0a6..601128c0a 100644 --- a/src/impl_numeric.rs +++ b/src/impl_numeric.rs @@ -62,7 +62,7 @@ impl ArrayBase pub fn scalar_sum(&self) -> A where A: Clone + Add + libnum::Zero, { - if let Some(slc) = self.as_slice_no_order() { + if let Some(slc) = self.as_slice_memory_order() { return numeric_util::unrolled_sum(slc); } let mut sum = A::zero(); diff --git a/src/lib.rs b/src/lib.rs index 43c938c6c..df7fcd081 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -506,7 +506,7 @@ impl ArrayBase where S: DataMut, F: FnMut(&mut A) { - if let Some(slc) = self.as_slice_mut_no_order() { + if let Some(slc) = self.as_slice_memory_order_mut() { for elt in slc { f(elt); } diff --git a/tests/array.rs b/tests/array.rs index 279d06f59..cf14d9307 100644 --- a/tests/array.rs +++ b/tests/array.rs @@ -887,19 +887,19 @@ fn test_contiguous() { [[4, 5, 6], [7, 7, 7]]]); assert!(c.is_standard_layout()); - assert!(c.as_slice_no_order().is_some()); + assert!(c.as_slice_memory_order().is_some()); let v = c.slice(s![.., 0..1, ..]); assert!(!v.is_standard_layout()); - assert!(!v.as_slice_no_order().is_some()); + assert!(!v.as_slice_memory_order().is_some()); let v = c.slice(s![1..2, .., ..]); assert!(v.is_standard_layout()); - assert!(v.as_slice_no_order().is_some()); + assert!(v.as_slice_memory_order().is_some()); let v = v.reversed_axes(); assert!(!v.is_standard_layout()); - assert!(v.as_slice_no_order().is_some()); + assert!(v.as_slice_memory_order().is_some()); let mut v = v.reversed_axes(); v.swap_axes(1, 2); assert!(!v.is_standard_layout()); - assert!(v.as_slice_no_order().is_some()); + assert!(v.as_slice_memory_order().is_some()); } From 5d4f72d71452963ff510902785929cca3fc50574 Mon Sep 17 00:00:00 2001 From: bluss Date: Sun, 13 Mar 2016 19:16:29 +0100 Subject: [PATCH 14/17] Make _fastest_varying_stride_order inlinable --- src/dimension.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/dimension.rs b/src/dimension.rs index c81881f6b..c22540bd3 100644 --- a/src/dimension.rs +++ b/src/dimension.rs @@ -489,6 +489,7 @@ unsafe impl Dimension for (Ix, Ix) { (self.1, 1) } + #[inline] fn _fastest_varying_stride_order(&self) -> Self { if self.0 as Ixs <= self.1 as Ixs { (0, 1) } else { (1, 0) } } @@ -573,6 +574,7 @@ unsafe impl Dimension for (Ix, Ix, Ix) { stride_offset(i, s) + stride_offset(j, t) + stride_offset(k, u) } + #[inline] fn _fastest_varying_stride_order(&self) -> Self { let mut stride = *self; let mut order = (0, 1, 2); From 9cd2a631cba3fc0bb82c1e6278d2a848829c86f7 Mon Sep 17 00:00:00 2001 From: bluss Date: Sun, 13 Mar 2016 19:50:34 +0100 Subject: [PATCH 15/17] Deprecte raw_data, raw_data_mut in favour of as_slice_memory_order --- benches/bench1.rs | 4 ++-- src/impl_methods.rs | 2 ++ tests/array.rs | 15 +++++++++------ 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/benches/bench1.rs b/benches/bench1.rs index 6afc442b9..992143b74 100644 --- a/benches/bench1.rs +++ b/benches/bench1.rs @@ -48,7 +48,7 @@ fn sum_1d_raw(bench: &mut test::Bencher) let a = black_box(a); bench.iter(|| { let mut sum = 0; - for &elt in a.raw_data() { + for &elt in a.as_slice_memory_order().unwrap() { sum += elt; } sum @@ -93,7 +93,7 @@ fn sum_2d_raw(bench: &mut test::Bencher) let a = black_box(a); bench.iter(|| { let mut sum = 0; - for &elt in a.raw_data() { + for &elt in 
a.as_slice_memory_order().unwrap() { sum += elt; } sum diff --git a/src/impl_methods.rs b/src/impl_methods.rs index 1bd4769b1..260364e74 100644 --- a/src/impl_methods.rs +++ b/src/impl_methods.rs @@ -878,6 +878,7 @@ impl ArrayBase where S: Data, D: Dimension /// **Note:** Data memory order may not correspond to the index order /// of the array. Neither is the raw data slice is restricted to just the /// array’s view.
+ #[cfg_attr(has_deprecated, deprecated(note="Use .as_slice_memory_order() instead"))] pub fn raw_data(&self) -> &[A] where S: DataOwned, { @@ -892,6 +893,7 @@ impl ArrayBase where S: Data, D: Dimension /// /// **Note:** The data is uniquely held and nonaliased /// while it is mutably borrowed. + #[cfg_attr(has_deprecated, deprecated(note="Use .as_slice_memory_order_mut() instead"))] pub fn raw_data_mut(&mut self) -> &mut [A] where S: DataOwned + DataMut, { diff --git a/tests/array.rs b/tests/array.rs index cf14d9307..a7ed8f523 100644 --- a/tests/array.rs +++ b/tests/array.rs @@ -7,7 +7,9 @@ extern crate itertools; use ndarray::{RcArray, S, Si, OwnedArray, }; -use ndarray::{arr0, arr1, arr2, arr3, +use ndarray::{ + rcarr2, + arr0, arr1, arr2, arr3, aview0, aview1, aview2, @@ -244,8 +246,8 @@ fn swapaxes() assert_eq!(a, b); a.swap_axes(1, 1); assert_eq!(a, b); - assert!(a.raw_data() == [1., 2., 3., 4.]); - assert!(b.raw_data() == [1., 3., 2., 4.]); + assert_eq!(a.as_slice_memory_order(), Some(&[1., 2., 3., 4.][..])); + assert_eq!(b.as_slice_memory_order(), Some(&[1., 3., 2., 4.][..])); } #[test] @@ -380,11 +382,12 @@ fn map1() } #[test] -fn raw_data_mut() +fn as_slice_memory_order() { - let a = arr2(&[[1., 2.], [3., 4.0f32]]); + // test that mutation breaks sharing + let a = rcarr2(&[[1., 2.], [3., 4.0f32]]); let mut b = a.clone(); - for elt in b.raw_data_mut() { + for elt in b.as_slice_memory_order_mut().unwrap() { *elt = 0.; } assert!(a != b, "{:?} != {:?}", a, b); From 13574d5deaa230cc6413a00196d570d74bb8382e Mon Sep 17 00:00:00 2001 From: bluss Date: Sun, 13 Mar 2016 19:56:21 +0100 Subject: [PATCH 16/17] Edit docs to mention memory order vs logical order --- src/lib.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index df7fcd081..51a2bd971 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -237,11 +237,14 @@ pub type Ixs = isize; /// for element indices in `.get()` and `array[index]`. The dimension type `Vec` /// allows a dynamic number of axes. /// -/// The default memory order of an array is *row major* order, where each -/// row is contiguous in memory. -/// A *column major* (a.k.a. fortran) memory order array has +/// The default memory order of an array is *row major* order (a.k.a ”c” order), +/// where each row is contiguous in memory. +/// A *column major* (a.k.a. “f” or fortran) memory order array has /// columns (or, in general, the outermost axis) with contiguous elements. /// +/// The logical order of any array’s elements is the row major order. +/// The iterators `.iter(), .iter_mut()` always adhere to this order, for example. +/// /// ## Slicing /// /// You can use slicing to create a view of a subset of the data in From d363b0acb71f08557c7f118bd64d69187f06d0de Mon Sep 17 00:00:00 2001 From: bluss Date: Sun, 13 Mar 2016 20:05:17 +0100 Subject: [PATCH 17/17] Edit docs for .as_ptr() --- src/impl_methods.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/impl_methods.rs b/src/impl_methods.rs index 260364e74..7372b8be0 100644 --- a/src/impl_methods.rs +++ b/src/impl_methods.rs @@ -584,11 +584,21 @@ impl ArrayBase where S: Data, D: Dimension true } + /// Return a pointer to the first element in the array. + /// + /// Raw access to array elements needs to follow the strided indexing + /// scheme: an element at multi-index *I* in an array with strides *S* is + /// located at offset + /// + /// *Σ0 ≤ k < d Ik × Sk* + /// + /// where *d* is `self.ndim()`. 
     #[inline(always)]
     pub fn as_ptr(&self) -> *const A {
         self.ptr
     }
 
+    /// Return a mutable pointer to the first element in the array.
     #[inline(always)]
     pub fn as_mut_ptr(&mut self) -> *mut A
         where S: DataMut
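
Usage sketch (illustrative only, not part of the patches above): a minimal program showing how the slice accessors introduced in this series behave on a transposed array, assuming the ndarray crate in the state these patches leave it in. Only items that appear in the diffs are used (arr2, is_standard_layout, as_slice, as_slice_memory_order, reversed_axes); the element values and the main() wrapper are made up for the example.

    extern crate ndarray;
    use ndarray::arr2;

    fn main() {
        // A 2 x 3 array in the default row-major (standard, "c") layout.
        let a = arr2(&[[1., 2., 3.],
                       [4., 5., 6.]]);
        assert!(a.is_standard_layout());
        // Logical order and memory order agree, so both accessors succeed.
        assert_eq!(a.as_slice(), Some(&[1., 2., 3., 4., 5., 6.][..]));
        assert_eq!(a.as_slice_memory_order(), Some(&[1., 2., 3., 4., 5., 6.][..]));

        // Transposing yields a column-major ("f") view of the same memory.
        let t = a.reversed_axes();
        assert!(!t.is_standard_layout());
        // `as_slice` requires the logical (row-major) order to match memory order.
        assert_eq!(t.as_slice(), None);
        // `as_slice_memory_order` only requires contiguous memory, so it still
        // returns the elements, in memory order rather than logical order.
        assert_eq!(t.as_slice_memory_order(), Some(&[1., 2., 3., 4., 5., 6.][..]));
    }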
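
A second sketch for the .as_ptr() documentation added in PATCH 17/17: it recomputes the documented offset, the sum of I[k] * S[k] over 0 <= k < d, and checks it against the element reached through the raw pointer. Same assumptions as above; strides() and as_ptr() are taken from the diffs, while the concrete index and expected values are made up for the example.

    extern crate ndarray;
    use ndarray::arr2;

    fn main() {
        let a = arr2(&[[1., 2., 3.],
                       [4., 5., 6.]]);
        // Offset of the element at multi-index I = [1, 2]:
        // accumulate I[k] * S[k] over the axes k.
        let index = [1, 2];
        let mut offset = 0isize;
        for (&i, &s) in index.iter().zip(a.strides()) {
            offset += i as isize * s;
        }
        // Row-major strides of a 2 x 3 array are [3, 1], so offset = 1*3 + 2*1 = 5.
        assert_eq!(offset, 5);
        unsafe {
            // The pointer as_ptr() plus that offset addresses the element in
            // row 1, column 2, which is 6.
            assert_eq!(*a.as_ptr().offset(offset), 6.);
        }
    }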