@@ -27,6 +27,14 @@ def convert_strides_to_counts(strides, itemsize):
         np.empty(3, dtype=np.int32),
         np.empty((6, 6), dtype=np.float64)[::2, ::2],
         np.empty((3, 4), order="F"),
+        np.empty((), dtype=np.float16),
+        # readonly is fixed recently (numpy/numpy#26501)
+        pytest.param(
+            np.frombuffer(b""),
+            marks=pytest.mark.skipif(
+                tuple(int(i) for i in np.__version__.split(".")[:2]) < (2, 1), reason="need numpy 2.1.0+"
+            ),
+        ),
     ),
 )
 class TestViewCPU:
@@ -57,22 +65,23 @@ def _check_view(self, view, in_arr):
         assert view.device_id == -1
         assert view.is_device_accessible is False
         assert view.exporting_obj is in_arr
+        assert view.readonly is not in_arr.flags.writeable
 
 
 def gpu_array_samples():
     # TODO: this function would initialize the device at test collection time
     samples = []
     if cp is not None:
         samples += [
-            (cp.empty(3, dtype=cp.complex64), None),
+            (cp.empty(3, dtype=cp.complex64), False),
             (cp.empty((6, 6), dtype=cp.float64)[::2, ::2], True),
             (cp.empty((3, 4), order="F"), True),
         ]
     # Numba's device_array is the only known array container that does not
     # support DLPack (so that we get to test the CAI coverage).
     if numba_cuda is not None:
         samples += [
-            (numba_cuda.device_array((2,), dtype=np.int8), None),
+            (numba_cuda.device_array((2,), dtype=np.int8), False),
             (numba_cuda.device_array((4, 2), dtype=np.float32), True),
         ]
     return samples
@@ -86,14 +95,14 @@ def gpu_array_ptr(arr):
     raise NotImplementedError(f"{arr=}")
 
 
-@pytest.mark.parametrize("in_arr,stream", (*gpu_array_samples(),))
+@pytest.mark.parametrize("in_arr,use_stream", (*gpu_array_samples(),))
 class TestViewGPU:
-    def test_args_viewable_as_strided_memory_gpu(self, in_arr, stream):
+    def test_args_viewable_as_strided_memory_gpu(self, in_arr, use_stream):
         # TODO: use the device fixture?
         dev = Device()
         dev.set_current()
         # This is the consumer stream
-        s = dev.create_stream() if stream else None
+        s = dev.create_stream() if use_stream else None
 
         @args_viewable_as_strided_memory((0,))
         def my_func(arr):
@@ -102,12 +111,12 @@ def my_func(arr):
 
         my_func(in_arr)
 
-    def test_strided_memory_view_cpu(self, in_arr, stream):
+    def test_strided_memory_view_cpu(self, in_arr, use_stream):
         # TODO: use the device fixture?
         dev = Device()
         dev.set_current()
         # This is the consumer stream
-        s = dev.create_stream() if stream else None
+        s = dev.create_stream() if use_stream else None
 
         view = StridedMemoryView(in_arr, stream_ptr=s.handle if s else -1)
         self._check_view(view, in_arr, dev)
@@ -125,3 +134,4 @@ def _check_view(self, view, in_arr, dev):
         assert view.device_id == dev.device_id
         assert view.is_device_accessible is True
         assert view.exporting_obj is in_arr
+        # can't test view.readonly with CuPy or Numba...
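A minimal sketch, assuming only NumPy, of the behavior the new `view.readonly` assertion relies on: `np.empty` allocates a writeable buffer, while `np.frombuffer` over an immutable `bytes` object yields a read-only array, so `readonly` should be the negation of `flags.writeable`.

```python
import numpy as np

# np.empty allocates a fresh, writeable buffer.
writable = np.empty((), dtype=np.float16)
assert writable.flags.writeable

# np.frombuffer over an immutable bytes object produces a read-only array;
# the diff skips this case on NumPy < 2.1 because only newer NumPy exports
# the read-only flag correctly (numpy/numpy#26501).
readonly = np.frombuffer(b"", dtype=np.float64)
assert not readonly.flags.writeable

# The diff's assertion mirrors this: view.readonly is not in_arr.flags.writeable
```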