Commit 2cfe81f

Author: Steffen Larsen (committed)
[SYCL][CUDA] Fixes uses of PI descriptors after renaming
Signed-off-by: Steffen Larsen <[email protected]>
1 parent 52a63fa commit 2cfe81f

File tree: 3 files changed, +33 -31 lines changed

sycl/plugins/cuda/pi_cuda.cpp

Lines changed: 30 additions & 28 deletions
@@ -691,6 +691,8 @@ pi_result cuda_piContextGetInfo(pi_context context, pi_context_info param_name,
   case PI_CONTEXT_INFO_REFERENCE_COUNT:
     return getInfo(param_value_size, param_value, param_value_size_ret,
                    context->get_reference_count());
+  default:
+    PI_HANDLE_UNKNOWN_PARAM_NAME(param_name);
   }
 
   return PI_OUT_OF_RESOURCES;
@@ -1010,7 +1012,7 @@ pi_result cuda_piDeviceGetInfo(pi_device device, pi_device_info param_name,
   }
   case PI_DEVICE_INFO_LOCAL_MEM_TYPE: {
     return getInfo(param_value_size, param_value, param_value_size_ret,
-                   PI_LOCAL_MEM_TYPE_LOCAL);
+                   PI_DEVICE_LOCAL_MEM_TYPE_LOCAL);
   }
   case PI_DEVICE_INFO_LOCAL_MEM_SIZE: {
     // OpenCL's "local memory" maps most closely to CUDA's "shared memory".
@@ -1051,16 +1053,16 @@ pi_result cuda_piDeviceGetInfo(pi_device device, pi_device_info param_name,
     return getInfo(param_value_size, param_value, param_value_size_ret,
                    size_t{1000u});
   }
-  case PI_DEVICE_INFO_IS_ENDIAN_LITTLE: {
+  case PI_DEVICE_INFO_ENDIAN_LITTLE: {
     return getInfo(param_value_size, param_value, param_value_size_ret, true);
   }
-  case PI_DEVICE_INFO_IS_AVAILABLE: {
+  case PI_DEVICE_INFO_AVAILABLE: {
     return getInfo(param_value_size, param_value, param_value_size_ret, true);
   }
-  case PI_DEVICE_INFO_IS_COMPILER_AVAILABLE: {
+  case PI_DEVICE_INFO_COMPILER_AVAILABLE: {
     return getInfo(param_value_size, param_value, param_value_size_ret, true);
   }
-  case PI_DEVICE_INFO_IS_LINKER_AVAILABLE: {
+  case PI_DEVICE_INFO_LINKER_AVAILABLE: {
     return getInfo(param_value_size, param_value, param_value_size_ret, true);
   }
   case PI_DEVICE_INFO_EXECUTION_CAPABILITIES: {
@@ -1630,8 +1632,8 @@ pi_result cuda_piEnqueueMemBufferWrite(pi_queue command_queue, pi_mem buffer,
                                       event_wait_list, nullptr);
 
     if (event) {
-      retImplEv = std::unique_ptr<_pi_event>(
-          _pi_event::make_native(PI_COMMAND_MEMBUFFER_WRITE, command_queue));
+      retImplEv = std::unique_ptr<_pi_event>(_pi_event::make_native(
+          PI_COMMAND_TYPE_MEM_BUFFER_WRITE, command_queue));
       retImplEv->start();
     }
 
@@ -1675,8 +1677,8 @@ pi_result cuda_piEnqueueMemBufferRead(pi_queue command_queue, pi_mem buffer,
                                       event_wait_list, nullptr);
 
     if (retEvent) {
-      retImplEv = std::unique_ptr<_pi_event>(
-          _pi_event::make_native(PI_COMMAND_MEMBUFFER_READ, command_queue));
+      retImplEv = std::unique_ptr<_pi_event>(_pi_event::make_native(
+          PI_COMMAND_TYPE_MEM_BUFFER_READ, command_queue));
       retImplEv->start();
     }
 
@@ -1915,8 +1917,8 @@ pi_result cuda_piEnqueueKernelLaunch(
     auto argIndices = kernel->get_arg_indices();
 
     if (event) {
-      retImplEv = std::unique_ptr<_pi_event>(
-          _pi_event::make_native(PI_COMMAND_KERNEL_LAUNCH, command_queue));
+      retImplEv = std::unique_ptr<_pi_event>(_pi_event::make_native(
+          PI_COMMAND_TYPE_NDRANGE_KERNEL, command_queue));
       retImplEv->start();
     }
 
@@ -2153,15 +2155,15 @@ pi_result cuda_piKernelGetGroupInfo(pi_kernel kernel, pi_device device,
   if (kernel != nullptr) {
 
     switch (param_name) {
-    case PI_KERNEL_GROUP_INFO_SIZE: {
+    case PI_KERNEL_GROUP_INFO_WORK_GROUP_SIZE: {
       int max_threads = 0;
       cl::sycl::detail::pi::assertion(cuFuncGetAttribute(&max_threads,
                                       CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK,
                                       kernel->get()) == CUDA_SUCCESS);
       return getInfo(param_value_size, param_value, param_value_size_ret,
                      size_t(max_threads));
     }
-    case PI_KERNEL_COMPILE_GROUP_INFO_SIZE: {
+    case PI_KERNEL_GROUP_INFO_COMPILE_WORK_GROUP_SIZE: {
       // Returns the work-group size specified in the kernel source or IL.
       // If the work-group size is not specified in the kernel source or IL,
       // (0, 0, 0) is returned.
@@ -2172,7 +2174,7 @@ pi_result cuda_piKernelGetGroupInfo(pi_kernel kernel, pi_device device,
       return getInfoArray(3, param_value_size, param_value,
                           param_value_size_ret, group_size);
     }
-    case PI_KERNEL_LOCAL_MEM_SIZE: {
+    case PI_KERNEL_GROUP_INFO_LOCAL_MEM_SIZE: {
       // OpenCL LOCAL == CUDA SHARED
       int bytes = 0;
       cl::sycl::detail::pi::assertion(cuFuncGetAttribute(&bytes,
@@ -2181,7 +2183,7 @@ pi_result cuda_piKernelGetGroupInfo(pi_kernel kernel, pi_device device,
       return getInfo(param_value_size, param_value, param_value_size_ret,
                      pi_uint64(bytes));
     }
-    case PI_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE: {
+    case PI_KERNEL_GROUP_INFO_PREFERRED_WORK_GROUP_SIZE_MULTIPLE: {
       // Work groups should be multiples of the warp size
       int warpSize = 0;
       cl::sycl::detail::pi::assertion(
@@ -2190,7 +2192,7 @@ pi_result cuda_piKernelGetGroupInfo(pi_kernel kernel, pi_device device,
       return getInfo(param_value_size, param_value, param_value_size_ret,
                      static_cast<size_t>(warpSize));
     }
-    case PI_KERNEL_PRIVATE_MEM_SIZE: {
+    case PI_KERNEL_GROUP_INFO_PRIVATE_MEM_SIZE: {
       // OpenCL PRIVATE == CUDA LOCAL
       int bytes = 0;
       cl::sycl::detail::pi::assertion(
@@ -2279,7 +2281,7 @@ pi_result cuda_piEventGetInfo(pi_event event, pi_event_info param_name,
   assert(event != nullptr);
 
   switch (param_name) {
-  case PI_EVENT_INFO_QUEUE:
+  case PI_EVENT_INFO_COMMAND_QUEUE:
     return getInfo<pi_queue>(param_value_size, param_value,
                              param_value_size_ret, event->get_queue());
   case PI_EVENT_INFO_COMMAND_TYPE:
@@ -2456,7 +2458,7 @@ pi_result cuda_piEnqueueEventsWait(pi_queue command_queue,
 
     if (event) {
       auto new_event =
-          _pi_event::make_native(PI_COMMAND_EVENTS_WAIT, command_queue);
+          _pi_event::make_native(PI_COMMAND_TYPE_MARKER, command_queue);
       new_event->start();
       new_event->record();
       *event = new_event;
@@ -2550,8 +2552,8 @@ pi_result cuda_piEnqueueMemBufferReadRect(
                                       event_wait_list, nullptr);
 
     if (retEvent) {
-      retImplEv = std::unique_ptr<_pi_event>(
-          _pi_event::make_native(PI_COMMAND_MEMBUFFER_READ, command_queue));
+      retImplEv = std::unique_ptr<_pi_event>(_pi_event::make_native(
+          PI_COMMAND_TYPE_MEM_BUFFER_READ, command_queue));
       retImplEv->start();
     }
 
@@ -2601,8 +2603,8 @@ pi_result cuda_piEnqueueMemBufferWriteRect(
                                       event_wait_list, nullptr);
 
     if (retEvent) {
-      retImplEv = std::unique_ptr<_pi_event>(
-          _pi_event::make_native(PI_COMMAND_MEMBUFFER_WRITE, command_queue));
+      retImplEv = std::unique_ptr<_pi_event>(_pi_event::make_native(
+          PI_COMMAND_TYPE_MEM_BUFFER_WRITE, command_queue));
       retImplEv->start();
     }
 
@@ -2656,8 +2658,8 @@ pi_result cuda_piEnqueueMemBufferCopy(pi_queue command_queue, pi_mem src_buffer,
     result = PI_CHECK_ERROR(cuMemcpyDtoDAsync(dst, src, size, stream));
 
     if (event) {
-      auto new_event =
-          _pi_event::make_native(PI_COMMAND_MEMBUFFER_COPY, command_queue);
+      auto new_event = _pi_event::make_native(PI_COMMAND_TYPE_MEM_BUFFER_COPY,
+                                              command_queue);
       new_event->record();
       *event = new_event;
     }
@@ -2694,8 +2696,8 @@ pi_result cuda_piEnqueueMemBufferCopyRect(
                                       event_wait_list, nullptr);
 
     if (event) {
-      retImplEv = std::unique_ptr<_pi_event>(
-          _pi_event::make_native(PI_COMMAND_MEMBUFFER_COPY, command_queue));
+      retImplEv = std::unique_ptr<_pi_event>(_pi_event::make_native(
+          PI_COMMAND_TYPE_MEM_BUFFER_COPY, command_queue));
       retImplEv->start();
     }
 
@@ -2798,8 +2800,8 @@ pi_result cuda_piEnqueueMemBufferFill(pi_queue command_queue, pi_mem buffer,
     }
 
     if (event) {
-      auto new_event =
-          _pi_event::make_native(PI_COMMAND_MEMBUFFER_FILL, command_queue);
+      auto new_event = _pi_event::make_native(PI_COMMAND_TYPE_MEM_BUFFER_FILL,
+                                              command_queue);
       new_event->record();
       *event = new_event;
     }

sycl/plugins/cuda/pi_cuda.hpp

Lines changed: 2 additions & 2 deletions
@@ -271,7 +271,7 @@ class _pi_event {
   pi_context get_context() const noexcept { return context_; };
 
   bool is_user_event() const noexcept {
-    return get_command_type() == PI_COMMAND_USER;
+    return get_command_type() == PI_COMMAND_TYPE_USER;
   }
 
   bool is_native_event() const noexcept { return !is_user_event(); }
@@ -288,7 +288,7 @@ class _pi_event {
   // make a user event. CUDA has no concept of user events, so this
   // functionality is implemented by the CUDA PI implementation.
   static pi_event make_user(pi_context context) {
-    return new _pi_event(PI_COMMAND_USER, context, nullptr);
+    return new _pi_event(PI_COMMAND_TYPE_USER, context, nullptr);
   }
 
   // construct a native CUDA. This maps closely to the underlying CUDA event.

sycl/unittests/pi/cuda/test_events.cpp

Lines changed: 1 addition & 1 deletion
@@ -83,7 +83,7 @@ TEST_F(DISABLED_CudaEventTests, PICreateEvent) {
 
 TEST_F(DISABLED_CudaEventTests, piGetInfoNativeEvent) {
 
-  auto foo = _pi_event::make_native(PI_COMMAND_KERNEL_LAUNCH, _queue);
+  auto foo = _pi_event::make_native(PI_COMMAND_TYPE_NDRANGE_KERNEL, _queue);
   ASSERT_NE(foo, nullptr);
 
   pi_event_status paramValue = {};

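For orientation, the pi_cuda.hpp change above keeps the plugin's distinction between user events and native events intact under the renamed descriptors. Below is a minimal illustrative sketch, not part of this commit: it assumes the plugin-internal _pi_event interface shown in the diffs (_pi_event::make_user, _pi_event::make_native, is_user_event, is_native_event) and an already-created context and queue; the function name here is hypothetical.

// Illustrative sketch only, not part of this commit. Assumes the
// plugin-internal interface from pi_cuda.hpp shown in the diffs above.
#include <cassert>
#include "pi_cuda.hpp"

static void check_event_kinds(pi_context context, pi_queue queue) {
  // A user event carries the renamed PI_COMMAND_TYPE_USER descriptor and has
  // no backing CUDA event.
  pi_event user_ev = _pi_event::make_user(context);
  assert(user_ev->is_user_event());

  // A native event records a concrete command type, e.g. the renamed
  // PI_COMMAND_TYPE_NDRANGE_KERNEL used for kernel launches.
  pi_event native_ev =
      _pi_event::make_native(PI_COMMAND_TYPE_NDRANGE_KERNEL, queue);
  assert(native_ev->is_native_event());

  // Cleanup of the raw event pointers is omitted for brevity.
}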