@@ -214,6 +214,11 @@ static_assert(sizeof(block_q6_K) == sizeof(ggml_fp16_t) + 13*QK_K/16, "wrong q6_
 static_assert(K_QUANTS_PER_ITERATION == 1 || K_QUANTS_PER_ITERATION == 2, "K_QUANTS_PER_ITERATION must be 1 or 2");
 #endif

+struct ggml_tensor_extra_gpu {
+    void * data_device[GGML_CUDA_MAX_DEVICES]; // 1 pointer for each device for split tensors
+    cudaEvent_t events[GGML_CUDA_MAX_DEVICES]; // events for synchronizing multiple GPUs
+};
+
 static __global__ void add_f32(const float * x, const float * y, float * dst, const int k) {
     const int i = blockDim.x*blockIdx.x + threadIdx.x;

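The new per-tensor extra struct holds one device pointer and one synchronization event per GPU. A minimal sketch of how such an extra could be populated for a row-split tensor, assuming hypothetical names n_devices, row_split_bytes, and tensor (error handling omitted); the actual allocation path in this change is ggml_cuda_transform_tensor further down in the diff:

    // illustrative only: one buffer slice and one timing-free event per device
    ggml_tensor_extra_gpu * extra = new ggml_tensor_extra_gpu{};
    for (int id = 0; id < n_devices; ++id) {
        cudaSetDevice(id);
        cudaMalloc(&extra->data_device[id], row_split_bytes[id]);              // this device's slice of the tensor
        cudaEventCreateWithFlags(&extra->events[id], cudaEventDisableTiming);  // ordering only, no timestamps
    }
    tensor->extra = extra;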
@@ -1970,7 +1975,6 @@ inline void ggml_cuda_op_add(
     } else {
         GGML_ASSERT(false);
     }
-    CUDA_CHECK(cudaGetLastError());

     (void) src1;
     (void) dst;
@@ -2002,7 +2006,6 @@ inline void ggml_cuda_op_mul(

         // compute
         mul_f32_cuda(src0_ddf_i01, src1_ddf_i01, dst_ddf_i01, ne00, ne10, cudaStream_main);
-        CUDA_CHECK(cudaGetLastError());
     }

     (void) dst;
@@ -2023,7 +2026,6 @@ inline void ggml_cuda_op_silu(

     // compute
     silu_f32_cuda(src0_ddf_i, dst_ddf_i, ne00*i01_diff, cudaStream_main);
-    CUDA_CHECK(cudaGetLastError());

     (void) src1;
     (void) dst;
@@ -2046,7 +2048,6 @@ inline void ggml_cuda_op_rms_norm(

     // compute
     rms_norm_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, cudaStream_main);
-    CUDA_CHECK(cudaGetLastError());

     (void) src1;
     (void) dst;
@@ -2125,7 +2126,6 @@ inline void ggml_cuda_op_dequantize_mul_mat_vec(
             GGML_ASSERT(false);
             break;
     }
-    CUDA_CHECK(cudaGetLastError());

 #ifdef GGML_CUDA_DMMV_F16
     if (src1_convert_f16) {
@@ -2202,7 +2202,6 @@ inline void ggml_cuda_op_rope(

     // compute
     rope_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, p, theta_scale, cudaStream_main);
-    CUDA_CHECK(cudaGetLastError());

     (void) dst;
     (void) src0_ddq_i;
@@ -2226,7 +2225,6 @@ inline void ggml_cuda_op_diag_mask_inf(

     // compute
     diag_mask_inf_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, ne01, n_past, cudaStream_main);
-    CUDA_CHECK(cudaGetLastError());

     (void) dst;
     (void) src0_ddq_i;
@@ -2248,7 +2246,6 @@ inline void ggml_cuda_op_soft_max(

     // compute
     soft_max_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, cudaStream_main);
-    CUDA_CHECK(cudaGetLastError());

     (void) src1;
     (void) dst;
@@ -2344,10 +2341,11 @@ static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggm
     size_t src1_asf[GGML_CUDA_MAX_DEVICES] = {0};
     size_t dst_asf[GGML_CUDA_MAX_DEVICES] = {0};

-    // if multiple GPUs are used they need to wait for the main GPU to finish
+    // if multiple devices are used they need to wait for the main device
+    // here an event is recorded that signifies that the main device has finished calculating the input data
     if (split && g_device_count > 1) {
         CUDA_CHECK(cudaSetDevice(g_main_device));
-        CUDA_CHECK(cudaDeviceSynchronize());
+        CUDA_CHECK(cudaEventRecord(src0_extra->events[g_main_device], g_cudaStreams_main[g_main_device]));
     }

     for (int id = 0; id < g_device_count; ++id) {
@@ -2373,6 +2371,12 @@ static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggm
         int64_t row_diff = row_high - row_low;

         cudaSetDevice(id);
+        cudaStream_t cudaStream_main = g_cudaStreams_main[id];
+
+        // wait for main GPU data if necessary
+        if (split && id != g_main_device) {
+            CUDA_CHECK(cudaStreamWaitEvent(cudaStream_main, src0_extra->events[g_main_device]));
+        }

         if (src0_on_device && src0_is_contiguous) {
             if (src0_is_f32) {
@@ -2448,8 +2452,6 @@ static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggm
                 }
                 const int64_t i11 = i13*ne12 + i12;

-                cudaStream_t cudaStream_main = g_cudaStreams_main[id];
-
                 // for split tensors the data begins at i0 == i0_offset_low
                 char  * src0_ddq_i = src0_ddq[id] + (i0 - i0_offset_low)*src0_stride*src0_ts/src0_bs;
                 float * src0_ddf_i = src0_ddf[id] + (i0 - i0_offset_low)*src0_stride;
@@ -2509,6 +2511,7 @@ static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggm

                 // do the computation
                 op(src0, src1, dst, src0_ddq_i, src0_ddf_i, src1_ddf_i, dst_ddf_i, i02, i01_low, i01_high, i11, cudaStream_main);
+                CUDA_CHECK(cudaGetLastError());

                 // copy dst to host or other device if necessary
                 if (!dst_on_device) {
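The CUDA_CHECK(cudaGetLastError()) calls deleted from the individual op wrappers above are consolidated into this single check after op(...). cudaGetLastError returns the most recent error raised on the calling host thread, so one check right after the callback still catches launch failures from the kernels the callback enqueued. A minimal sketch of the launch-then-check pattern, with a hypothetical kernel scale_f32 and hypothetical launch parameters (not identifiers from this file):

    // hypothetical kernel launch followed by a single launch-status check
    scale_f32<<<num_blocks, block_size, 0, stream>>>(dst_ptr, n, factor);
    const cudaError_t err = cudaGetLastError();  // e.g. an invalid launch configuration shows up here
    GGML_ASSERT(err == cudaSuccess);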
@@ -2538,6 +2541,11 @@ static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggm
                         CUDA_CHECK(cudaMemcpyAsync(dhf_dst_i, dst_ddf_i, dst_stride*sizeof(float), kind, cudaStream_main));
                     }
                 }
+
+                // signify to main device that other device is done
+                if (split && g_device_count > 1 && id != g_main_device) {
+                    CUDA_CHECK(cudaEventRecord(src0_extra->events[id], cudaStream_main));
+                }
             }
         }
     }
@@ -2549,7 +2557,6 @@ static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggm
         }

         CUDA_CHECK(cudaSetDevice(id));
-        CUDA_CHECK(cudaDeviceSynchronize());

         if (src0_asq[id] > 0) {
             ggml_cuda_pool_free(src0_ddq[id], src0_asq[id]);
@@ -2564,6 +2571,21 @@ static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggm
             ggml_cuda_pool_free(dst_ddf[id], dst_asf[id]);
         }
     }
+
+    // main device waits for all other devices to be finished
+    if (split && g_device_count > 1) {
+        CUDA_CHECK(cudaSetDevice(g_main_device));
+        for (int id = 0; id < g_device_count; ++id) {
+            if (id != g_main_device) {
+                CUDA_CHECK(cudaStreamWaitEvent(g_cudaStreams_main[g_main_device], src0_extra->events[id]));
+            }
+        }
+    }
+
+    if (dst->backend == GGML_BACKEND_CPU) {
+        CUDA_CHECK(cudaSetDevice(g_main_device));
+        CUDA_CHECK(cudaDeviceSynchronize());
+    }
 }

 void ggml_cuda_add(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
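Taken together, the ggml_cuda_op changes replace host-blocking cudaDeviceSynchronize calls with GPU-side ordering: the main device records an event once the input data is ready, every other device's stream waits on that event before computing, each of those devices records its own event when it has finished, the main stream waits on all of them, and the host only synchronizes when the result has to land in CPU memory. A generic sketch of that record/wait pattern for two devices, using made-up names (main_dev, other_dev, stream_main, stream_other) rather than the ggml-cuda globals, with error checking omitted:

    // event-based ordering between two GPUs; the host thread never blocks here
    cudaEvent_t input_ready, work_done;

    cudaSetDevice(main_dev);
    cudaEventCreateWithFlags(&input_ready, cudaEventDisableTiming);
    // ... enqueue input preparation on stream_main ...
    cudaEventRecord(input_ready, stream_main);            // "inputs are ready" marker

    cudaSetDevice(other_dev);
    cudaEventCreateWithFlags(&work_done, cudaEventDisableTiming);
    cudaStreamWaitEvent(stream_other, input_ready, 0);    // cross-device wait, enqueued on the GPU
    // ... enqueue kernels and copies on stream_other ...
    cudaEventRecord(work_done, stream_other);             // "this device is done" marker

    cudaSetDevice(main_dev);
    cudaStreamWaitEvent(stream_main, work_done, 0);       // main stream resumes only after the other GPU
    // cudaDeviceSynchronize() is only needed afterwards if the host must read the result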
@@ -2803,6 +2825,10 @@ void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor) {
         cudaMemcpy(buf, buf_host, size, cudaMemcpyHostToDevice);

         extra->data_device[id] = buf;
+
+        if (backend == GGML_BACKEND_GPU_SPLIT) {
+            CUDA_CHECK(cudaEventCreateWithFlags(&extra->events[id], cudaEventDisableTiming));
+        }
     }

     tensor->extra = extra;
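The events are created with cudaEventDisableTiming because they are only used for ordering; an event without timestamps makes cudaEventRecord and cudaStreamWaitEvent cheaper. The matching teardown is not part of this diff; a hypothetical cleanup for the per-device extra might look like the following (n_devices and the assumption that unused slots stay null are illustrative, not part of this change):

    // hypothetical release path for a ggml_tensor_extra_gpu (not shown in this diff)
    for (int id = 0; id < n_devices; ++id) {
        cudaSetDevice(id);
        if (extra->data_device[id] != nullptr) {
            cudaFree(extra->data_device[id]);
        }
        if (extra->events[id] != nullptr) {
            cudaEventDestroy(extra->events[id]);
        }
    }
    delete extra;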