Skip to content

Commit 0208355

Browse files
CUDA: fix race conditions in FlashAttention kernels (#13438)
1 parent d2a4ef0 commit 0208355

File tree

2 files changed

+3
-0
lines changed

2 files changed

+3
-0
lines changed

ggml/src/ggml-cuda/fattn-mma-f16.cuh

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -874,6 +874,8 @@ static __device__ __forceinline__ void flash_attn_ext_f16_process_tile(
             }
         }
 
+    __syncthreads();
+
     // Write back combined meta data:
 #pragma unroll
     for (int imeta = 0; imeta < nmeta; ++imeta) {

ggml/src/ggml-cuda/fattn-vec-f16.cuh

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -168,6 +168,7 @@ static __global__ void flash_attn_vec_ext_f16(
     for (int j = 0; j < ncols; ++j) {
         KQ[j*D + tid] = -HALF_MAX_HALF;
     }
+    __syncthreads();
 
     half2 VKQ[ncols] = {{0.0f, 0.0f}};

0 commit comments

Comments
 (0)