Skip to content

Commit a5b57b0

Browse files
CUDA: enable Gemma FA for HIP/Pascal (#9581)
1 parent ecd5d6b commit a5b57b0

File tree

3 files changed

+10
-10
lines changed

3 files changed

+10
-10
lines changed

ggml/src/ggml-cuda.cu

Lines changed: 8 additions & 8 deletions
Original file line number · Diff line number · Diff line change
@@ -2976,19 +2976,19 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
29762976
case GGML_OP_LEAKY_RELU:
29772977
case GGML_OP_RWKV_WKV:
29782978
return true;
2979-
case GGML_OP_FLASH_ATTN_EXT:
2980-
#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
2981-
return (op->src[0]->ne[0] == 64 && op->src[1]->type == GGML_TYPE_F16) || op->src[0]->ne[0] == 128;
2982-
#else
2979+
case GGML_OP_FLASH_ATTN_EXT: {
2980+
if (op->src[0]->ne[0] == 64 && op->src[1]->type == GGML_TYPE_F16) {
2981+
return true;
2982+
}
29832983
if (op->src[0]->ne[0] == 128) {
29842984
return true;
29852985
}
2986-
if (op->src[0]->ne[0] == 64 && op->src[1]->type == GGML_TYPE_F16) {
2986+
if (op->src[0]->ne[0] == 256 && op->src[1]->type == GGML_TYPE_F16 && op->src[2]->type == GGML_TYPE_F16) {
29872987
return true;
29882988
}
2989-
return ggml_cuda_info().devices[cuda_ctx->device].cc >= CC_VOLTA &&
2990-
op->src[1]->type == GGML_TYPE_F16 && op->src[2]->type == GGML_TYPE_F16;
2991-
#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
2989+
const int cc = ggml_cuda_info().devices[cuda_ctx->device].cc;
2990+
return cc >= CC_VOLTA && cc < CC_OFFSET_AMD && op->src[1]->type == GGML_TYPE_F16 && op->src[2]->type == GGML_TYPE_F16;
2991+
}
29922992
case GGML_OP_CROSS_ENTROPY_LOSS:
29932993
case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
29942994
case GGML_OP_OPT_STEP_ADAMW:

ggml/src/ggml-cuda/fattn.cu

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -314,7 +314,7 @@ void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst
314314
}
315315

316316
if (!fast_fp16_available(cc)) {
317-
if (Q->ne[1] <= 8) {
317+
if (Q->ne[1] <= 8 || Q->ne[0] == 256) {
318318
ggml_cuda_flash_attn_ext_vec_f32(ctx, dst);
319319
} else {
320320
ggml_cuda_flash_attn_ext_tile_f32(ctx, dst);

tests/test-backend-ops.cpp

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -3599,7 +3599,7 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op
35993599
if (hs != 128 && logit_softcap != 0.0f) continue;
36003600
for (int nh : { 32, }) {
36013601
for (int kv : { 512, 1024, }) {
3602-
for (int nb : { 1, 2, 4, 8, }) {
3602+
for (int nb : { 1, 3, 32, 35, }) {
36033603
for (ggml_type type_KV : {GGML_TYPE_F16, GGML_TYPE_Q8_0, GGML_TYPE_Q4_0}) {
36043604
test_cases.emplace_back(new test_flash_attn_ext(hs, nh, kv, nb, mask, max_bias, logit_softcap, type_KV));
36053605
}

0 commit comments

Comments (0)