@@ -326,6 +326,8 @@ enum llm_kv {
     LLM_KV_POOLING_TYPE,
     LLM_KV_LOGIT_SCALE,
     LLM_KV_DECODER_START_TOKEN_ID,
+    LLM_KV_ATTN_LOGIT_SOFTCAPPING,
+    LLM_KV_FINAL_LOGIT_SOFTCAPPING,
 
     LLM_KV_ATTENTION_HEAD_COUNT,
     LLM_KV_ATTENTION_HEAD_COUNT_KV,
@@ -416,6 +418,8 @@ static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
     { LLM_KV_POOLING_TYPE,                  "%s.pooling_type"            },
     { LLM_KV_LOGIT_SCALE,                   "%s.logit_scale"             },
     { LLM_KV_DECODER_START_TOKEN_ID,        "%s.decoder_start_token_id"  },
+    { LLM_KV_ATTN_LOGIT_SOFTCAPPING,        "%s.attn_logit_softcapping"  },
+    { LLM_KV_FINAL_LOGIT_SOFTCAPPING,       "%s.final_logit_softcapping" },
 
     { LLM_KV_ATTENTION_HEAD_COUNT,          "%s.attention.head_count"    },
     { LLM_KV_ATTENTION_HEAD_COUNT_KV,       "%s.attention.head_count_kv" },
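
Note (not part of the diff): the %s placeholder in LLM_KV_NAMES is formatted with the architecture name, so for Gemma 2 the new metadata keys appear in the GGUF file as gemma2.attn_logit_softcapping and gemma2.final_logit_softcapping.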
@@ -2127,6 +2131,9 @@ struct llama_hparams {
     float f_norm_eps;
     float f_norm_rms_eps;
 
+    float f_attn_logit_softcapping = 50.0f;
+    float f_final_logit_softcapping = 30.0f;
+
     float rope_attn_factor = 1.0f;
     float rope_freq_base_train;
     float rope_freq_scale_train;
@@ -2143,8 +2150,9 @@ struct llama_hparams {
     float f_max_alibi_bias = 0.0f;
     float f_logit_scale = 0.0f;
 
-    bool causal_attn = true;
-    bool use_alibi   = false;
+    bool causal_attn   = true;
+    bool use_alibi     = false;
+    bool attn_soft_cap = false;
 
     enum llama_pooling_type pooling_type = LLAMA_POOLING_TYPE_NONE;
     enum llama_rope_type rope_type = LLAMA_ROPE_TYPE_NONE;
@@ -4822,6 +4830,9 @@ static void llm_load_hparams(
         case LLM_ARCH_GEMMA2:
             {
                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+                ml.get_key(LLM_KV_ATTN_LOGIT_SOFTCAPPING, hparams.f_attn_logit_softcapping, false);
+                ml.get_key(LLM_KV_FINAL_LOGIT_SOFTCAPPING, hparams.f_final_logit_softcapping, false);
+                hparams.attn_soft_cap = true;
 
                 switch (hparams.n_layer) {
                     case 42: model.type = e_model::MODEL_9B; break;
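
Note (not part of the diff): the 50.0f / 30.0f defaults declared in llama_hparams match the attn_logit_softcapping and final_logit_softcapping values in Google's published Gemma 2 config. Assuming ml.get_key follows llama_model_loader's usual signature, the trailing false marks each key as optional, so GGUF files converted without these keys still fall back to those defaults, while hparams.attn_soft_cap is enabled unconditionally for this architecture.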
@@ -7737,6 +7748,12 @@ static struct ggml_tensor * llm_build_kqv(
             kq = ggml_scale(ctx, kq, 30);
         }
 
+        if (hparams.attn_soft_cap) {
+            kq = ggml_scale(ctx, kq, 1.0f / hparams.f_attn_logit_softcapping);
+            kq = ggml_tanh(ctx, kq);
+            kq = ggml_scale(ctx, kq, hparams.f_attn_logit_softcapping);
+        }
+
         kq = ggml_soft_max_ext(ctx, kq, kq_mask, kq_scale, hparams.f_max_alibi_bias);
         cb(kq, "kq_soft_max_ext", il);
 
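
Note (not part of the diff): the three ops above apply soft-capping to the attention scores, i.e. kq = cap * tanh(kq / cap) elementwise before the softmax. A minimal standalone sketch of the same scalar function, using a hypothetical softcap helper (not a llama.cpp function; the diff builds it from ggml_scale / ggml_tanh), shows how small logits pass through nearly unchanged while large ones are bounded to (-cap, cap):

// Standalone illustration of logit soft-capping; `softcap` is a hypothetical
// helper, not part of llama.cpp.
#include <cmath>
#include <cstdio>

static float softcap(float x, float cap) {
    // cap * tanh(x / cap): ~identity for |x| << cap, saturates at +/-cap
    return cap * std::tanh(x / cap);
}

int main() {
    const float cap = 50.0f; // the f_attn_logit_softcapping default above
    std::printf("softcap(  1) = %.4f\n", softcap(  1.0f, cap)); // ~0.9999 (near-identity)
    std::printf("softcap( 60) = %.4f\n", softcap( 60.0f, cap)); // ~41.68  (compressed)
    std::printf("softcap(500) = %.4f\n", softcap(500.0f, cap)); // ~50.00  (saturated)
    return 0;
}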
@@ -11197,7 +11214,7 @@ struct llm_build_context {
                         ext_factor, attn_factor, beta_fast, beta_slow);
                 cb(Qcur, "Qcur", il);
 
-                Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head_k)));
+                Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd / n_head)));
                 cb(Qcur, "Qcur_scaled", il);
 
                 Kcur = ggml_rope_ext(
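
Note (not part of the diff): this swaps the query scaling denominator from the key head dimension to n_embd / n_head. For Gemma 2 the two differ (e.g. the 27B config has head_dim 128 but hidden_size / num_heads = 4608 / 32 = 144), so the change follows the model's query_pre_attn_scalar rather than being cosmetic.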
@@ -11264,6 +11281,12 @@ struct llm_build_context {
 
         // lm_head
         cur = ggml_mul_mat(ctx0, model.output, cur);
+
+        // final logit soft-capping
+        cur = ggml_scale(ctx0, cur, 1.0f / hparams.f_final_logit_softcapping);
+        cur = ggml_tanh(ctx0, cur);
+        cur = ggml_scale(ctx0, cur, hparams.f_final_logit_softcapping);
+
         cb(cur, "result_output", -1);
 
         ggml_build_forward_expand(gf, cur);
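
Note (not part of the diff): with the f_final_logit_softcapping default of 30.0f, the lm_head output is squashed into (-30, 30): a raw logit of 100 becomes 30 * tanh(100 / 30) ≈ 29.9, while a raw logit of 1 stays at 30 * tanh(1 / 30) ≈ 0.9996, so only extreme values are affected before result_output is recorded.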
@@ -20022,6 +20045,12 @@ struct llama_context * llama_new_context_with_model(
         params.flash_attn = false;
     }
 
+    if (params.flash_attn && model->hparams.attn_soft_cap) {
+        LLAMA_LOG_WARN("%s: flash_attn is not compatible with attn_soft_cap - forcing off\n", __func__);
+        params.flash_attn = false;
+    }
+
+
     if (params.flash_attn && model->hparams.n_embd_head_k != model->hparams.n_embd_head_v) {
         LLAMA_LOG_WARN("%s: flash_attn requires n_embd_head_k == n_embd_head_v - forcing off\n", __func__);
         params.flash_attn = false;
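
Note (not part of the diff): the flash-attention path fuses QK^T, masking and softmax into one kernel, leaving no place to apply the tanh cap between the matmul and the softmax, so soft-capped models simply force flash_attn off here, mirroring the existing n_embd_head_k != n_embd_head_v fallback just above.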