@@ -202,6 +202,7 @@ const char * llm_type_name(llm_type type) {
202202 case LLM_TYPE_17B_16E: return "17Bx16E (Scout)";
203203 case LLM_TYPE_17B_128E: return "17Bx128E (Maverick)";
204204 case LLM_TYPE_A13B: return "A13B";
205+ case LLM_TYPE_8B_A1B: return "8B.A1B";
205206 case LLM_TYPE_21B_A3B: return "21B.A3B";
206207 case LLM_TYPE_30B_A3B: return "30B.A3B";
207208 case LLM_TYPE_80B_A3B: return "80B.A3B";
@@ -2107,14 +2108,29 @@ void llama_model::load_hparams(llama_model_loader & ml) {
21072108 for (uint32_t il = 0; il < hparams.n_layer; ++il) {
21082109 hparams.recurrent_layer_arr[il] = hparams.n_head_kv(il) == 0;
21092110 }
2111+ hparams.n_layer_dense_lead = hparams.n_layer;
21102112 switch (hparams.n_ff()) {
21112113 case 4608: type = LLM_TYPE_350M; break;
21122114 case 6912: type = LLM_TYPE_700M; break;
21132115 case 8192: type = LLM_TYPE_1_2B; break;
21142116 case 10752: type = LLM_TYPE_2_6B; break;
2115- default: type = LLM_TYPE_UNKNOWN;
2117+ default: type = LLM_TYPE_UNKNOWN;
21162118 }
21172119 } break;
2120+ case LLM_ARCH_LFM2MOE:
2121+ {
2122+ ml.get_key(LLM_KV_SHORTCONV_L_CACHE, hparams.n_shortconv_l_cache);
2123+ ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
2124+ ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead);
2125+ ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
2126+ ml.get_key(LLM_KV_EXPERT_GATING_FUNC, hparams.expert_gating_func);
2127+
2128+ for (uint32_t il = 0; il < hparams.n_layer; ++il) {
2129+ hparams.recurrent_layer_arr[il] = hparams.n_head_kv(il) == 0;
2130+ }
2131+
2132+ type = LLM_TYPE_8B_A1B;
2133+ } break;
21182134 case LLM_ARCH_SMALLTHINKER:
21192135 {
21202136 const bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
@@ -5995,6 +6011,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
59956011 }
59966012 } break;
59976013 case LLM_ARCH_LFM2:
6014+ case LLM_ARCH_LFM2MOE:
59986015 {
59996016 tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
60006017 tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0);
@@ -6006,11 +6023,23 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
60066023
60076024 for (int i = 0; i < n_layer; ++i) {
60086025 auto & layer = layers[i];
6009- // ffn is same for transformer and conv layers
6026+
6027+ const bool is_moe_layer = i >= static_cast<int>(hparams.n_layer_dense_lead);
6028+
6029+ // ffn/moe is same for transformer and conv layers
60106030 layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
6011- layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
6012- layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
6013- layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
6031+ if (is_moe_layer) {
6032+ GGML_ASSERT(n_expert && n_expert_used);
6033+ layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
6034+ layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, hparams.n_ff_exp, n_expert}, 0);
6035+ layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {hparams.n_ff_exp, n_embd, n_expert}, 0);
6036+ layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, hparams.n_ff_exp, n_expert}, 0);
6037+ layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, 0);
6038+ } else { // dense
6039+ layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
6040+ layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
6041+ layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
6042+ }
60146043
60156044 // for operator_norm
60166045 layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
@@ -6492,7 +6521,7 @@ void llama_model::print_info() const {
64926521 LLAMA_LOG_INFO("%s: expert_weights_norm = %d\n", __func__, hparams.expert_weights_norm);
64936522 }
64946523
6495- if (arch == LLM_ARCH_SMALLTHINKER) {
6524+	    if (arch == LLM_ARCH_SMALLTHINKER || arch == LLM_ARCH_LFM2MOE) {
64966525 LLAMA_LOG_INFO("%s: n_ff_exp = %d\n", __func__, hparams.n_ff_exp);
64976526 LLAMA_LOG_INFO("%s: expert_gating_func = %s\n", __func__, llama_expert_gating_func_name((llama_expert_gating_func_type) hparams.expert_gating_func));
64986527 }
@@ -18784,6 +18813,8 @@ struct llm_build_lfm2 : public llm_graph_context {
1878418813 ggml_tensor * inp_out_ids = build_inp_out_ids();
1878518814
1878618815 for (int il = 0; il < n_layer; ++il) {
18816+ const bool is_moe_layer = il >= static_cast<int>(hparams.n_layer_dense_lead);
18817+
1878718818 auto * prev_cur = cur;
1878818819 cur = build_norm(cur, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il);
1878918820 cb(cur, "model.layers.{}.operator_norm", il);
@@ -18798,7 +18829,16 @@ struct llm_build_lfm2 : public llm_graph_context {
1879818829 }
1879918830
1880018831 cur = ggml_add(ctx0, prev_cur, cur);
18801- cur = ggml_add(ctx0, cur, build_feed_forward(cur, il));
18832+
18833+ auto * ffn_norm_out = build_norm(cur, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, il);
18834+ cb(ffn_norm_out, "model.layers.{}.ffn_norm", il);
18835+
18836+ ggml_tensor * ffn_out = is_moe_layer ?
18837+ build_moe_feed_forward(ffn_norm_out, il) :
18838+ build_dense_feed_forward(ffn_norm_out, il);
18839+	        cb(ffn_out, "model.layers.{}.ffn_out", il);
18840+
18841+ cur = ggml_add(ctx0, cur, ffn_out);
1880218842 }
1880318843
1880418844 cur = build_norm(cur, model.tok_norm, NULL, LLM_NORM_RMS, -1);
@@ -18813,23 +18853,32 @@ struct llm_build_lfm2 : public llm_graph_context {
1881318853 ggml_build_forward_expand(gf, cur);
1881418854 }
1881518855
18816- ggml_tensor * build_feed_forward(ggml_tensor * cur,
18817- int il) const {
18818- cur = build_norm(cur, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, il);
18819- cb(cur, "model.layers.{}.ffn_norm", il);
18856+ ggml_tensor * build_moe_feed_forward(ggml_tensor * cur,
18857+ int il) const {
18858+ return build_moe_ffn(cur,
18859+ model.layers[il].ffn_gate_inp,
18860+ model.layers[il].ffn_up_exps,
18861+ model.layers[il].ffn_gate_exps,
18862+ model.layers[il].ffn_down_exps,
18863+ model.layers[il].ffn_exp_probs_b,
18864+ n_expert, n_expert_used,
18865+ LLM_FFN_SILU, true,
18866+ false, 0.0,
18867+ static_cast<llama_expert_gating_func_type>(hparams.expert_gating_func),
18868+ il);
18869+ }
1882018870
18871+ ggml_tensor * build_dense_feed_forward(ggml_tensor * cur,
18872+ int il) const {
1882118873 GGML_ASSERT(!model.layers[il].ffn_up_b);
1882218874 GGML_ASSERT(!model.layers[il].ffn_gate_b);
1882318875 GGML_ASSERT(!model.layers[il].ffn_down_b);
18824- cur = build_ffn(cur,
18876+ return build_ffn(cur,
1882518877 model.layers[il].ffn_up, NULL, NULL,
1882618878 model.layers[il].ffn_gate, NULL, NULL,
1882718879 model.layers[il].ffn_down, NULL, NULL,
1882818880 NULL,
1882918881 LLM_FFN_SILU, LLM_FFN_PAR, il);
18830- cb(cur, "model.layers.{}.feed_forward.w2", il);
18831-
18832- return cur;
1883318882 }
1883418883
1883518884 ggml_tensor * build_attn_block(ggml_tensor * cur,
@@ -19999,6 +20048,7 @@ ggml_cgraph * llama_model::build_graph(const llm_graph_params & params) const {
1999920048 llm = std::make_unique<llm_build_falcon_h1>(*this, params);
2000020049 } break;
2000120050 case LLM_ARCH_LFM2:
20051+ case LLM_ARCH_LFM2MOE:
2000220052 {
2000320053 llm = std::make_unique<llm_build_lfm2>(*this, params);
2000420054 } break;
@@ -20226,6 +20276,7 @@ llama_rope_type llama_model_rope_type(const llama_model * model) {
2022620276 case LLM_ARCH_OPENAI_MOE:
2022720277 case LLM_ARCH_HUNYUAN_DENSE:
2022820278 case LLM_ARCH_LFM2:
20279+ case LLM_ARCH_LFM2MOE:
2022920280 case LLM_ARCH_SMALLTHINKER:
2023020281 case LLM_ARCH_GLM4_MOE:
2023120282 case LLM_ARCH_SEED_OSS:
0 commit comments