
Commit bc57d02

younesbelkada and compilade authored and committed
llama : support for falcon-mamba architecture (ggml-org#9074)
* feat: initial support for llama.cpp
* fix: lint
* refactor: better refactor
* Update src/llama.cpp
  Co-authored-by: compilade <[email protected]>
* Update src/llama.cpp
  Co-authored-by: compilade <[email protected]>
* fix: address comments
* Update convert_hf_to_gguf.py
  Co-authored-by: compilade <[email protected]>
* fix: add more cleanup and harmonization
* fix: lint
* Update gguf-py/gguf/gguf_writer.py
  Co-authored-by: compilade <[email protected]>
* fix: change name
* Apply suggestions from code review
  Co-authored-by: compilade <[email protected]>
* add in operator
* fix: add `dt_b_c_rms` in `llm_load_print_meta`
* fix: correct printf format for bool
* fix: correct print format
* Update src/llama.cpp
  Co-authored-by: compilade <[email protected]>
* llama : quantize more Mamba tensors
* llama : use f16 as the fallback of fallback quant types

---------

Co-authored-by: compilade <[email protected]>
1 parent ffa1667 commit bc57d02

File tree

4 files changed: +35 -24 lines changed


convert_hf_to_gguf.py

Lines changed: 10 additions & 22 deletions
@@ -295,6 +295,7 @@ def prepare_tensors(self):
                         gguf.MODEL_TENSOR.FFN_GATE_INP,
                         gguf.MODEL_TENSOR.POS_EMBD,
                         gguf.MODEL_TENSOR.TOKEN_TYPES,
+                        gguf.MODEL_TENSOR.SSM_CONV1D,
                     )
                 )
                 or not name.endswith(".weight")
@@ -2728,7 +2729,7 @@ class StarCoder2Model(Model):
     model_arch = gguf.MODEL_ARCH.STARCODER2


-@Model.register("MambaForCausalLM", "MambaLMHeadModel")
+@Model.register("MambaForCausalLM", "MambaLMHeadModel", "FalconMambaForCausalLM")
 class MambaModel(Model):
     model_arch = gguf.MODEL_ARCH.MAMBA

@@ -2759,20 +2760,24 @@ def set_gguf_parameters(self):
         # ref: https://github.com/state-spaces/mamba/blob/ce59daea3a090d011d6476c6e5b97f6d58ddad8b/mamba_ssm/modules/mamba_simple.py#L58
         dt_rank = self.find_hparam(["time_step_rank", "dt_rank"], optional=True) or -(d_model // -16)
         rms_norm_eps = self.find_hparam(["layer_norm_epsilon", "rms_norm_eps"], optional=True) or 1e-5
-
+        use_dt_b_c_norm = False
+        # For falconmamba we do apply RMS norm on B / DT and C layers
+        if self.find_hparam(["model_type"], optional=True) in ("falcon_mamba",):
+            use_dt_b_c_norm = True
         # Fail early for models which don't have a block expansion factor of 2
         assert d_inner == 2 * d_model

         self.gguf_writer.add_context_length(2**20) # arbitrary value; for those who use the default
         self.gguf_writer.add_embedding_length(d_model)
         self.gguf_writer.add_feed_forward_length(0) # unused, but seemingly required when loading
         self.gguf_writer.add_head_count(0) # unused, but seemingly required when loading
-        self.gguf_writer.add_block_count(self.hparams["n_layer"])
+        self.gguf_writer.add_block_count(self.block_count)
         self.gguf_writer.add_ssm_conv_kernel(d_conv)
         self.gguf_writer.add_ssm_inner_size(d_inner)
         self.gguf_writer.add_ssm_state_size(d_state)
         self.gguf_writer.add_ssm_time_step_rank(dt_rank)
         self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps)
+        self.gguf_writer.add_ssm_dt_b_c_rms(use_dt_b_c_norm) # For classic Mamba we don't apply rms norm on B / DT layers
         self.gguf_writer.add_file_type(self.ftype)

     _tok_embd = None
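For readers following the conversion path, here is a minimal sketch of the detection logic introduced above, using a plain dict in place of Model.find_hparam; the hparams values are illustrative only.

# Minimal sketch of the new detection logic (plain dict instead of Model.find_hparam).
def wants_dt_b_c_rms(hparams: dict) -> bool:
    # FalconMamba checkpoints report model_type == "falcon_mamba" in their config,
    # which is what toggles use_dt_b_c_norm above; classic Mamba stays False.
    return hparams.get("model_type") in ("falcon_mamba",)

assert wants_dt_b_c_rms({"model_type": "falcon_mamba"}) is True
assert wants_dt_b_c_rms({"model_type": "mamba"}) is False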
@@ -2799,23 +2804,6 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter

         return [(new_name, data_torch)]

-    def tensor_force_quant(self, name: str, new_name: str, bid: int | None, n_dims: int) -> gguf.GGMLQuantizationType | bool:
-        if bid is not None and new_name in (
-            self.format_tensor_name(
-                n, bid, ".weight" if name.endswith(".weight") else ""
-            )
-            for n in [
-                gguf.MODEL_TENSOR.SSM_CONV1D,
-                gguf.MODEL_TENSOR.SSM_X,
-                gguf.MODEL_TENSOR.SSM_DT,
-                gguf.MODEL_TENSOR.SSM_A,
-                gguf.MODEL_TENSOR.SSM_D,
-            ]
-        ):
-            return gguf.GGMLQuantizationType.F32
-
-        return super().tensor_force_quant(name, new_name, bid, n_dims)
-

 @Model.register("CohereForCausalLM")
 class CommandR2Model(Model):
@@ -3809,7 +3797,7 @@ class ExaoneModel(Model):
     def set_gguf_parameters(self):
         hparams = self.hparams

-        assert(hparams["activation_function"] == "silu")
+        assert (hparams["activation_function"] == "silu")

         max_position_embeddings = hparams["max_position_embeddings"]
         embed_dim = hparams["hidden_size"]
@@ -3872,8 +3860,8 @@ def prepare_tensors(self):

         super().prepare_tensors()

-###### CONVERSION LOGIC ######

+###### CONVERSION LOGIC ######

 # tree of lazy tensors
 class LazyTorchTensor(gguf.LazyBase):

gguf-py/gguf/constants.py

Lines changed: 2 additions & 0 deletions
@@ -130,6 +130,7 @@ class SSM:
         INNER_SIZE     = "{arch}.ssm.inner_size"
         STATE_SIZE     = "{arch}.ssm.state_size"
         TIME_STEP_RANK = "{arch}.ssm.time_step_rank"
+        DT_B_C_RMS     = "{arch}.ssm.dt_b_c_rms"

     class Tokenizer:
         MODEL = "tokenizer.ggml.model"
@@ -1391,6 +1392,7 @@ def get_type(val: Any) -> GGUFValueType:
 KEY_SSM_INNER_SIZE     = Keys.SSM.INNER_SIZE
 KEY_SSM_STATE_SIZE     = Keys.SSM.STATE_SIZE
 KEY_SSM_TIME_STEP_RANK = Keys.SSM.TIME_STEP_RANK
+KEY_SSM_DT_B_C_RMS     = Keys.SSM.DT_B_C_RMS

 # tokenization
 KEY_TOKENIZER_MODEL = Keys.Tokenizer.MODEL
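As a quick illustration (assuming the gguf-py version from this commit is installed), the new key template resolves per architecture; for the mamba arch the flag is stored under the name shown below.

import gguf

# "{arch}.ssm.dt_b_c_rms" formatted for the mamba architecture
key = gguf.Keys.SSM.DT_B_C_RMS.format(arch="mamba")
print(key)  # mamba.ssm.dt_b_c_rms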

gguf-py/gguf/gguf_writer.py

Lines changed: 3 additions & 0 deletions
@@ -730,6 +730,9 @@ def add_ssm_state_size(self, value: int) -> None:
     def add_ssm_time_step_rank(self, value: int) -> None:
         self.add_uint32(Keys.SSM.TIME_STEP_RANK.format(arch=self.arch), value)

+    def add_ssm_dt_b_c_rms(self, value: bool) -> None:
+        self.add_bool(Keys.SSM.DT_B_C_RMS.format(arch=self.arch), value)
+
     def add_tokenizer_model(self, model: str) -> None:
         self.add_string(Keys.Tokenizer.MODEL, model)

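A minimal usage sketch for the new writer method, assuming the gguf-py from this commit; the output file name is illustrative, no tensors are written, and the exact write/close sequence is an assumption about the writer's usual flow rather than part of this diff.

import gguf

writer = gguf.GGUFWriter("falcon-mamba-metadata-only.gguf", arch="mamba")
writer.add_ssm_dt_b_c_rms(True)   # stored as the bool key "mamba.ssm.dt_b_c_rms"
writer.write_header_to_file()
writer.write_kv_data_to_file()
writer.close()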

src/llama.cpp

Lines changed: 20 additions & 2 deletions
@@ -330,6 +330,7 @@ enum llm_kv {
     LLM_KV_SSM_CONV_KERNEL,
     LLM_KV_SSM_STATE_SIZE,
     LLM_KV_SSM_TIME_STEP_RANK,
+    LLM_KV_SSM_DT_B_C_RMS,

     LLM_KV_TOKENIZER_MODEL,
     LLM_KV_TOKENIZER_PRE,
@@ -428,6 +429,7 @@ static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
     { LLM_KV_SSM_INNER_SIZE, "%s.ssm.inner_size" },
     { LLM_KV_SSM_STATE_SIZE, "%s.ssm.state_size" },
     { LLM_KV_SSM_TIME_STEP_RANK, "%s.ssm.time_step_rank" },
+    { LLM_KV_SSM_DT_B_C_RMS, "%s.ssm.dt_b_c_rms" },

     { LLM_KV_TOKENIZER_MODEL, "tokenizer.ggml.model" },
     { LLM_KV_TOKENIZER_PRE, "tokenizer.ggml.pre" },
@@ -2239,6 +2241,7 @@ struct llama_hparams {
     uint32_t ssm_d_inner = 0;
     uint32_t ssm_d_state = 0;
     uint32_t ssm_dt_rank = 0;
+    bool ssm_dt_b_c_rms = false;

     float f_clamp_kqv = 0.0f;
     float f_max_alibi_bias = 0.0f;
@@ -2288,6 +2291,7 @@ struct llama_hparams {
         if (this->ssm_d_inner != other.ssm_d_inner) return true;
         if (this->ssm_d_state != other.ssm_d_state) return true;
         if (this->ssm_dt_rank != other.ssm_dt_rank) return true;
+        if (this->ssm_dt_b_c_rms != other.ssm_dt_b_c_rms) return true;

         if (this->dec_start_token_id != other.dec_start_token_id) return true;

@@ -5100,6 +5104,7 @@ static void llm_load_hparams(
                 ml.get_key(LLM_KV_SSM_INNER_SIZE, hparams.ssm_d_inner);
                 ml.get_key(LLM_KV_SSM_STATE_SIZE, hparams.ssm_d_state);
                 ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
+                ml.get_key(LLM_KV_SSM_DT_B_C_RMS, hparams.ssm_dt_b_c_rms, false);

                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);

@@ -5962,6 +5967,7 @@ static void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
         LLAMA_LOG_INFO("%s: ssm_d_inner = %u\n", __func__, hparams.ssm_d_inner);
         LLAMA_LOG_INFO("%s: ssm_d_state = %u\n", __func__, hparams.ssm_d_state);
         LLAMA_LOG_INFO("%s: ssm_dt_rank = %u\n", __func__, hparams.ssm_dt_rank);
+        LLAMA_LOG_INFO("%s: ssm_dt_b_c_rms = %d\n", __func__, hparams.ssm_dt_b_c_rms);
     }

     LLAMA_LOG_INFO("%s: model type = %s\n", __func__, llama_model_type_name(model.type));
@@ -12279,6 +12285,10 @@ struct llm_build_context {
         GGML_ASSERT(2 * d_model == d_inner);
         const int64_t d_state = hparams.ssm_d_state;
         const int64_t dt_rank = hparams.ssm_dt_rank;
+        // Some variants of Mamba arch (e.g. FalconMamba do apply layer norm on B and Dt layers)
+        const bool ssm_dt_b_c_rms = hparams.ssm_dt_b_c_rms;
+        // Use the same RMS norm as the final layer norm
+        const float norm_rms_eps = hparams.f_norm_rms_eps;

         struct ggml_tensor * cur;
         struct ggml_tensor * inpL;
@@ -12359,6 +12369,13 @@ struct llm_build_context {
             struct ggml_tensor * B = ggml_view_2d(ctx0, x_db, d_state, n_tokens, x_db->nb[1], ggml_element_size(x_db)*dt_rank);
             struct ggml_tensor * C = ggml_view_2d(ctx0, x_db, d_state, n_tokens, x_db->nb[1], ggml_element_size(x_db)*(dt_rank+d_state));

+            // Some Mamba variants (e.g. FalconMamba) apply RMS norm in B, C & Dt layers
+            if (ssm_dt_b_c_rms) {
+                dt = ggml_rms_norm(ctx0, dt, norm_rms_eps);
+                B = ggml_rms_norm(ctx0, B, norm_rms_eps);
+                C = ggml_rms_norm(ctx0, C, norm_rms_eps);
+            }
+
             // {dt_rank, d_inner} * {dt_rank, n_tokens} => {d_inner, n_tokens}
             dt = llm_build_lora_mm(lctx, ctx0, model.layers[il].ssm_dt, dt);
             dt = ggml_add(ctx0, dt, model.layers[il].ssm_dt_b);
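For intuition, a NumPy sketch (not ggml) of what the ggml_rms_norm calls above do to dt, B and C when ssm_dt_b_c_rms is set: plain RMS normalization with no learned weight, reusing the model's final-norm epsilon. The array shapes below are illustrative only.

import numpy as np

def rms_norm(x: np.ndarray, eps: float = 1e-6) -> np.ndarray:
    # scale each vector to unit root-mean-square along the feature dimension
    return x / np.sqrt(np.mean(x * x, axis=-1, keepdims=True) + eps)

B = np.random.randn(4, 16).astype(np.float32)  # illustrative (n_tokens, d_state) layout
print(np.sqrt(np.mean(rms_norm(B) ** 2, axis=-1)))  # each entry is ~1.0 after normalization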
@@ -16480,6 +16497,9 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
             case GGML_TYPE_Q6_K: new_type = GGML_TYPE_Q8_0; break;
             default: throw std::runtime_error("\nUnsupported tensor size encountered\n");
         }
+        if (tensor->ne[0] % ggml_blck_size(new_type) != 0) {
+            new_type = GGML_TYPE_F16;
+        }
         LLAMA_LOG_WARN(" - using fallback quantization %s\n", ggml_type_name(new_type));
         ++qs.n_fallback;
     }
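The rationale for the added check, sketched below: block-quantized ggml types pack ne[0] row values into fixed-size blocks, so a row length that is not a multiple of the block size cannot use them, and F16 (block size 1) always fits. The helper is hypothetical; the block sizes are the standard ggml values for these types.

# Hypothetical helper mirroring the fallback rule above.
GGML_BLCK_SIZE = {"Q4_0": 32, "Q5_0": 32, "Q8_0": 32, "Q6_K": 256, "F16": 1}

def fallback_type(ne0: int, candidate: str) -> str:
    return candidate if ne0 % GGML_BLCK_SIZE[candidate] == 0 else "F16"

print(fallback_type(4096, "Q8_0"))  # Q8_0 (4096 is a multiple of 32)
print(fallback_type(4098, "Q8_0"))  # F16  (4098 % 32 != 0)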
@@ -16822,8 +16842,6 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         // do not quantize Mamba's small yet 2D weights
         // NOTE: can't use LLM_TN here because the layer number is not known
         quantize &= name.find("ssm_conv1d.weight") == std::string::npos;
-        quantize &= name.find("ssm_x.weight") == std::string::npos;
-        quantize &= name.find("ssm_dt.weight") == std::string::npos;

         // do not quantize relative position bias (T5)
         quantize &= name.find("attn_rel_b.weight") == std::string::npos;
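With the two exclusions removed, only ssm_conv1d.weight remains on Mamba's do-not-quantize list; ssm_x.weight and ssm_dt.weight now go through the regular quantization path (with the F16 fallback above). A small Python sketch of the resulting name filter, for illustration only:

def may_quantize(name: str) -> bool:
    # mirrors the remaining substring checks in llama_model_quantize_internal
    return ("ssm_conv1d.weight" not in name) and ("attn_rel_b.weight" not in name)

print(may_quantize("blk.0.ssm_conv1d.weight"))  # False (still excluded)
print(may_quantize("blk.0.ssm_x.weight"))       # True  (quantizable after this change)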
