Skip to content

Commit d514a8b

Browse files
authored
use lora_rank_tokenshift and lora_rank_decay if present
1 parent 2c3f8b8 commit d514a8b

File tree

1 file changed

+2
-2
lines changed

1 file changed

+2
-2
lines changed

convert_hf_to_gguf.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3557,8 +3557,8 @@ def set_gguf_parameters(self):
3557  3557
head_size = hidden_size // num_attention_heads
3558  3558
rms_norm_eps = self.hparams["rms_norm_eps"]
3559  3559
intermediate_size = self.hparams["intermediate_size"]
3560-
time_mix_extra_dim = 64 if hidden_size >= 4096 else 32
3561-
time_decay_extra_dim = 128 if hidden_size >= 4096 else 64
3560+
time_mix_extra_dim = self.hparams.get("lora_rank_tokenshift", 64 if hidden_size >= 4096 else 32)
3561+
time_decay_extra_dim = self.hparams.get("lora_rank_decay", 128 if hidden_size >= 4096 else 64)
3562  3562

3563  3563
# RWKV isn't context limited
3564  3564
self.gguf_writer.add_context_length(1048576)

0 commit comments

Comments (0)