
Commit 6fe6247

llama : add Minerva 7B model support (ggml-org#10673)
* Support for Minerva 7B
* Update convert_hf_to_gguf_update.py
1 parent 0cd182e commit 6fe6247

5 files changed (+9, -0 lines changed)

convert_hf_to_gguf.py

Lines changed: 3 additions & 0 deletions
@@ -658,6 +658,9 @@ def get_vocab_base_pre(self, tokenizer) -> str:
         if chkhsh == "60824e3c0d9401f89943cbb2fff727f0e2d4c545ba4df2d6e4f09a6db0f5b450":
             # ref: https://huggingface.co/facebook/chameleon-7b
             res = "chameleon"
+        if chkhsh == "1431a23e583c97432bc230bff598d103ddb5a1f89960c8f1d1051aaa944d0b35":
+            # ref: https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0
+            res = "minerva-7b"
 
         if res is None:
             logger.warning("\n")
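
The check above works by fingerprinting the tokenizer: a fixed, tokenizer-stressing test string is encoded and the resulting token IDs are hashed, so any model whose pre-tokenization behaves identically maps to the same chkhsh. A minimal sketch of that idea (the exact test string and surrounding logic live in convert_hf_to_gguf.py; the helper name here is made up):

from hashlib import sha256
from transformers import AutoTokenizer  # assumes the transformers package is available

def compute_chkhsh(model_id: str, chktxt: str) -> str:
    # Encode the fixed test string and hash the token ID list; tokenizers with
    # identical pre-tokenization produce identical hashes.
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    chktok = tokenizer.encode(chktxt)
    return sha256(str(chktok).encode()).hexdigest()

# Hypothetical usage: the Minerva tokenizer is expected to hash to the value checked above.
# print(compute_chkhsh("sapienzanlp/Minerva-7B-base-v1.0", chktxt))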

convert_hf_to_gguf_update.py

Lines changed: 1 addition & 0 deletions
@@ -102,6 +102,7 @@ class TOKENIZER_TYPE(IntEnum):
     {"name": "exaone", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct", },
     {"name": "phi-2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/microsoft/phi-2", },
     {"name": "chameleon", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/facebook/chameleon-7b", },
+    {"name": "minerva-7b", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0", },
 ]
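
Adding an entry to this list is what keeps the two scripts in sync: the update script downloads each listed tokenizer, computes its chkhsh, and regenerates the "if chkhsh == ..." blocks in convert_hf_to_gguf.py. A rough, self-contained sketch of that code-generation step (the real script also downloads the tokenizers and rewrites the file in place; the hash below is the one from this commit):

models = [
    {"name": "minerva-7b",
     "repo": "https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0",
     "chkhsh": "1431a23e583c97432bc230bff598d103ddb5a1f89960c8f1d1051aaa944d0b35"},
]

src_ifs = ""
for model in models:
    # Emit one check per tokenizer, matching the blocks shown in the first hunk above.
    src_ifs += f'        if chkhsh == "{model["chkhsh"]}":\n'
    src_ifs += f'            # ref: {model["repo"]}\n'
    src_ifs += f'            res = "{model["name"]}"\n'

print(src_ifs)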

include/llama.h

Lines changed: 1 addition & 0 deletions
@@ -104,6 +104,7 @@ extern "C" {
         LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH = 24,
         LLAMA_VOCAB_PRE_TYPE_EXAONE       = 25,
         LLAMA_VOCAB_PRE_TYPE_CHAMELEON    = 26,
+        LLAMA_VOCAB_PRE_TYPE_MINERVA      = 27,
     };
 
     enum llama_rope_type {

src/llama-vocab.cpp

Lines changed: 1 addition & 0 deletions
@@ -418,6 +418,7 @@ struct llm_tokenizer_bpe : llm_tokenizer {
             case LLAMA_VOCAB_PRE_TYPE_SMOLLM:
             case LLAMA_VOCAB_PRE_TYPE_CODESHELL:
             case LLAMA_VOCAB_PRE_TYPE_EXAONE:
+            case LLAMA_VOCAB_PRE_TYPE_MINERVA:
                 regex_exprs = {
                     "\\p{N}",
                     "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)",

src/llama.cpp

Lines changed: 3 additions & 0 deletions
@@ -6479,6 +6479,9 @@ static void llm_load_vocab(
             vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_CHAMELEON;
             vocab.tokenizer_add_bos = true;
             vocab.tokenizer_clean_spaces = false;
+        } else if (
+            tokenizer_pre == "minerva-7b") {
+            vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_MINERVA;
         } else {
             throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
         }
