
Commit 05659d3

Author: Joan Martinez (committed)
fix: remove ollama patches
1 parent 3b44f8f commit 05659d3

File tree

1 file changed: +20 -20 lines


llama.cpp

Lines changed: 20 additions & 20 deletions
@@ -4653,8 +4653,16 @@ static void llm_load_vocab(
 
     // for now, only BPE models have pre-tokenizers
     if (vocab.type == LLAMA_VOCAB_TYPE_BPE) {
-        if (
-            tokenizer_pre == "default") {
+        if (tokenizer_pre.empty()) {
+            LLAMA_LOG_WARN("%s: missing pre-tokenizer type, using: 'default'\n", __func__);
+            LLAMA_LOG_WARN("%s: \n", __func__);
+            LLAMA_LOG_WARN("%s: ************************************ \n", __func__);
+            LLAMA_LOG_WARN("%s: GENERATION QUALITY WILL BE DEGRADED! \n", __func__);
+            LLAMA_LOG_WARN("%s: CONSIDER REGENERATING THE MODEL \n", __func__);
+            LLAMA_LOG_WARN("%s: ************************************ \n", __func__);
+            LLAMA_LOG_WARN("%s: \n", __func__);
+            vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
+        } else if (tokenizer_pre == "default") {
             vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
         } else if (
                 tokenizer_pre == "llama3" ||
@@ -4706,8 +4714,7 @@ static void llm_load_vocab(
                 tokenizer_pre == "smaug-bpe") {
             vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_SMAUG;
         } else {
-            LLAMA_LOG_WARN("%s: missing or unrecognized pre-tokenizer type, using: 'default'\n", __func__);
-            vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
+            throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
         }
     } else {
         vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
@@ -6623,7 +6630,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
         }
     } catch (const std::exception & err) {
         LLAMA_LOG_ERROR("%s: error loading model: %s\n", __func__, err.what());
-        throw;
+        return -1;
     }
 
     return 0;
@@ -16246,23 +16253,16 @@ struct llama_model * llama_load_model_from_file(
         }
         model->rpc_servers.push_back(servers);
     }
-
-    try {
-        int status = llama_model_load(path_model, *model, params);
-        GGML_ASSERT(status <= 0);
-        if (status < 0) {
-            if (status == -1) {
-                LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
-            } else if (status == -2) {
-                LLAMA_LOG_INFO("%s: cancelled model load\n", __func__);
-            }
-            delete model;
-            return nullptr;
+    int status = llama_model_load(path_model, *model, params);
+    GGML_ASSERT(status <= 0);
+    if (status < 0) {
+        if (status == -1) {
+            LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
+        } else if (status == -2) {
+            LLAMA_LOG_INFO("%s: cancelled model load\n", __func__);
         }
-    } catch (...) {
-        LLAMA_LOG_ERROR("%s: exception loading model\n", __func__);
         delete model;
-        throw;
+        return nullptr;
     }
 
     return model;
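
With the ollama patches removed, an unrecognized pre-tokenizer type surfaces as a load failure instead of a silent fallback to the default: llm_load_vocab() throws, llama_model_load() catches the exception, logs it, and returns -1, and llama_load_model_from_file() then frees the model and returns nullptr. The following is a minimal caller sketch, not part of this commit, assuming the llama.h C API available at this revision (llama_backend_init, llama_model_default_params, llama_load_model_from_file, llama_free_model, llama_backend_free):

// sketch of a caller that checks the nullptr return path shown in the diff above
#include "llama.h"
#include <cstdio>

int main(int argc, char ** argv) {
    if (argc < 2) {
        std::fprintf(stderr, "usage: %s <model.gguf>\n", argv[0]);
        return 1;
    }

    llama_backend_init();

    llama_model_params mparams = llama_model_default_params();

    // With this commit, an unknown pre-tokenizer type (or any other load error)
    // is reported here as a nullptr return rather than an uncaught exception.
    llama_model * model = llama_load_model_from_file(argv[1], mparams);
    if (model == nullptr) {
        std::fprintf(stderr, "failed to load model: %s\n", argv[1]);
        llama_backend_free();
        return 1;
    }

    // ... create a context and run inference ...

    llama_free_model(model);
    llama_backend_free();
    return 0;
}

The design choice visible in the diff is that errors travel as return codes from llama_model_load() up to a nullptr from llama_load_model_from_file(), so no C++ exception crosses the C API boundary to the caller.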
