From 57441ba33f9b572f8ee587650b97d16060ded734 Mon Sep 17 00:00:00 2001
From: CSY
Date: Tue, 18 Feb 2025 09:42:45 +0800
Subject: [PATCH] fix lm-eval & vllm check tokenizer type

---
 gptqmodel/models/auto.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/gptqmodel/models/auto.py b/gptqmodel/models/auto.py
index 0760a2745..9a21df5d7 100644
--- a/gptqmodel/models/auto.py
+++ b/gptqmodel/models/auto.py
@@ -328,12 +328,13 @@ def eval(
     if isinstance(model, BaseGPTQModel):
         tokenizer = model.tokenizer
     elif isinstance(model, PreTrainedModel) or model_id_or_path.strip():
-        tokenizer = Tokenicer.load(model_id_or_path)
+        tokenizer = Tokenicer.load(model_id_or_path).tokenizer # lm-eval checks if tokenizer's type is PretrainedTokenizer

     if tokenizer is None:
         raise ValueError("Tokenizer: Auto-loading of tokenizer failed with `model_or_id_or_path`. Please pass in `tokenizer` as argument.")

-    model_args["tokenizer"] = tokenizer
+    if backend=="gptqmodel": # vllm loads tokenizer
+        model_args["tokenizer"] = tokenizer

     if framework == EVAL.LM_EVAL:
         for task in tasks:
@@ -473,5 +474,4 @@ def push_to_hub(repo_id: str,
         folder_path=quantized_path,
         repo_id=repo_id,
         repo_type=repo_type,
-    )
-
+    )
\ No newline at end of file