Skip to content

Commit ac7a454

Browse files
committed
add missing GenerationConfig import
1 parent eded411 commit ac7a454

File tree

1 file changed

+1
-0
lines changed

1 file changed

+1
-0
lines changed

tools/llm_bench/task/text_generation.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -315,6 +315,7 @@ def run_text_generation_genai(input_text, num, model, tokenizer, args, iter_data
315315
if args['infer_count'] is not None:
316316
out_str += 'all max_output_token_size: {} * {}'.format(args['infer_count'], args['batch_size'])
317317
log.info(out_str)
318+
from openvino_genai import GenerationConfig
318319
gen_config = model.get_generation_config() if hasattr(model, 'get_generation_config') else GenerationConfig()
319320
gen_config.max_new_tokens = max_gen_tokens
320321
# llama-3-8b-instruct's generation_config.json has 4096 max_length.

0 commit comments

Comments (0)