diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/transformers/weight_only/text-generation/requirements_GPU.txt b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/transformers/weight_only/text-generation/requirements_GPU.txt
index 3d5e737d101..fe091ca0ef0 100644
--- a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/transformers/weight_only/text-generation/requirements_GPU.txt
+++ b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/transformers/weight_only/text-generation/requirements_GPU.txt
@@ -11,5 +11,5 @@ bitsandbytes #baichuan
 transformers_stream_generator
 tiktoken #qwen
 einops #qwen
-autoround
+auto_round
 lm-eval==0.4.3
diff --git a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/transformers/weight_only/text-generation/requirements_cpu_woq.txt b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/transformers/weight_only/text-generation/requirements_cpu_woq.txt
index d260c5e3b8b..63f7c30d695 100644
--- a/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/transformers/weight_only/text-generation/requirements_cpu_woq.txt
+++ b/examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/transformers/weight_only/text-generation/requirements_cpu_woq.txt
@@ -10,6 +10,6 @@ bitsandbytes #baichuan
 transformers_stream_generator
 tiktoken #qwen
 einops #qwen
-autoround
+auto_round
 lm-eval==0.4.3
 huggingface_hub