We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 0ecfe56 · commit 68f1a43 — Copy full SHA for 68f1a43
vllm/worker/hpu_worker.py
@@ -143,7 +143,7 @@ def execute_model(
143
'VLLM_HPU_LOG_STEP_CPU_FALLBACKS_ALL', '0') != '0'
144
log_cpu_fallbacks = os.environ.get('VLLM_HPU_LOG_STEP_CPU_FALLBACKS',
145
'0') != '0' or log_cpu_fallbacks_all
146
- if log_graph_compilation or log_cpu_fallbacks and \
+ if (log_graph_compilation or log_cpu_fallbacks) and \
147
execute_model_req is not None:
148
from habana_frameworks.torch.hpu.metrics import metric_localcontext
149
seq_group_metadata_list = execute_model_req.seq_group_metadata_list
0 commit comments