From ecf4f66e07dd240b4d19b80ecf6e0265ec415fca Mon Sep 17 00:00:00 2001
From: "Cui, Yifeng"
Date: Sun, 9 Nov 2025 23:15:28 -0800
Subject: [PATCH 1/2] Remove work-around used for 2025.2

---
 src/ATen/native/xpu/mkl/BatchLinearAlgebra.cpp | 14 ++------------
 1 file changed, 2 insertions(+), 12 deletions(-)

diff --git a/src/ATen/native/xpu/mkl/BatchLinearAlgebra.cpp b/src/ATen/native/xpu/mkl/BatchLinearAlgebra.cpp
index 26e80fa4d0..9abad5565c 100644
--- a/src/ATen/native/xpu/mkl/BatchLinearAlgebra.cpp
+++ b/src/ATen/native/xpu/mkl/BatchLinearAlgebra.cpp
@@ -44,16 +44,6 @@ void error_handle(
   auto errs = be.exceptions();
   auto ids = be.ids();
 
-  if (!errs.size()) {
-    TORCH_WARN(
-        "Caught lapack exception:\nWhat: ", be.what(), "\nInfo: ", be.info());
-    for (auto& i : ids) {
-      TORCH_WARN("Error in matrix #", i);
-      info_cpu[i] = 1;
-    }
-    return;
-  }
-
   for (size_t i = 0; i < errs.size(); ++i) {
     try {
       std::rethrow_exception(errs[i]);
@@ -65,10 +55,10 @@ void error_handle(
           e.info(),
           "\nDetail: ",
           e.detail());
-      info_cpu[i] = e.info();
+      info_cpu[ids[i]] = e.info();
     } catch (const sycl::exception& e) {
       TORCH_WARN("Caught SYCL exception:\nWhat: ", e.what(), "\nInfo: -1");
-      info_cpu[i] = -1;
+      info_cpu[ids[i]] = -1;
     }
   }
 }

From 3066ba28c4b27ff26a6787b51652b4fb740751cc Mon Sep 17 00:00:00 2001
From: "Cui, Yifeng"
Date: Mon, 10 Nov 2025 04:53:05 -0800
Subject: [PATCH 2/2] Reactivate skipped test cases

---
 test/xpu/skip_list_common.py | 9 ---------
 1 file changed, 9 deletions(-)

diff --git a/test/xpu/skip_list_common.py b/test/xpu/skip_list_common.py
index c9e1c248d6..c6b0c699de 100644
--- a/test/xpu/skip_list_common.py
+++ b/test/xpu/skip_list_common.py
@@ -290,15 +290,6 @@
         "test_scaled_gemm_offline_tunableop_xpu_float8_e5m2fnuz",
         # case need to port for xpu
         "test_gemm_bias_offline_tunableop_xpu_bfloat16",
-        # Exception is temporarily unavailable due to regression in oneMKL
-        "test_inv_errors_and_warnings_xpu_float32",
-        "test_inv_errors_and_warnings_xpu_float64",
-        "test_inverse_errors_large_xpu_float32",
-        "test_inverse_errors_large_xpu_float64",
-        "test_inverse_errors_xpu_float32",
-        "test_inverse_errors_xpu_float64",
-        "test_inv_ex_singular_xpu_float32",
-        "test_inv_ex_singular_xpu_float64",
     ),
     "test_ops_fwd_gradients_xpu.py": (
         # All of the followings are oneDNN issues