skip_dict = {
    "test_ops_xpu.py": (
        # Skip list of base line
        # XPU implementation doesn't claim FP8 now
        # https://github.com/intel/torch-xpu-ops/issues/461
        "float8",
        # workarounds for the following tests
        # https://github.com/intel/torch-xpu-ops/issues/1214
        "test_python_ref__refs_exp_xpu_complex128",
        "test_python_ref__refs_sigmoid_xpu_complex128",
        "test_python_ref_executor__refs_log2_executor_aten_xpu_complex128",
        "test_python_ref_executor__refs_exp_executor_aten_xpu_complex128",
        "test_python_ref_torch_fallback__refs_log2_xpu_complex128",
        "test_python_ref_torch_fallback__refs_log10_xpu_complex128",
        "test_python_ref_torch_fallback__refs_sigmoid_xpu_complex128",
        "test_python_ref_executor__refs_log10_executor_aten_xpu_complex128",
        "test_noncontiguous_samples_histogram_xpu_float32",
        # TODO: Fix the following tests
        "test_out_warning_torch__scaled_mm_xpu",
        # To be removed from this file.
        # CUDA and XPU both XFAIL now.
        "test_out_narrow_copy_xpu_float32",
        # This case is marked as skip but XPU failed. However, CUDA and XPU throw the same runtime error.
        "test_out_histc_xpu_float32",
        # Data type is not supported in oneDNN!
        "test_dtypes_nn_functional_conv1d_xpu",
        "test_dtypes_nn_functional_conv2d_xpu",
        "test_dtypes_nn_functional_conv3d_xpu",
        "test_dtypes_nn_functional_conv_transpose1d_xpu",
        "test_dtypes_nn_functional_conv_transpose2d_xpu",
        "test_dtypes_nn_functional_conv_transpose3d_xpu",
        # AssertionError: The supported dtypes for nn.functional.softsign on device type xpu are incorrect!
        "test_dtypes_nn_functional_softsign_xpu",
        # AssertionError: The supported dtypes for sparse.sampled_addmm on device type xpu are incorrect! - OPs not supported
        "test_dtypes_sparse_sampled_addmm_xpu",
        # OPs not supported
        "test_errors_dot_xpu",
        "test_errors_vdot_xpu",
        # Linalg OPs not supported
        "test_noncontiguous_samples_linalg_det_xpu_float32",
        "test_noncontiguous_samples_linalg_slogdet_xpu_float32",
        "test_noncontiguous_samples_linalg_solve_ex_xpu_float32",
        "test_noncontiguous_samples_linalg_solve_xpu_float32",
        "test_noncontiguous_samples_linalg_tensorsolve_xpu_float32",
        "test_noncontiguous_samples_logdet_xpu_float32",
        # Sparse CSR OPs not supported
        # RuntimeError: device type of values (xpu) must be CPU or CUDA or Meta
        # https://github.com/intel/torch-xpu-ops/issues/357
        "test_compare_cpu_sparse_sampled_addmm_xpu_float32",
        "test_errors_sparse_mul_layout0_xpu",
        "test_errors_sparse_mul_layout1_xpu",
        "test_errors_sparse_mul_layout2_xpu",
        "test_errors_sparse_mul_layout3_xpu",
        "test_out_requires_grad_error_sparse_sampled_addmm_xpu_complex64",
        "test_out_requires_grad_error_sparse_sampled_addmm_xpu_float32",
        # OneDNN issues, https://github.com/intel/torch-xpu-ops/issues/253
        # RuntimeError: Long is not supported in oneDNN!
        # RuntimeError: could not create a primitive descriptor for a deconvolution forward propagation primitive
        # RuntimeError: Double and complex datatype matmul is not supported in oneDNN
        "test_noncontiguous_samples_nn_functional_conv3d_xpu_int64",
        "test_noncontiguous_samples_nn_functional_conv_transpose1d_xpu_int64",
        "test_noncontiguous_samples_nn_functional_conv_transpose2d_xpu_complex64",
        "test_noncontiguous_samples_nn_functional_conv_transpose2d_xpu_float32",
        "test_noncontiguous_samples_nn_functional_conv_transpose2d_xpu_int64",
        "test_noncontiguous_samples_nn_functional_conv_transpose3d_xpu_complex64",
        "test_noncontiguous_samples_nn_functional_conv_transpose3d_xpu_float32",
        "test_noncontiguous_samples_nn_functional_conv_transpose3d_xpu_int64",
        "test_noncontiguous_samples_nn_functional_conv1d_xpu_int64",
        "test_noncontiguous_samples_nn_functional_conv2d_xpu_int64",
        # Linalg OPs not supported
        # RuntimeError: mode only supports CPU AND CUDA device type, got: xpu
        # Issue https://github.com/intel/torch-xpu-ops/issues/327
        "test_numpy_ref_linalg_tensorinv_xpu_float64",
        # RuntimeError: could not create a primitive descriptor for a deconvolution
        # https://github.com/intel/torch-xpu-ops/issues/253
        "test_variant_consistency_eager_nn_functional_conv_transpose2d_xpu_complex64",
        "test_variant_consistency_eager_nn_functional_conv_transpose2d_xpu_float32",
        "test_variant_consistency_eager_nn_functional_conv_transpose3d_xpu_complex64",
        "test_variant_consistency_eager_nn_functional_conv_transpose3d_xpu_float32",
        # Linalg OPs not supported
        "test_compare_cpu_linalg_lu_factor_ex_xpu_float32",
        "test_compare_cpu_linalg_lu_factor_xpu_float32",
        "test_compare_cpu_linalg_lu_xpu_float32",
        # XPU hang. CUDA hang as well.
        # https://github.com/pytorch/pytorch/issues/79528
        "test_compare_cpu_special_hermite_polynomial_h_xpu_float32",
        # XFAIL of CUDA and XPU, unexpected success in fallback
        # Linalg OPs not supported
        "test_out_cholesky_inverse_xpu_float32",
        "test_out_geqrf_xpu_float32",
        "test_out_ormqr_xpu_float32",
        # XFAIL of CUDA, XPU got unexpected success
        "test_python_ref__refs_div_no_rounding_mode_xpu_complex32",
        "test_python_ref__refs_pow_xpu_complex32",
        "test_python_ref_executor__refs_mul_executor_aten_xpu_complex32",
        "test_python_ref_torch_fallback__refs_div_no_rounding_mode_xpu_complex32",
        "test_python_ref__refs_pow_xpu_complex32",
        "test_python_ref_executor__refs_mul_executor_aten_xpu_complex32",
        "test_python_ref_torch_fallback__refs_div_no_rounding_mode_xpu_complex32",
        "test_python_ref_torch_fallback__refs_pow_xpu_complex32",
        # unexpected success because of cpu fallback
        # Linalg OPs not supported
        "test_out_triangular_solve_xpu_float32",
        # Newly added:
        # CUDA skipped it
        "test_non_standard_bool_values_sort_xpu_bool",  # The implementation aligns with CUDA, RuntimeError: "sort" not implemented for 'Bool'.
        # CUDA XFAIL (stock pytorch commit: e7cf7d0)
        "test_non_standard_bool_values_argsort_xpu_bool",
        # Unexpected success
        "test_python_ref_executor__refs_pow_executor_aten_xpu_complex32",  # Didn't align with CUDA, Unexpected success
        # Unexpected success
        # "test_errors_histogramdd_xpu", #XFAIL now
        # Jiterator is only supported on CUDA and ROCm GPUs, none are available.
        # https://github.com/intel/torch-xpu-ops/issues/584
        "_jiterator_",
        # https://github.com/intel/torch-xpu-ops/issues/157
        # Segfault:
        "test_dtypes_nn_functional_multi_head_attention_forward_xpu",  # https://github.com/intel/torch-xpu-ops/issues/157
        # Linalg OPs not supported
        "test_dtypes_pca_lowrank_xpu",  # https://github.com/intel/torch-xpu-ops/issues/157
        "test_dtypes_svd_lowrank_xpu",  # https://github.com/intel/torch-xpu-ops/issues/157
        # RuntimeError: Long is not supported in oneDNN!
        "test_noncontiguous_samples_nn_functional_linear_xpu_int64",  # https://github.com/intel/torch-xpu-ops/issues/157
        # https://github.com/intel/torch-xpu-ops/issues/157
        # Datatype not supported in oneDNN
        "test_dtypes_addmm_decomposed_xpu",
        "test_dtypes_addmm_xpu",
        "test_dtypes_addmv_xpu",
        "test_dtypes_addr_xpu",
        "test_dtypes_baddbmm_xpu",
        "test_dtypes_cholesky_inverse_xpu",
        "test_dtypes_cholesky_solve_xpu",
        "test_dtypes_cholesky_xpu",
        "test_dtypes_corrcoef_xpu",
        "test_dtypes_cov_xpu",
        "test_dtypes_linalg_cholesky_ex_xpu",
        "test_dtypes_linalg_cholesky_xpu",
        "test_dtypes_linalg_cond_xpu",
        "test_dtypes_linalg_det_singular_xpu",
        "test_dtypes_linalg_det_xpu",
        "test_dtypes_linalg_eig_xpu",
        "test_dtypes_linalg_eigh_xpu",
        "test_dtypes_linalg_eigvals_xpu",
        "test_dtypes_linalg_eigvalsh_xpu",
        "test_dtypes_linalg_inv_ex_xpu",
        "test_dtypes_linalg_inv_xpu",
        "test_dtypes_linalg_ldl_factor_ex_xpu",
        "test_dtypes_linalg_ldl_factor_xpu",
        "test_dtypes_linalg_ldl_solve_xpu",
        "test_dtypes_linalg_lstsq_grad_oriented_xpu",
        "test_dtypes_linalg_lstsq_xpu",
        "test_dtypes_linalg_lu_factor_ex_xpu",
        "test_dtypes_linalg_lu_factor_xpu",
        "test_dtypes_linalg_lu_solve_xpu",
        "test_dtypes_linalg_lu_xpu",
        "test_dtypes_linalg_matrix_power_xpu",
        "test_dtypes_linalg_matrix_rank_hermitian_xpu",
        "test_dtypes_linalg_matrix_rank_xpu",
        "test_dtypes_linalg_pinv_hermitian_xpu",
        "test_dtypes_linalg_pinv_xpu",
        "test_dtypes_linalg_qr_xpu",
        "test_dtypes_linalg_slogdet_xpu",
        "test_dtypes_linalg_solve_ex_xpu",
        "test_dtypes_linalg_solve_xpu",
        "test_dtypes_linalg_svd_xpu",
        "test_dtypes_linalg_tensorinv_xpu",
        "test_dtypes_linalg_tensorsolve_xpu",
        "test_dtypes_logdet_xpu",
        "test_dtypes_lu_solve_xpu",
        "test_dtypes_lu_xpu",
        "test_dtypes_mv_xpu",
        "test_dtypes_nn_functional_scaled_dot_product_attention_xpu",
        "test_dtypes_norm_nuc_xpu",
        "test_dtypes_pinverse_xpu",
        "test_dtypes_qr_xpu",
        "test_dtypes_svd_xpu",
        "test_dtypes_tensordot_xpu",
        "test_dtypes_triangular_solve_xpu",
        "test_noncontiguous_samples___rmatmul___xpu_complex64",
        "test_noncontiguous_samples___rmatmul___xpu_int64",
        "test_noncontiguous_samples_addbmm_xpu_complex64",
        "test_noncontiguous_samples_addbmm_xpu_float32",
        "test_noncontiguous_samples_addbmm_xpu_int64",
        "test_noncontiguous_samples_addmm_decomposed_xpu_complex64",
        "test_noncontiguous_samples_addmm_decomposed_xpu_int64",
        "test_noncontiguous_samples_addmm_xpu_complex64",
        "test_noncontiguous_samples_addmm_xpu_float32",
        "test_noncontiguous_samples_addmm_xpu_int64",
        "test_noncontiguous_samples_addmv_xpu_complex64",
        "test_noncontiguous_samples_addmv_xpu_float32",
        "test_noncontiguous_samples_addmv_xpu_int64",
        "test_noncontiguous_samples_addr_xpu_complex64",
        "test_noncontiguous_samples_baddbmm_xpu_complex64",
        "test_noncontiguous_samples_baddbmm_xpu_int64",
        "test_noncontiguous_samples_bmm_xpu_complex64",
        "test_noncontiguous_samples_bmm_xpu_int64",
        "test_noncontiguous_samples_cholesky_inverse_xpu_complex64",
        "test_noncontiguous_samples_cholesky_solve_xpu_complex64",
        "test_noncontiguous_samples_cholesky_xpu_complex64",
        "test_noncontiguous_samples_corrcoef_xpu_complex64",
        "test_noncontiguous_samples_cov_xpu_complex64",
        "test_noncontiguous_samples_einsum_xpu_complex64",
        "test_noncontiguous_samples_einsum_xpu_int64",
        "test_noncontiguous_samples_geqrf_xpu_complex64",
        "test_noncontiguous_samples_inner_xpu_complex64",
        "test_noncontiguous_samples_inner_xpu_int64",
        "test_noncontiguous_samples_linalg_cholesky_ex_xpu_complex64",
        "test_noncontiguous_samples_linalg_cholesky_xpu_complex64",
        "test_noncontiguous_samples_linalg_cond_xpu_complex64",
        "test_noncontiguous_samples_linalg_det_xpu_complex64",
        "test_noncontiguous_samples_linalg_eig_xpu_complex64",
        "test_noncontiguous_samples_linalg_eig_xpu_float32",
        "test_noncontiguous_samples_linalg_eigh_xpu_complex64",
        "test_noncontiguous_samples_linalg_eigvals_xpu_complex64",
        "test_noncontiguous_samples_linalg_eigvalsh_xpu_complex64",
        "test_noncontiguous_samples_linalg_householder_product_xpu_complex64",
        "test_noncontiguous_samples_linalg_inv_ex_xpu_complex64",
        "test_noncontiguous_samples_linalg_inv_xpu_complex64",
        "test_noncontiguous_samples_linalg_ldl_factor_ex_xpu_complex64",
        "test_noncontiguous_samples_linalg_ldl_factor_xpu_complex64",
        "test_noncontiguous_samples_linalg_ldl_solve_xpu_complex64",
        "test_noncontiguous_samples_linalg_lstsq_grad_oriented_xpu_complex64",
        "test_noncontiguous_samples_linalg_lstsq_xpu_complex64",
        "test_noncontiguous_samples_linalg_lu_factor_ex_xpu_complex64",
        "test_noncontiguous_samples_linalg_lu_factor_xpu_complex64",
        "test_noncontiguous_samples_linalg_lu_solve_xpu_complex64",
        "test_noncontiguous_samples_linalg_lu_xpu_complex64",
        "test_noncontiguous_samples_linalg_matrix_norm_xpu_complex64",
        "test_noncontiguous_samples_linalg_matrix_power_xpu_complex64",
        "test_noncontiguous_samples_linalg_matrix_rank_hermitian_xpu_complex64",
        "test_noncontiguous_samples_linalg_matrix_rank_xpu_complex64",
        "test_noncontiguous_samples_linalg_norm_subgradients_at_zero_xpu_complex64",
        "test_noncontiguous_samples_linalg_norm_xpu_complex64",
        "test_noncontiguous_samples_linalg_pinv_hermitian_xpu_complex64",
        "test_noncontiguous_samples_linalg_pinv_singular_xpu_complex64",
        "test_noncontiguous_samples_linalg_pinv_xpu_complex64",
        "test_noncontiguous_samples_linalg_qr_xpu_complex64",
        "test_noncontiguous_samples_linalg_slogdet_xpu_complex64",
        "test_noncontiguous_samples_linalg_solve_ex_xpu_complex64",
        "test_noncontiguous_samples_linalg_solve_triangular_xpu_complex64",
        "test_noncontiguous_samples_linalg_solve_xpu_complex64",
        "test_noncontiguous_samples_linalg_svd_xpu_complex64",
        "test_noncontiguous_samples_linalg_svdvals_xpu_complex64",
        "test_noncontiguous_samples_linalg_tensorinv_xpu_complex64",
        "test_noncontiguous_samples_linalg_tensorsolve_xpu_complex64",
        "test_noncontiguous_samples_logdet_xpu_complex64",
        "test_noncontiguous_samples_lu_solve_xpu_complex64",
        "test_noncontiguous_samples_lu_xpu_complex64",
        "test_noncontiguous_samples_matmul_xpu_complex64",
        "test_noncontiguous_samples_matmul_xpu_int64",
        "test_noncontiguous_samples_mm_xpu_complex64",
        "test_noncontiguous_samples_mm_xpu_int64",
        "test_noncontiguous_samples_mv_xpu_complex64",
        "test_noncontiguous_samples_mv_xpu_int64",
        "test_noncontiguous_samples_nn_functional_bilinear_xpu_int64",
        "test_noncontiguous_samples_nn_functional_linear_xpu_complex64",
        "test_noncontiguous_samples_norm_nuc_xpu_complex64",
        "test_noncontiguous_samples_ormqr_xpu_complex64",
        "test_noncontiguous_samples_pinverse_xpu_complex64",
        "test_noncontiguous_samples_qr_xpu_complex64",
        "test_noncontiguous_samples_svd_xpu_complex64",
        "test_noncontiguous_samples_tensordot_xpu_complex64",
        "test_noncontiguous_samples_tensordot_xpu_int64",
        "test_noncontiguous_samples_triangular_solve_xpu_complex64",
        "test_numpy_ref_addbmm_xpu_complex128",
        "test_numpy_ref_addbmm_xpu_float64",
        "test_numpy_ref_addbmm_xpu_int64",
        "test_numpy_ref_linalg_tensorinv_xpu_complex128",
        "test_out_addbmm_xpu_float32",
        "test_out_addmm_xpu_float32",
        "test_out_addmv_xpu_float32",
        "test_out_baddbmm_xpu_float32",
        "test_out_mm_xpu_float32",
        "test_out_mv_xpu_float32",
        "test_out_requires_grad_error_addbmm_xpu_complex64",
        "test_out_requires_grad_error_addmm_decomposed_xpu_complex64",
        "test_out_requires_grad_error_addmm_xpu_complex64",
        "test_out_requires_grad_error_addmv_xpu_complex64",
        "test_out_requires_grad_error_baddbmm_xpu_complex64",
        "test_out_requires_grad_error_bmm_xpu_complex64",
        "test_out_requires_grad_error_cholesky_inverse_xpu_complex64",
        "test_out_requires_grad_error_cholesky_solve_xpu_complex64",
        "test_out_requires_grad_error_cholesky_xpu_complex64",
        "test_out_requires_grad_error_inner_xpu_complex64",
        "test_out_requires_grad_error_linalg_cholesky_ex_xpu_complex64",
        "test_out_requires_grad_error_linalg_cholesky_xpu_complex64",
        "test_out_requires_grad_error_linalg_det_singular_xpu_complex64",
        "test_out_requires_grad_error_linalg_eig_xpu_complex64",
        "test_out_requires_grad_error_linalg_eigh_xpu_complex64",
        "test_out_requires_grad_error_linalg_eigvals_xpu_complex64",
        "test_out_requires_grad_error_linalg_eigvalsh_xpu_complex64",
        "test_out_requires_grad_error_linalg_inv_ex_xpu_complex64",
        "test_out_requires_grad_error_linalg_inv_xpu_complex64",
        "test_out_requires_grad_error_linalg_lstsq_xpu_complex64",
        "test_out_requires_grad_error_linalg_lu_factor_xpu_complex64",
        "test_out_requires_grad_error_linalg_lu_solve_xpu_complex64",
        "test_out_requires_grad_error_linalg_multi_dot_xpu_complex64",
        "test_out_requires_grad_error_linalg_pinv_hermitian_xpu_complex64",
        "test_out_requires_grad_error_linalg_pinv_xpu_complex64",
        "test_out_requires_grad_error_linalg_qr_xpu_complex64",
        "test_out_requires_grad_error_linalg_solve_ex_xpu_complex64",
        "test_out_requires_grad_error_linalg_solve_xpu_complex64",
        "test_out_requires_grad_error_linalg_tensorinv_xpu_complex64",
        "test_out_requires_grad_error_lu_solve_xpu_complex64",
        "test_out_requires_grad_error_lu_xpu_complex64",
        "test_out_requires_grad_error_mm_xpu_complex64",
        "test_out_requires_grad_error_mv_xpu_complex64",
        "test_out_requires_grad_error_nn_functional_linear_xpu_complex64",
        "test_out_requires_grad_error_qr_xpu_complex64",
        "test_out_requires_grad_error_tensordot_xpu_complex64",
        "test_out_requires_grad_error_triangular_solve_xpu_complex64",
        "test_out_warning_addmm_decomposed_xpu",
        "test_out_warning_addmm_xpu",
        "test_out_warning_addmv_xpu",
        "test_out_warning_baddbmm_xpu",
        "test_out_warning_bmm_xpu",
        "test_out_warning_matmul_xpu",
        "test_out_warning_mm_xpu",
        "test_out_warning_mv_xpu",
        "test_out_warning_nn_functional_linear_xpu",
        "test_python_ref__refs_linalg_svd_xpu_complex128",
        "test_python_ref__refs_linalg_svd_xpu_complex64",
        "test_python_ref__refs_linalg_svd_xpu_float64",
        "test_python_ref_executor__refs_linalg_svd_executor_aten_xpu_complex128",
        "test_python_ref_executor__refs_linalg_svd_executor_aten_xpu_complex64",
        "test_python_ref_executor__refs_linalg_svd_executor_aten_xpu_float64",
        "test_python_ref_executor__refs_nn_functional_pdist_executor_aten_xpu_float64",
        "test_python_ref_meta__refs_linalg_svd_xpu_complex128",
        "test_python_ref_meta__refs_linalg_svd_xpu_complex64",
        "test_python_ref_meta__refs_linalg_svd_xpu_float64",
        "test_python_ref_meta__refs_nn_functional_pdist_xpu_float64",
        "test_python_ref_torch_fallback__refs_linalg_svd_xpu_complex128",
        "test_python_ref_torch_fallback__refs_linalg_svd_xpu_complex64",
        "test_python_ref_torch_fallback__refs_linalg_svd_xpu_float64",
        "test_python_ref_torch_fallback__refs_nn_functional_pdist_xpu_float64",
        "test_variant_consistency_eager___rmatmul___xpu_complex64",
        "test_variant_consistency_eager_addmm_decomposed_xpu_complex64",
        "test_variant_consistency_eager_addmm_xpu_complex64",
        "test_variant_consistency_eager_addmm_xpu_float32",
        "test_variant_consistency_eager_addmv_xpu_complex64",
        "test_variant_consistency_eager_addmv_xpu_float32",
        "test_variant_consistency_eager_baddbmm_xpu_complex64",
        "test_variant_consistency_eager_baddbmm_xpu_float32",
        "test_variant_consistency_eager_bmm_xpu_complex64",
        "test_variant_consistency_eager_cholesky_inverse_xpu_complex64",
        "test_variant_consistency_eager_cholesky_solve_xpu_complex64",
        "test_variant_consistency_eager_cholesky_xpu_complex64",
        "test_variant_consistency_eager_corrcoef_xpu_complex64",
        "test_variant_consistency_eager_cov_xpu_complex64",
        "test_variant_consistency_eager_einsum_xpu_complex64",
        "test_variant_consistency_eager_geqrf_xpu_complex64",
        "test_variant_consistency_eager_inner_xpu_complex64",
        "test_variant_consistency_eager_linalg_cholesky_ex_xpu_complex64",
        "test_variant_consistency_eager_linalg_cholesky_xpu_complex64",
        "test_variant_consistency_eager_linalg_cond_xpu_complex64",
        "test_variant_consistency_eager_linalg_det_singular_xpu_complex64",
        "test_variant_consistency_eager_linalg_det_xpu_complex64",
        "test_variant_consistency_eager_linalg_eig_xpu_complex64",
        "test_variant_consistency_eager_linalg_eigh_xpu_complex64",
        "test_variant_consistency_eager_linalg_eigvals_xpu_complex64",
        "test_variant_consistency_eager_linalg_eigvalsh_xpu_complex64",
        "test_variant_consistency_eager_linalg_householder_product_xpu_complex64",
        "test_variant_consistency_eager_linalg_inv_ex_xpu_complex64",
        "test_variant_consistency_eager_linalg_inv_xpu_complex64",
        "test_variant_consistency_eager_linalg_ldl_factor_ex_xpu_complex64",
        "test_variant_consistency_eager_linalg_ldl_factor_xpu_complex64",
        "test_variant_consistency_eager_linalg_ldl_solve_xpu_complex64",
        "test_variant_consistency_eager_linalg_lstsq_grad_oriented_xpu_complex64",
        "test_variant_consistency_eager_linalg_lstsq_xpu_complex64",
        "test_variant_consistency_eager_linalg_lu_factor_xpu_complex64",
        "test_variant_consistency_eager_linalg_lu_solve_xpu_complex64",
        "test_variant_consistency_eager_linalg_matrix_norm_xpu_complex64",
        "test_variant_consistency_eager_linalg_matrix_power_xpu_complex64",
        "test_variant_consistency_eager_linalg_matrix_rank_hermitian_xpu_complex64",
        "test_variant_consistency_eager_linalg_matrix_rank_xpu_complex64",
        "test_variant_consistency_eager_linalg_multi_dot_xpu_complex64",
        "test_variant_consistency_eager_linalg_norm_subgradients_at_zero_xpu_complex64",
        "test_variant_consistency_eager_linalg_norm_xpu_complex64",
        "test_variant_consistency_eager_linalg_pinv_hermitian_xpu_complex64",
        "test_variant_consistency_eager_linalg_pinv_singular_xpu_complex64",
        "test_variant_consistency_eager_linalg_pinv_xpu_complex64",
        "test_variant_consistency_eager_linalg_qr_xpu_complex64",
        "test_variant_consistency_eager_linalg_slogdet_xpu_complex64",
        "test_variant_consistency_eager_linalg_solve_ex_xpu_complex64",
        "test_variant_consistency_eager_linalg_solve_triangular_xpu_complex64",
        "test_variant_consistency_eager_linalg_solve_xpu_complex64",
        "test_variant_consistency_eager_linalg_svd_xpu_complex64",
        "test_variant_consistency_eager_linalg_svdvals_xpu_complex64",
        "test_variant_consistency_eager_linalg_tensorinv_xpu_complex64",
        "test_variant_consistency_eager_linalg_tensorsolve_xpu_complex64",
        "test_variant_consistency_eager_logdet_xpu_complex64",
        "test_variant_consistency_eager_lu_solve_xpu_complex64",
        "test_variant_consistency_eager_lu_xpu_complex64",
        "test_variant_consistency_eager_matmul_xpu_complex64",
        "test_variant_consistency_eager_mm_xpu_complex64",
        "test_variant_consistency_eager_mv_xpu_complex64",
        "test_variant_consistency_eager_nn_functional_linear_xpu_complex64",
        "test_variant_consistency_eager_norm_nuc_xpu_complex64",
        "test_variant_consistency_eager_ormqr_xpu_complex64",
        "test_variant_consistency_eager_pinverse_xpu_complex64",
        "test_variant_consistency_eager_qr_xpu_complex64",
        "test_variant_consistency_eager_svd_xpu_complex64",
        "test_variant_consistency_eager_tensordot_xpu_complex64",
        "test_variant_consistency_eager_triangular_solve_xpu_complex64",
        # oneDNN issues
        # RuntimeError: value cannot be converted to type float without overflow
        # https://github.com/intel/torch-xpu-ops/issues/683
        "test_conj_view_addbmm_xpu_complex64",
        "test_neg_conj_view_addbmm_xpu_complex128",
        ### Error #0 in TestMathBitsXPU , RuntimeError: Double and complex datatype matmul is not supported in oneDNN
        # https://github.com/intel/torch-xpu-ops/issues/254
        "test_conj_view___rmatmul___xpu_complex64",
        "test_conj_view__refs_linalg_svd_xpu_complex64",
        "test_conj_view_addmm_decomposed_xpu_complex64",
        "test_conj_view_addmm_xpu_complex64",
        "test_conj_view_addmv_xpu_complex64",
        "test_conj_view_addr_xpu_complex64",
        "test_conj_view_baddbmm_xpu_complex64",
        "test_conj_view_bmm_xpu_complex64",
        "test_conj_view_cholesky_inverse_xpu_complex64",
        "test_conj_view_cholesky_solve_xpu_complex64",
        "test_conj_view_cholesky_xpu_complex64",
        "test_conj_view_corrcoef_xpu_complex64",
        "test_conj_view_cov_xpu_complex64",
        "test_conj_view_einsum_xpu_complex64",
        "test_conj_view_geqrf_xpu_complex64",
        "test_conj_view_inner_xpu_complex64",
        "test_conj_view_linalg_cholesky_ex_xpu_complex64",
        "test_conj_view_linalg_cholesky_xpu_complex64",
        "test_conj_view_linalg_cond_xpu_complex64",
        "test_conj_view_linalg_det_singular_xpu_complex64",
        "test_conj_view_linalg_det_xpu_complex64",
        "test_conj_view_linalg_eig_xpu_complex64",
        "test_conj_view_linalg_eigh_xpu_complex64",
        "test_conj_view_linalg_eigvals_xpu_complex64",
        "test_conj_view_linalg_eigvalsh_xpu_complex64",
        "test_conj_view_linalg_householder_product_xpu_complex64",
        "test_conj_view_linalg_inv_ex_xpu_complex64",
        "test_conj_view_linalg_inv_xpu_complex64",
        "test_conj_view_linalg_ldl_factor_ex_xpu_complex64",
        "test_conj_view_linalg_ldl_factor_xpu_complex64",
        "test_conj_view_linalg_ldl_solve_xpu_complex64",
        "test_conj_view_linalg_lstsq_grad_oriented_xpu_complex64",
        "test_conj_view_linalg_lstsq_xpu_complex64",
        "test_conj_view_linalg_lu_factor_xpu_complex64",
        "test_conj_view_linalg_lu_solve_xpu_complex64",
        "test_conj_view_linalg_matrix_norm_xpu_complex64",
        "test_conj_view_linalg_matrix_power_xpu_complex64",
        "test_conj_view_linalg_matrix_rank_hermitian_xpu_complex64",
        "test_conj_view_linalg_matrix_rank_xpu_complex64",
        "test_conj_view_linalg_multi_dot_xpu_complex64",
        "test_conj_view_linalg_norm_subgradients_at_zero_xpu_complex64",
        "test_conj_view_linalg_norm_xpu_complex64",
        "test_conj_view_linalg_pinv_hermitian_xpu_complex64",
        "test_conj_view_linalg_pinv_singular_xpu_complex64",
        "test_conj_view_linalg_pinv_xpu_complex64",
        "test_conj_view_linalg_qr_xpu_complex64",
        "test_conj_view_linalg_slogdet_xpu_complex64",
        "test_conj_view_linalg_solve_ex_xpu_complex64",
        "test_conj_view_linalg_solve_triangular_xpu_complex64",
        "test_conj_view_linalg_solve_xpu_complex64",
        "test_conj_view_linalg_svd_xpu_complex64",
        "test_conj_view_linalg_svdvals_xpu_complex64",
        "test_conj_view_linalg_tensorinv_xpu_complex64",
        "test_conj_view_linalg_tensorsolve_xpu_complex64",
        "test_conj_view_logdet_xpu_complex64",
        "test_conj_view_lu_solve_xpu_complex64",
        "test_conj_view_lu_xpu_complex64",
        "test_conj_view_matmul_xpu_complex64",
        "test_conj_view_mm_xpu_complex64",
        "test_conj_view_mv_xpu_complex64",
        "test_conj_view_nn_functional_linear_xpu_complex64",
        "test_conj_view_norm_nuc_xpu_complex64",
        "test_conj_view_ormqr_xpu_complex64",
        "test_conj_view_pinverse_xpu_complex64",
        "test_conj_view_qr_xpu_complex64",
        "test_conj_view_svd_xpu_complex64",
        "test_conj_view_tensordot_xpu_complex64",
        "test_conj_view_triangular_solve_xpu_complex64",
        "test_neg_conj_view_addmm_decomposed_xpu_complex128",
        "test_neg_conj_view_addmm_xpu_complex128",
        "test_neg_conj_view_addmv_xpu_complex128",
        "test_neg_conj_view_addr_xpu_complex128",
        "test_neg_conj_view_baddbmm_xpu_complex128",
        "test_neg_conj_view_bmm_xpu_complex128",
        "test_neg_conj_view_cholesky_inverse_xpu_complex128",
        "test_neg_conj_view_cholesky_solve_xpu_complex128",
        "test_neg_conj_view_cholesky_xpu_complex128",
        "test_neg_conj_view_corrcoef_xpu_complex128",
        "test_neg_conj_view_cov_xpu_complex128",
        "test_neg_conj_view_geqrf_xpu_complex128",
        "test_neg_conj_view_inner_xpu_complex128",
        "test_neg_conj_view_linalg_cholesky_ex_xpu_complex128",
        "test_neg_conj_view_linalg_cholesky_xpu_complex128",
        "test_neg_conj_view_linalg_cond_xpu_complex128",
        "test_neg_conj_view_linalg_det_singular_xpu_complex128",
        "test_neg_conj_view_linalg_eig_xpu_complex128",
        "test_neg_conj_view_linalg_eigh_xpu_complex128",
        "test_neg_conj_view_linalg_eigvals_xpu_complex128",
        "test_neg_conj_view_linalg_eigvalsh_xpu_complex128",
        "test_neg_conj_view_linalg_householder_product_xpu_complex128",
        "test_neg_conj_view_linalg_inv_ex_xpu_complex128",
        "test_neg_conj_view_linalg_inv_xpu_complex128",
        "test_neg_conj_view_linalg_ldl_factor_ex_xpu_complex128",
        "test_neg_conj_view_linalg_ldl_factor_xpu_complex128",
        "test_neg_conj_view_linalg_ldl_solve_xpu_complex128",
        "test_neg_conj_view_linalg_lstsq_grad_oriented_xpu_complex128",
        "test_neg_conj_view_linalg_lstsq_xpu_complex128",
        "test_neg_conj_view_linalg_lu_factor_xpu_complex128",
        "test_neg_conj_view_linalg_lu_solve_xpu_complex128",
        "test_neg_conj_view_linalg_matrix_rank_hermitian_xpu_complex128",
        "test_neg_conj_view_linalg_matrix_rank_xpu_complex128",
        "test_neg_conj_view_linalg_multi_dot_xpu_complex128",
        "test_neg_conj_view_linalg_pinv_hermitian_xpu_complex128",
        "test_neg_conj_view_linalg_pinv_singular_xpu_complex128",
        "test_neg_conj_view_linalg_pinv_xpu_complex128",
        "test_neg_conj_view_linalg_qr_xpu_complex128",
        "test_neg_conj_view_linalg_solve_ex_xpu_complex128",
        "test_neg_conj_view_linalg_solve_triangular_xpu_complex128",
        "test_neg_conj_view_linalg_solve_xpu_complex128",
        "test_neg_conj_view_linalg_svdvals_xpu_complex128",
        "test_neg_conj_view_linalg_tensorinv_xpu_complex128",
        "test_neg_conj_view_linalg_tensorsolve_xpu_complex128",
        "test_neg_conj_view_lu_solve_xpu_complex128",
        "test_neg_conj_view_lu_xpu_complex128",
        "test_neg_conj_view_mm_xpu_complex128",
        "test_neg_conj_view_mv_xpu_complex128",
        "test_neg_conj_view_nn_functional_linear_xpu_complex128",
        "test_neg_conj_view_norm_nuc_xpu_complex128",
        "test_neg_conj_view_ormqr_xpu_complex128",
        "test_neg_conj_view_pinverse_xpu_complex128",
        "test_neg_conj_view_qr_xpu_complex128",
        "test_neg_conj_view_tensordot_xpu_complex128",
        "test_neg_conj_view_triangular_solve_xpu_complex128",
        "test_neg_view___rmatmul___xpu_float64",
        "test_neg_view__refs_linalg_svd_xpu_float64",
        "test_neg_view__refs_nn_functional_pdist_xpu_float64",
        "test_neg_view_addbmm_xpu_float64",
        "test_neg_view_addmm_decomposed_xpu_float64",
        "test_neg_view_addmm_xpu_float64",
        "test_neg_view_addmv_xpu_float64",
        "test_neg_view_addr_xpu_float64",
        "test_neg_view_baddbmm_xpu_float64",
        "test_neg_view_bmm_xpu_float64",
        "test_neg_view_cdist_xpu_float64",
        "test_neg_view_cholesky_inverse_xpu_float64",
        "test_neg_view_cholesky_solve_xpu_float64",
        "test_neg_view_cholesky_xpu_float64",
        "test_neg_view_corrcoef_xpu_float64",
        "test_neg_view_cov_xpu_float64",
        "test_neg_view_einsum_xpu_float64",
        "test_neg_view_geqrf_xpu_float64",
        "test_neg_view_inner_xpu_float64",
        "test_neg_view_linalg_cholesky_ex_xpu_float64",
        "test_neg_view_linalg_cholesky_xpu_float64",
        "test_neg_view_linalg_cond_xpu_float64",
        "test_neg_view_linalg_det_singular_xpu_float64",
        "test_neg_view_linalg_det_xpu_float64",
        "test_neg_view_linalg_eig_xpu_float64",
        "test_neg_view_linalg_eigh_xpu_float64",
        "test_neg_view_linalg_eigvals_xpu_float64",
        "test_neg_view_linalg_eigvalsh_xpu_float64",
        "test_neg_view_linalg_householder_product_xpu_float64",
        "test_neg_view_linalg_inv_ex_xpu_float64",
        "test_neg_view_linalg_inv_xpu_float64",
        "test_neg_view_linalg_ldl_factor_ex_xpu_float64",
        "test_neg_view_linalg_ldl_factor_xpu_float64",
        "test_neg_view_linalg_ldl_solve_xpu_float64",
        "test_neg_view_linalg_lstsq_grad_oriented_xpu_float64",
        "test_neg_view_linalg_lstsq_xpu_float64",
        "test_neg_view_linalg_lu_factor_xpu_float64",
        "test_neg_view_linalg_lu_solve_xpu_float64",
        "test_neg_view_linalg_matrix_norm_xpu_float64",
        "test_neg_view_linalg_matrix_power_xpu_float64",
        "test_neg_view_linalg_matrix_rank_hermitian_xpu_float64",
        "test_neg_view_linalg_matrix_rank_xpu_float64",
        "test_neg_view_linalg_multi_dot_xpu_float64",
        "test_neg_view_linalg_norm_subgradients_at_zero_xpu_float64",
        "test_neg_view_linalg_norm_xpu_float64",
        "test_neg_view_linalg_pinv_hermitian_xpu_float64",
        "test_neg_view_linalg_pinv_singular_xpu_float64",
        "test_neg_view_linalg_pinv_xpu_float64",
        "test_neg_view_linalg_qr_xpu_float64",
        "test_neg_view_linalg_slogdet_xpu_float64",
        "test_neg_view_linalg_solve_ex_xpu_float64",
        "test_neg_view_linalg_solve_triangular_xpu_float64",
        "test_neg_view_linalg_solve_xpu_float64",
        "test_neg_view_linalg_svd_xpu_float64",
        "test_neg_view_linalg_svdvals_xpu_float64",
        "test_neg_view_linalg_tensorinv_xpu_float64",
        "test_neg_view_linalg_tensorsolve_xpu_float64",
        "test_neg_view_logdet_xpu_float64",
        "test_neg_view_lu_solve_xpu_float64",
        "test_neg_view_lu_xpu_float64",
        "test_neg_view_matmul_xpu_float64",
        "test_neg_view_mm_xpu_float64",
        "test_neg_view_mv_xpu_float64",
        "test_neg_view_nn_functional_bilinear_xpu_float64",
        "test_neg_view_nn_functional_linear_xpu_float64",
        "test_neg_view_nn_functional_multi_head_attention_forward_xpu_float64",
        "test_neg_view_nn_functional_scaled_dot_product_attention_xpu_float64",
        "test_neg_view_norm_nuc_xpu_float64",
        "test_neg_view_ormqr_xpu_float64",
        "test_neg_view_pca_lowrank_xpu_float64",
        "test_neg_view_pinverse_xpu_float64",
        "test_neg_view_qr_xpu_float64",
        "test_neg_view_svd_lowrank_xpu_float64",
        "test_neg_view_svd_xpu_float64",
        "test_neg_view_tensordot_xpu_float64",
        "test_neg_view_triangular_solve_xpu_float64",
        "test_noncontiguous_samples_pca_lowrank_xpu_complex64",
        "test_noncontiguous_samples_svd_lowrank_xpu_complex64",
        "test_variant_consistency_eager_pca_lowrank_xpu_complex64",
        "test_variant_consistency_eager_svd_lowrank_xpu_complex64",
        "test_conj_view_pca_lowrank_xpu_complex64",
        "test_conj_view_svd_lowrank_xpu_complex64",
        "test_neg_conj_view_pca_lowrank_xpu_complex128",
        "test_neg_conj_view_svd_lowrank_xpu_complex128",
        # oneDNN issues
        ### Error #1 in TestMathBitsXPU , RuntimeError: could not create a primitive descriptor for a deconvolution forward propagation primitive
        # https://github.com/intel/torch-xpu-ops/issues/253
        "test_conj_view_nn_functional_conv_transpose2d_xpu_complex64",
        "test_conj_view_nn_functional_conv_transpose3d_xpu_complex64",
        "test_neg_view_nn_functional_conv_transpose2d_xpu_float64",
        "test_neg_view_nn_functional_conv_transpose3d_xpu_float64",
        # aten::histogram was implemented to align with MPS operator coverage; CUDA doesn't support it,
        # but the test_dtypes infrastructure leverages CUDA-supported datatypes
        "test_dtypes_histogram_xpu",
        # Unexpected success: CUDA got XFAIL because CUDA does not support histogramdd
        "test_errors_histogramdd_xpu",
        # 2025 bundle std::pow complex result is different on host and device
        "test_python_ref__refs_square_xpu_complex64",
        "test_python_ref_torch_fallback__refs_square_xpu_complex64",
        "test_python_ref_torch_fallback__refs_exp_xpu_complex128",
        # Failed on rolling driver, passed on preci
        "test_python_ref__refs_div_trunc_rounding_xpu_float64",
        "test_python_ref_executor__refs_div_trunc_rounding_executor_aten_xpu_float64",
        "test_python_ref_torch_fallback__refs_div_trunc_rounding_xpu_float64",
        # TODO: passed from source code building version, investigate
        "test_python_ref__refs_log2_xpu_complex128",
        # The following dtypes did not work in backward but are listed by the OpInfo: {torch.bfloat16}.
        "test_dtypes_fft_fft2_xpu",
        "test_dtypes_fft_fft_xpu",
        "test_dtypes_fft_fftn_xpu",
        "test_dtypes_fft_hfft2_xpu",
        "test_dtypes_fft_hfft_xpu",
        "test_dtypes_fft_hfftn_xpu",
        "test_dtypes_fft_ifft2_xpu",
        "test_dtypes_fft_ifft_xpu",
        "test_dtypes_fft_ifftn_xpu",
        "test_dtypes_fft_ihfft2_xpu",
        "test_dtypes_fft_ihfft_xpu",
        "test_dtypes_fft_ihfftn_xpu",
        "test_dtypes_fft_irfft2_xpu",
        "test_dtypes_fft_irfft_xpu",
        "test_dtypes_fft_irfftn_xpu",
        "test_dtypes_fft_rfft2_xpu",
        "test_dtypes_fft_rfft_xpu",
        "test_dtypes_fft_rfftn_xpu",
    ),
    "test_binary_ufuncs_xpu.py": (
        "test_fmod_remainder_by_zero_integral_xpu_int64",  # zero division is an undefined behavior: different handles on different backends
        "test_div_rounding_numpy_xpu_float16",  # Calculation error. XPU implementation uses opmath type.
        # AssertionError: Jiterator is only supported on CUDA and ROCm GPUs, none are available.
        "_jiterator_",
        # nextafter: Numeric error due to `std::nextafter` difference between CPU (GCC) and XPU (SYCL)
        # https://github.com/intel/torch-xpu-ops/issues/623
        # AssertionError: Scalars are not equal!
        # Expected 9.183549615799121e-41 but got 0.0.
        # Absolute difference: 9.183549615799121e-41
        # Relative difference: 1.0
        "test_nextafter_bfloat16_xpu_bfloat16",
    ),
    "test_scatter_gather_ops_xpu.py": (
        # AssertionError: Tensor-likes are not equal!
        # Mismatched elements: 2 / 1870 (0.1%)
        # Greatest absolute difference: 2.220446049250313e-16 at index (14, 9, 4)
        # Greatest relative difference: 1.7039539596977877e-16 at index (15, 7, 6)
        "test_scatter_reduce_mean_xpu_float64",
    ),
    "test_autograd_fallback_xpu.py": None,
    "test_sort_and_select_xpu.py": (
        "test_sort_large_slice_xpu",
    ),  # Hard code CUDA, UT has already been rewritten to test/regressions/test_sort.py.
    "nn/test_embedding_xpu.py": (
        # NotImplementedError: Could not run 'aten::_indices' with arguments from the 'SparseXPU' backend.
        "test_embedding_bag_device_xpu_int32_int32_float16",
        "test_embedding_bag_device_xpu_int32_int32_float32",
        "test_embedding_bag_device_xpu_int32_int32_float64",
        "test_embedding_bag_device_xpu_int32_int64_float16",
        "test_embedding_bag_device_xpu_int32_int64_float32",
        "test_embedding_bag_device_xpu_int32_int64_float64",
        "test_embedding_bag_device_xpu_int64_int32_float16",
        "test_embedding_bag_device_xpu_int64_int32_float32",
        "test_embedding_bag_device_xpu_int64_int32_float64",
        "test_embedding_bag_device_xpu_int64_int64_float16",
        "test_embedding_bag_device_xpu_int64_int64_float32",
        "test_embedding_bag_device_xpu_int64_int64_float64",
        # CUDA implementation has no such functionality due to performance consideration.
        # skipped by CUDA for performance
        # @skipCUDAIf(True, "no out-of-bounds check on CUDA for perf.")
        "test_embedding_bag_out_of_bounds_idx_padding_idx0_mode_max_xpu_float32_int32",
        "test_embedding_bag_out_of_bounds_idx_padding_idx0_mode_max_xpu_float32_int64",
        "test_embedding_bag_out_of_bounds_idx_padding_idx0_mode_max_xpu_float64_int32",
        "test_embedding_bag_out_of_bounds_idx_padding_idx0_mode_max_xpu_float64_int64",
        "test_embedding_bag_out_of_bounds_idx_padding_idx0_mode_mean_xpu_float32_int32",
        "test_embedding_bag_out_of_bounds_idx_padding_idx0_mode_mean_xpu_float32_int64",
        "test_embedding_bag_out_of_bounds_idx_padding_idx0_mode_mean_xpu_float64_int32",
        "test_embedding_bag_out_of_bounds_idx_padding_idx0_mode_mean_xpu_float64_int64",
        "test_embedding_bag_out_of_bounds_idx_padding_idx0_mode_sum_xpu_float32_int32",
        "test_embedding_bag_out_of_bounds_idx_padding_idx0_mode_sum_xpu_float32_int64",
        "test_embedding_bag_out_of_bounds_idx_padding_idx0_mode_sum_xpu_float64_int32",
        "test_embedding_bag_out_of_bounds_idx_padding_idx0_mode_sum_xpu_float64_int64",
        "test_embedding_bag_out_of_bounds_idx_padding_idx_0_mode_max_xpu_float32_int32",
        "test_embedding_bag_out_of_bounds_idx_padding_idx_0_mode_max_xpu_float32_int64",
        "test_embedding_bag_out_of_bounds_idx_padding_idx_0_mode_max_xpu_float64_int32",
        "test_embedding_bag_out_of_bounds_idx_padding_idx_0_mode_max_xpu_float64_int64",
        "test_embedding_bag_out_of_bounds_idx_padding_idx_0_mode_mean_xpu_float32_int32",
        "test_embedding_bag_out_of_bounds_idx_padding_idx_0_mode_mean_xpu_float32_int64",
        "test_embedding_bag_out_of_bounds_idx_padding_idx_0_mode_mean_xpu_float64_int32",
        "test_embedding_bag_out_of_bounds_idx_padding_idx_0_mode_mean_xpu_float64_int64",
        "test_embedding_bag_out_of_bounds_idx_padding_idx_0_mode_sum_xpu_float32_int32",
        "test_embedding_bag_out_of_bounds_idx_padding_idx_0_mode_sum_xpu_float32_int64",
        "test_embedding_bag_out_of_bounds_idx_padding_idx_0_mode_sum_xpu_float64_int32",
        "test_embedding_bag_out_of_bounds_idx_padding_idx_0_mode_sum_xpu_float64_int64",
    ),
    "test_transformers_xpu.py": (
        # https://github.com/intel/torch-xpu-ops/issues/761
        # AssertionError: False is not true
        # CPU fallback failure. To support aten::transformer_encoder_layer_forward with proper priority.
        "test_disable_fastpath_xpu",
        # We have no mechanism to handle SDPBackend::ERROR so far. Full support will be provided once all SDPBackends are supported.
        "test_dispatch_fails_no_backend_xpu",
        # NestedTensorXPU not supported
        # Could not run 'aten::_to_copy' with arguments from the 'NestedTensorXPU' backend
        "test_with_nested_tensor_input_xpu",
        # oneDNN issues
        # Double and complex datatype matmul is not supported in oneDNN
        # https://github.com/intel/torch-xpu-ops/issues/253
        "test_sdp_math_gradcheck_contiguous_inputs_False_xpu",
        "test_sdp_math_gradcheck_contiguous_inputs_True_xpu",
        "test_transformerencoder_batch_first_True_training_True_enable_nested_tensor_True_xpu",
        "test_transformerencoder_batch_first_True_training_True_enable_nested_tensor_False_xpu",
        "test_transformerencoder_batch_first_True_training_False_enable_nested_tensor_True_xpu",
        "test_transformerencoder_batch_first_True_training_False_enable_nested_tensor_False_xpu",
        "test_transformerencoder_batch_first_False_training_True_enable_nested_tensor_True_xpu",
        "test_transformerencoder_batch_first_False_training_True_enable_nested_tensor_False_xpu",
        "test_transformerencoder_batch_first_False_training_False_enable_nested_tensor_True_xpu",
        "test_transformerencoder_batch_first_False_training_False_enable_nested_tensor_False_xpu",
        "test_scaled_dot_product_attention_4D_input_dim_no_attn_mask_dropout_p_0_5_xpu",
        "test_scaled_dot_product_attention_4D_input_dim_no_attn_mask_dropout_p_0_2_xpu",
        "test_scaled_dot_product_attention_4D_input_dim_no_attn_mask_dropout_p_0_0_xpu",
        "test_scaled_dot_product_attention_4D_input_dim_4D_causal_attn_mask_dropout_p_0_5_xpu",
        "test_scaled_dot_product_attention_4D_input_dim_4D_causal_attn_mask_dropout_p_0_2_xpu",
        "test_scaled_dot_product_attention_4D_input_dim_4D_causal_attn_mask_dropout_p_0_0_xpu",
        "test_scaled_dot_product_attention_4D_input_dim_4D_attn_mask_dropout_p_0_5_xpu",
        "test_scaled_dot_product_attention_4D_input_dim_4D_attn_mask_dropout_p_0_2_xpu",
        "test_scaled_dot_product_attention_4D_input_dim_4D_attn_mask_dropout_p_0_0_xpu",
        "test_scaled_dot_product_attention_4D_input_dim_2D_causal_attn_mask_dropout_p_0_5_xpu",
        "test_scaled_dot_product_attention_4D_input_dim_2D_causal_attn_mask_dropout_p_0_2_xpu",
        "test_scaled_dot_product_attention_4D_input_dim_2D_causal_attn_mask_dropout_p_0_0_xpu",
        "test_scaled_dot_product_attention_4D_input_dim_2D_attn_mask_dropout_p_0_5_xpu",
        "test_scaled_dot_product_attention_4D_input_dim_2D_attn_mask_dropout_p_0_2_xpu",
        "test_scaled_dot_product_attention_4D_input_dim_2D_attn_mask_dropout_p_0_0_xpu",
        "test_scaled_dot_product_attention_3D_input_dim_no_attn_mask_dropout_p_0_5_xpu",
        "test_scaled_dot_product_attention_3D_input_dim_no_attn_mask_dropout_p_0_2_xpu",
        "test_scaled_dot_product_attention_3D_input_dim_no_attn_mask_dropout_p_0_0_xpu",
        "test_scaled_dot_product_attention_3D_input_dim_3D_causal_attn_mask_dropout_p_0_5_xpu",
        "test_scaled_dot_product_attention_3D_input_dim_3D_causal_attn_mask_dropout_p_0_2_xpu",
        "test_scaled_dot_product_attention_3D_input_dim_3D_causal_attn_mask_dropout_p_0_0_xpu",
        "test_scaled_dot_product_attention_3D_input_dim_3D_attn_mask_dropout_p_0_5_xpu",
        "test_scaled_dot_product_attention_3D_input_dim_3D_attn_mask_dropout_p_0_2_xpu",
        "test_scaled_dot_product_attention_3D_input_dim_3D_attn_mask_dropout_p_0_0_xpu",
        "test_scaled_dot_product_attention_3D_input_dim_2D_causal_attn_mask_dropout_p_0_5_xpu",
        "test_scaled_dot_product_attention_3D_input_dim_2D_causal_attn_mask_dropout_p_0_2_xpu",
        "test_scaled_dot_product_attention_3D_input_dim_2D_causal_attn_mask_dropout_p_0_0_xpu",
        "test_scaled_dot_product_attention_3D_input_dim_2D_attn_mask_dropout_p_0_5_xpu",
        "test_scaled_dot_product_attention_3D_input_dim_2D_attn_mask_dropout_p_0_2_xpu",
        "test_scaled_dot_product_attention_3D_input_dim_2D_attn_mask_dropout_p_0_0_xpu",
    ),
    "test_complex_xpu.py": None,
    "test_modules_xpu.py": (
        # oneDNN issues
        # RuntimeError: Double and complex datatype matmul is not supported in oneDNN
        "test_cpu_gpu_parity_nn_Bilinear_xpu_float64",
        "test_cpu_gpu_parity_nn_GRUCell_xpu_float64",
        "test_cpu_gpu_parity_nn_GRU_eval_mode_xpu_float64",
        "test_cpu_gpu_parity_nn_GRU_train_mode_xpu_float64",
        "test_cpu_gpu_parity_nn_LSTMCell_xpu_float64",
        "test_cpu_gpu_parity_nn_LSTM_eval_mode_xpu_float64",
        "test_cpu_gpu_parity_nn_LSTM_train_mode_xpu_float64",
        "test_cpu_gpu_parity_nn_Linear_xpu_float64",
        "test_cpu_gpu_parity_nn_MultiheadAttention_eval_mode_xpu_float64",
        "test_cpu_gpu_parity_nn_MultiheadAttention_train_mode_xpu_float64",
        "test_cpu_gpu_parity_nn_RNNCell_xpu_float64",
        "test_cpu_gpu_parity_nn_RNN_eval_mode_xpu_float64",
        "test_cpu_gpu_parity_nn_RNN_train_mode_xpu_float64",
        "test_cpu_gpu_parity_nn_TransformerDecoderLayer_xpu_float64",
        "test_cpu_gpu_parity_nn_TransformerEncoderLayer_eval_mode_xpu_float64",
        "test_cpu_gpu_parity_nn_TransformerEncoderLayer_train_mode_xpu_float64",
        "test_cpu_gpu_parity_nn_TransformerEncoder_eval_mode_xpu_float64",
        "test_cpu_gpu_parity_nn_TransformerEncoder_train_mode_xpu_float64",
        "test_cpu_gpu_parity_nn_Transformer_xpu_float64",
        "test_forward_nn_Bilinear_xpu_float64",
        "test_forward_nn_GRUCell_xpu_float64",
        "test_forward_nn_GRU_eval_mode_xpu_float64",
        "test_forward_nn_GRU_train_mode_xpu_float64",
        "test_forward_nn_LSTMCell_xpu_float64",
        "test_forward_nn_LSTM_eval_mode_xpu_float64",
        "test_forward_nn_LSTM_train_mode_xpu_float64",
        "test_forward_nn_Linear_xpu_float64",
        "test_forward_nn_MultiheadAttention_eval_mode_xpu_float64",
        "test_forward_nn_MultiheadAttention_train_mode_xpu_float64",
        "test_forward_nn_RNNCell_xpu_float64",
        "test_forward_nn_RNN_eval_mode_xpu_float64",
        "test_forward_nn_RNN_train_mode_xpu_float64",
        "test_forward_nn_TransformerDecoderLayer_xpu_float64",
        "test_forward_nn_TransformerEncoderLayer_eval_mode_xpu_float64",
        "test_forward_nn_TransformerEncoderLayer_train_mode_xpu_float64",
        "test_forward_nn_TransformerEncoder_eval_mode_xpu_float64",
        "test_forward_nn_TransformerEncoder_train_mode_xpu_float64",
        "test_forward_nn_Transformer_xpu_float64",
        "test_grad_nn_Bilinear_xpu_float64",
        "test_grad_nn_GRUCell_xpu_float64",
        "test_grad_nn_GRU_eval_mode_xpu_float64",
        "test_grad_nn_GRU_train_mode_xpu_float64",
        "test_grad_nn_LSTMCell_xpu_float64",
        "test_grad_nn_LSTM_eval_mode_xpu_float64",
        "test_grad_nn_LSTM_train_mode_xpu_float64",
        "test_grad_nn_Linear_xpu_float64",
        "test_grad_nn_MultiheadAttention_eval_mode_xpu_float64",
        "test_grad_nn_MultiheadAttention_train_mode_xpu_float64",
        "test_grad_nn_RNNCell_xpu_float64",
        "test_grad_nn_RNN_eval_mode_xpu_float64",
        "test_grad_nn_RNN_train_mode_xpu_float64",
        "test_grad_nn_TransformerDecoderLayer_xpu_float64",
        "test_grad_nn_TransformerEncoderLayer_eval_mode_xpu_float64",
        "test_grad_nn_TransformerEncoderLayer_train_mode_xpu_float64",
        "test_grad_nn_TransformerEncoder_eval_mode_xpu_float64",
        "test_grad_nn_TransformerEncoder_train_mode_xpu_float64",
        "test_grad_nn_Transformer_xpu_float64",
        "test_gradgrad_nn_Bilinear_xpu_float64",
        "test_gradgrad_nn_GRUCell_xpu_float64",
        "test_gradgrad_nn_GRU_eval_mode_xpu_float64",
        "test_gradgrad_nn_GRU_train_mode_xpu_float64",
        "test_gradgrad_nn_LSTMCell_xpu_float64",
        "test_gradgrad_nn_LSTM_eval_mode_xpu_float64",
        "test_gradgrad_nn_LSTM_train_mode_xpu_float64",
        "test_gradgrad_nn_Linear_xpu_float64",
        "test_gradgrad_nn_MultiheadAttention_eval_mode_xpu_float64",
        "test_gradgrad_nn_MultiheadAttention_train_mode_xpu_float64",
        "test_gradgrad_nn_RNNCell_xpu_float64",
        "test_gradgrad_nn_RNN_eval_mode_xpu_float64",
        "test_gradgrad_nn_RNN_train_mode_xpu_float64",
        "test_gradgrad_nn_TransformerDecoderLayer_xpu_float64",
        "test_gradgrad_nn_TransformerEncoderLayer_eval_mode_xpu_float64",
        "test_gradgrad_nn_TransformerEncoderLayer_train_mode_xpu_float64",
        "test_gradgrad_nn_TransformerEncoder_eval_mode_xpu_float64",
        "test_gradgrad_nn_TransformerEncoder_train_mode_xpu_float64",
        "test_gradgrad_nn_Transformer_xpu_float64",
        "test_if_train_and_eval_modes_differ_nn_Bilinear_xpu_float64",
        "test_if_train_and_eval_modes_differ_nn_GRUCell_xpu_float64",
        "test_if_train_and_eval_modes_differ_nn_LSTMCell_xpu_float64",
        "test_if_train_and_eval_modes_differ_nn_Linear_xpu_float64",
        "test_if_train_and_eval_modes_differ_nn_RNNCell_xpu_float64",
        "test_if_train_and_eval_modes_differ_nn_TransformerDecoderLayer_xpu_float64",
        "test_if_train_and_eval_modes_differ_nn_TransformerEncoderLayer_xpu_float64",
        "test_if_train_and_eval_modes_differ_nn_TransformerEncoder_xpu_float64",
        "test_if_train_and_eval_modes_differ_nn_Transformer_xpu_float64",
        "test_memory_format_nn_GRUCell_xpu_float64",
        "test_memory_format_nn_GRU_eval_mode_xpu_float64",
        "test_memory_format_nn_GRU_train_mode_xpu_float64",
        "test_memory_format_nn_LSTMCell_xpu_float64",
        "test_memory_format_nn_LSTM_eval_mode_xpu_float64",
        "test_memory_format_nn_LSTM_train_mode_xpu_float64",
        "test_memory_format_nn_RNNCell_xpu_float64",
        "test_memory_format_nn_RNN_eval_mode_xpu_float64",
        "test_memory_format_nn_RNN_train_mode_xpu_float64",
        "test_multiple_device_transfer_nn_Bilinear_xpu_float64",
        "test_multiple_device_transfer_nn_GRUCell_xpu_float64",
        "test_multiple_device_transfer_nn_GRU_eval_mode_xpu_float64",
        "test_multiple_device_transfer_nn_GRU_train_mode_xpu_float64",
        "test_multiple_device_transfer_nn_LSTMCell_xpu_float64",
        "test_multiple_device_transfer_nn_LSTM_eval_mode_xpu_float64",
        "test_multiple_device_transfer_nn_LSTM_train_mode_xpu_float64",
        "test_multiple_device_transfer_nn_Linear_xpu_float64",
        "test_multiple_device_transfer_nn_MultiheadAttention_eval_mode_xpu_float64",
        "test_multiple_device_transfer_nn_MultiheadAttention_train_mode_xpu_float64",
        "test_multiple_device_transfer_nn_RNNCell_xpu_float64",
        "test_multiple_device_transfer_nn_RNN_eval_mode_xpu_float64",
        "test_multiple_device_transfer_nn_RNN_train_mode_xpu_float64",
        "test_multiple_device_transfer_nn_TransformerDecoderLayer_xpu_float64",
        "test_multiple_device_transfer_nn_TransformerEncoderLayer_eval_mode_xpu_float64",
        "test_multiple_device_transfer_nn_TransformerEncoderLayer_train_mode_xpu_float64",
        "test_multiple_device_transfer_nn_TransformerEncoder_eval_mode_xpu_float64",
        "test_multiple_device_transfer_nn_TransformerEncoder_train_mode_xpu_float64",
        "test_multiple_device_transfer_nn_Transformer_xpu_float64",
        "test_non_contiguous_tensors_nn_Bilinear_xpu_float64",
        "test_non_contiguous_tensors_nn_GRUCell_xpu_float64",
        "test_non_contiguous_tensors_nn_GRU_eval_mode_xpu_float64",
        "test_non_contiguous_tensors_nn_GRU_train_mode_xpu_float64",
        "test_non_contiguous_tensors_nn_LSTMCell_xpu_float64",
        "test_non_contiguous_tensors_nn_LSTM_eval_mode_xpu_float64",
        "test_non_contiguous_tensors_nn_LSTM_train_mode_xpu_float64",
        "test_non_contiguous_tensors_nn_Linear_xpu_float64",
        "test_non_contiguous_tensors_nn_MultiheadAttention_eval_mode_xpu_float64",
        "test_non_contiguous_tensors_nn_MultiheadAttention_train_mode_xpu_float64",
        "test_non_contiguous_tensors_nn_RNNCell_xpu_float64",
        "test_non_contiguous_tensors_nn_RNN_eval_mode_xpu_float64",
        "test_non_contiguous_tensors_nn_RNN_train_mode_xpu_float64",
        "test_non_contiguous_tensors_nn_TransformerDecoderLayer_xpu_float64",
        "test_non_contiguous_tensors_nn_TransformerEncoderLayer_eval_mode_xpu_float64",
        "test_non_contiguous_tensors_nn_TransformerEncoderLayer_train_mode_xpu_float64",
        "test_non_contiguous_tensors_nn_TransformerEncoder_eval_mode_xpu_float64",
        "test_non_contiguous_tensors_nn_TransformerEncoder_train_mode_xpu_float64",
        "test_non_contiguous_tensors_nn_Transformer_xpu_float64",
        "test_save_load_nn_Bilinear_xpu_float64",
        "test_save_load_nn_GRUCell_xpu_float64",
        "test_save_load_nn_GRU_eval_mode_xpu_float64",
        "test_save_load_nn_GRU_train_mode_xpu_float64",
        "test_save_load_nn_LSTMCell_xpu_float64",
        "test_save_load_nn_LSTM_eval_mode_xpu_float64",
        "test_save_load_nn_LSTM_train_mode_xpu_float64",
        "test_save_load_nn_Linear_xpu_float64",
        "test_save_load_nn_MultiheadAttention_eval_mode_xpu_float64",
        "test_save_load_nn_MultiheadAttention_train_mode_xpu_float64",
        "test_save_load_nn_RNNCell_xpu_float64",
        "test_save_load_nn_RNN_eval_mode_xpu_float64",
        "test_save_load_nn_RNN_train_mode_xpu_float64",
        "test_save_load_nn_TransformerDecoderLayer_xpu_float64",
        "test_save_load_nn_TransformerEncoderLayer_eval_mode_xpu_float64",
        "test_save_load_nn_TransformerEncoderLayer_train_mode_xpu_float64",
        "test_save_load_nn_TransformerEncoder_eval_mode_xpu_float64",
        "test_save_load_nn_TransformerEncoder_train_mode_xpu_float64",
        "test_save_load_nn_Transformer_xpu_float64",
        # Unexpected success:
        "test_cpu_gpu_parity_nn_ConvTranspose1d_xpu_complex32",
        "test_cpu_gpu_parity_nn_ConvTranspose2d_xpu_complex32",
        # CPU fallback fails
        # RuntimeError: view size is not compatible with input tensor's size and stride (at least one dimension spans across two contiguous subspaces). Use .reshape(...) instead.
        # AssertionError: False is not true
        "test_to_nn_BatchNorm1d_eval_mode_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_BatchNorm1d_train_mode_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_BatchNorm2d_eval_mode_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_BatchNorm2d_train_mode_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_BatchNorm3d_eval_mode_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_BatchNorm3d_train_mode_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_Bilinear_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_Conv1d_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_Conv2d_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_Conv3d_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_ConvTranspose1d_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_ConvTranspose2d_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_ConvTranspose3d_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_Embedding_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_GRUCell_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_GRU_eval_mode_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_GRU_train_mode_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_GroupNorm_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_LSTMCell_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_LSTM_eval_mode_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_LSTM_train_mode_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_LayerNorm_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_Linear_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_MultiheadAttention_eval_mode_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_MultiheadAttention_train_mode_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_PReLU_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_RMSNorm_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_RNNCell_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_RNN_eval_mode_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_RNN_train_mode_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_TransformerDecoderLayer_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_TransformerEncoderLayer_eval_mode_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_TransformerEncoderLayer_train_mode_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_TransformerEncoder_eval_mode_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_TransformerEncoder_train_mode_swap_True_set_grad_True_xpu_float32",
        "test_to_nn_Transformer_swap_True_set_grad_True_xpu_float32",
    ),
    "test_nn_xpu.py": (
        # AttributeError: module 'torch.xpu' has no attribute 'FloatTensor'
        "test_type",
        # rnn fallback to cpu
        "test_cudnn_weight_format",
        # oneDNN issues
        # AssertionError: MultiheadAttention does not support NestedTensor outside of its fast path. The fast path was not hit because some Tensor argument's device is neither one of cpu, cuda or privateuseone
        "test_TransformerEncoderLayer_empty_xpu",
        "test_transformerencoderlayer_xpu_float16",
        "test_transformerencoderlayer_xpu_float32",
        # oneDNN issues
        # RuntimeError: Double and complex datatype matmul is not supported in oneDNN
        "test_TransformerDecoderLayer_empty_xpu",
        "test_TransformerDecoder_empty_xpu",
        "test_TransformerEncoder_empty_xpu",
        "test_Transformer_empty_xpu",
        "test_affine_grid",
        "test_affine_grid_3d",
        "test_RNN_cpu_vs_cudnn_no_dropout",
        "test_RNN_cpu_vs_cudnn_with_dropout",
        "test_GRU_grad_and_gradgrad_xpu_float64",
        "test_LSTM_grad_and_gradgrad_xpu_float64",
        "test_lstmcell_backward_only_one_output_grad_xpu_float64",
        "test_module_to_empty_xpu_float64",
        "test_RNN_change_dropout",
        "test_RNN_dropout",
        "test_rnn_fused_xpu_float64",
        "test_rnn_retain_variables_xpu_float64",
        "test_transformerencoderlayer_xpu_float64",
        "test_variable_sequence_xpu_float64",
        # Unexpected success: CUDA only test case, launch grid_y == 2**16 (larger than CUDA maximum y-dimension limit 65535) and expect fail.
        # SYCL don't have this limitation and hence can pass.
        "test_upsamplingNearest2d_launch_fail_xpu",
        # Could not run 'aten::_thnn_fused_lstm_cell' with arguments from the 'CPU' backend.
        "test_RNN_cudnn_weight_norm",
        "test_partial_flat_weights",
        "test_variable_sequence_xpu_float16",
        "test_variable_sequence_xpu_float32",
        # CPU fallback could not cover
        # NotImplementedError: Could not run 'aten::_thnn_fused_gru_cell' with arguments from the 'CPU' backend. This could be because the operator doesn't exist for this backend, or was omitted during the selective/custom build pro...
        "test_cudnn_weight_tying",
        "test_RNN_input_size_zero",
        "test_rnn_fused_xpu_float32",
        "test_rnn_retain_variables_xpu_float16",
        "test_rnn_retain_variables_xpu_float32",
        # AssertionError: False is not true
        "test_ctc_loss_cudnn_xpu",  # want "xpu" in function name
        "test_ctc_loss_cudnn_tensor",  # want "xpu" in function name
    ),
    "test_indexing_xpu.py": (
        # XPU implementation doesn't claim FP8 support now
        # https://github.com/intel/torch-xpu-ops/issues/461
        "test_index_put_src_datatype_xpu_float8_e5m2",
        "test_index_put_src_datatype_xpu_float8_e4m3fn",
    ),
    "nn/test_pooling_xpu.py": None,
    "nn/test_dropout_xpu.py": None,
    "test_dataloader_xpu.py": (
        # Skipped because XPU doesn't support this
        # https://github.com/intel/torch-xpu-ops/issues/613
        "test_nested_tensor_multiprocessing_context_forkserver_xpu",
        "test_nested_tensor_multiprocessing_context_spawn_xpu",
        # pinned memory issue
        # https://github.com/intel/torch-xpu-ops/issues/296
        "test_custom_batch_pin",
        "test_sequential_pin_memory",
        "test_shuffle_pin_memory",
        "test_pin_memory",
        # failed in preci
        # https://github.com/intel/torch-xpu-ops/issues/928
        "test_segfault",
    ),
    "test_tensor_creation_ops_xpu.py": (
        # CPU only (vs NumPy). CUDA skips these cases since non-deterministic results are produced for inf and nan.
        "test_float_to_int_conversion_finite_xpu_int8",
        "test_float_to_int_conversion_finite_xpu_int16",
        # Dispatch issue. It is a composite operator. But it is implemented by
        # DispatchStub. XPU doesn't support DispatchStub.
        "test_kaiser_window_xpu",
    ),
    "test_autocast_xpu.py": None,
    "test_autograd_xpu.py": (
        # https://github.com/intel/torch-xpu-ops/issues/618
        # c10::NotImplementedError
        "test_autograd_composite_implicit_and_dispatch_registration_xpu",
        "test_autograd_multiple_dispatch_registrations_xpu",
        # AttributeError: module 'torch.xpu' has no attribute
        "test_profiler_emit_nvtx_xpu",
        # Double and complex datatype matmul is not supported in oneDNN
        "test_mv_grad_stride_0_xpu",
        # module 'torch._C' has no attribute '_scatter'
        "test_checkpointing_without_reentrant_dataparallel",
        "test_dataparallel_saved_tensors_hooks",
        # Runtime error after enabling PTI
        # RuntimeError: Fail to enable Kineto Profiler on XPU due to error code: 200
        # https://github.com/intel/torch-xpu-ops/issues/731
        "test_profiler",
        "test_record_function",
        # Sometimes, will raise AssertionError: "Simulate error" does not match "grad can be implicitly created only for scalar outputs"
        # https://github.com/intel/torch-xpu-ops/issues/1071
        "test_reentrant_parent_error_on_cpu_xpu",
    ),
    "test_reductions_xpu.py": (
        # Accumulated error due to different accumulation order.
        "test_logcumsumexp_complex_xpu_complex64",
    ),
    "test_unary_ufuncs_xpu.py": (
        # AssertionError: Jiterator is only supported on CUDA and ROCm GPUs, none are available.
        "_jiterator_",
        # For extreme value processing, Numpy and XPU results are inconsistent
        # std operations get different behavior on std::complex operands for extremal cases
        "test_reference_numerics_extremal__refs_log_xpu_complex64",
        "test_reference_numerics_extremal_log_xpu_complex64",
        "test_reference_numerics_extremal__refs_tanh_xpu_complex128",
        "test_reference_numerics_extremal__refs_tanh_xpu_complex64",
        "test_reference_numerics_extremal_tanh_xpu_complex128",
        "test_reference_numerics_extremal_tanh_xpu_complex64",
        "test_reference_numerics_extremal__refs_acos_xpu_complex64",
        "test_reference_numerics_extremal__refs_acosh_xpu_complex64",
        "test_reference_numerics_extremal_acos_xpu_complex64",
        "test_reference_numerics_extremal_acosh_xpu_complex64",
        "test_reference_numerics_extremal__refs_asinh_xpu_complex64",
        "test_reference_numerics_extremal_asinh_xpu_complex64",
        "test_reference_numerics_extremal__refs_asin_xpu_complex64",
        "test_reference_numerics_extremal_asin_xpu_complex64",
        "test_reference_numerics_large__refs_acosh_xpu_complex64",
        "test_reference_numerics_large_acosh_xpu_complex64",
        "test_reference_numerics_extremal__refs_log10_xpu_complex64",
        "test_reference_numerics_extremal__refs_log1p_xpu_complex64",
        "test_reference_numerics_extremal_log10_xpu_complex64",
        "test_reference_numerics_extremal_log1p_xpu_complex64",
        "test_reference_numerics_extremal__refs_tan_xpu_complex128",
        "test_reference_numerics_extremal__refs_tan_xpu_complex64",
        "test_reference_numerics_extremal_tan_xpu_complex128",
        "test_reference_numerics_extremal_tan_xpu_complex64",
        "test_reference_numerics_large__refs_tan_xpu_complex32",
        "test_reference_numerics_large_tan_xpu_complex32",
        "test_reference_numerics_large__refs_asinh_xpu_complex128",
        "test_reference_numerics_large__refs_asinh_xpu_complex64",
        "test_reference_numerics_large__refs_asinh_xpu_complex32",
        "test_reference_numerics_large_asinh_xpu_complex128",
        "test_reference_numerics_large_asinh_xpu_complex64",
        "test_reference_numerics_large_asinh_xpu_complex32",
        # AssertionError: Tensor-likes are not close!
        # exceeded maximum allowed difference
        # Greatest absolute difference: 6.266784475883469e-05 at index (463, 204) (up to 1e-05 allowed)
        # Greatest relative difference: 1.9145216356264427e-05 at index (463, 204) (up to 1.3e-06 allowed)
        "test_reference_numerics_normal__refs_asinh_xpu_complex64",
        "test_reference_numerics_normal_asinh_xpu_complex64",
        "test_batch_vs_slicing__refs_sigmoid_xpu_complex128",
        # Unexpected success: CUDA uses thrust::sqrt and has accuracy issue. XPU use std::sqrt and has no issue.
        "test_reference_numerics_large_rsqrt_xpu_complex32",
        # Numeric difference
        # https://github.com/intel/torch-xpu-ops/issues/544
        # Expected 0.00497517 but got 0.00497520063072443.
        # Absolute difference: 3.063072442997111e-08 (up to 0.0 allowed)
        # Relative difference: 6.156719153309558e-06 (up to 1e-06 allowed)
        "test_log1p_complex_xpu_complex64",
        # Issue: https://github.com/intel/torch-xpu-ops/issues/622
        # Mismatched elements: 8 / 943593 (0.0%)
        # Greatest absolute difference: inf at index (9, 860) (up to 0.001 allowed)
        # Greatest relative difference: inf at index (9, 860) (up to 0.0012 allowed)
        "test_reference_numerics_normal_polygamma_polygamma_n_1_xpu_float16",
        "test_reference_numerics_normal_polygamma_polygamma_n_2_xpu_float16",
        "test_reference_numerics_normal_polygamma_polygamma_n_3_xpu_float16",
        "test_reference_numerics_normal_polygamma_polygamma_n_4_xpu_float16",
        # CUDA XFAIL
        "test_reference_numerics_large__refs_rsqrt_xpu_complex32",
        # 2025 bundle std::pow complex result is different on host and device
        "test_exp_xpu_complex64",
        "test_reference_numerics_extremal__refs_exp2_xpu_complex64",
        "test_reference_numerics_extremal__refs_exp_xpu_complex64",
        "test_reference_numerics_extremal_exp2_xpu_complex64",
        "test_reference_numerics_extremal_exp_xpu_complex64",
        "test_reference_numerics_large__refs_exp_xpu_complex32",
        "test_reference_numerics_large_exp_xpu_complex32",
    ),
    "test_masked_xpu.py": (
        # Summary: Sparse CSR for XPU is not supported
        # NotImplementedError: Could not run 'aten::_to_sparse_csr' with arguments from the 'SparseXPU' backend.
        # https://github.com/intel/torch-xpu-ops/issues/357
        "test_mask_layout_sparse_coo_masked_amax_xpu_bfloat16",
        "test_mask_layout_sparse_coo_masked_amax_xpu_float16",
        "test_mask_layout_sparse_coo_masked_amax_xpu_float32",
        "test_mask_layout_sparse_coo_masked_amax_xpu_float64",
        "test_mask_layout_sparse_coo_masked_amin_xpu_bfloat16",
        "test_mask_layout_sparse_coo_masked_amin_xpu_float16",
        "test_mask_layout_sparse_coo_masked_amin_xpu_float32",
        "test_mask_layout_sparse_coo_masked_amin_xpu_float64",
        "test_mask_layout_sparse_coo_masked_prod_xpu_bfloat16",
        "test_mask_layout_sparse_coo_masked_prod_xpu_bool",
        "test_mask_layout_sparse_coo_masked_prod_xpu_complex128",
        "test_mask_layout_sparse_coo_masked_prod_xpu_complex64",
        "test_mask_layout_sparse_coo_masked_prod_xpu_float16",
        "test_mask_layout_sparse_coo_masked_prod_xpu_float32",
        "test_mask_layout_sparse_coo_masked_prod_xpu_float64",
        "test_mask_layout_sparse_coo_masked_prod_xpu_int16",
        "test_mask_layout_sparse_coo_masked_prod_xpu_int32",
        "test_mask_layout_sparse_coo_masked_prod_xpu_int64",
        "test_mask_layout_sparse_coo_masked_prod_xpu_int8",
        "test_mask_layout_sparse_coo_masked_prod_xpu_uint8",
        # NotImplementedError: Could not run 'aten::_to_sparse_csr' with arguments from the 'SparseXPU' backend.
        "test_mask_layout_sparse_coo_masked_sum_xpu_bfloat16",
        "test_mask_layout_sparse_coo_masked_sum_xpu_bool",
        "test_mask_layout_sparse_coo_masked_sum_xpu_complex128",
        "test_mask_layout_sparse_coo_masked_sum_xpu_complex64",
        "test_mask_layout_sparse_coo_masked_sum_xpu_float16",
        "test_mask_layout_sparse_coo_masked_sum_xpu_float32",
        "test_mask_layout_sparse_coo_masked_sum_xpu_float64",
        "test_mask_layout_sparse_coo_masked_sum_xpu_int16",
        "test_mask_layout_sparse_coo_masked_sum_xpu_int32",
        "test_mask_layout_sparse_coo_masked_sum_xpu_int64",
        "test_mask_layout_sparse_coo_masked_sum_xpu_int8",
        "test_mask_layout_sparse_coo_masked_sum_xpu_uint8",
        # CPU- and CUDA-specific code in SparseCsrTensor.cpp.
        # RuntimeError: device type of values (xpu) must be CPU or CUDA or Meta :
        "test_mask_layout_sparse_csr_masked_amax_xpu_bfloat16",
        "test_mask_layout_sparse_csr_masked_amax_xpu_float16",
        "test_mask_layout_sparse_csr_masked_amax_xpu_float32",
        "test_mask_layout_sparse_csr_masked_amax_xpu_float64",
        "test_mask_layout_sparse_csr_masked_amin_xpu_bfloat16",
        "test_mask_layout_sparse_csr_masked_amin_xpu_float16",
        "test_mask_layout_sparse_csr_masked_amin_xpu_float32",
        "test_mask_layout_sparse_csr_masked_amin_xpu_float64",
        "test_mask_layout_sparse_csr_masked_mean_xpu_bfloat16",
        "test_mask_layout_sparse_csr_masked_mean_xpu_float16",
        "test_mask_layout_sparse_csr_masked_mean_xpu_float32",
        "test_mask_layout_sparse_csr_masked_mean_xpu_float64",
        "test_mask_layout_sparse_csr_masked_prod_xpu_bfloat16",
        "test_mask_layout_sparse_csr_masked_prod_xpu_bool",
        "test_mask_layout_sparse_csr_masked_prod_xpu_complex128",
        "test_mask_layout_sparse_csr_masked_prod_xpu_complex64",
        "test_mask_layout_sparse_csr_masked_prod_xpu_float16",
        "test_mask_layout_sparse_csr_masked_prod_xpu_float32",
        "test_mask_layout_sparse_csr_masked_prod_xpu_float64",
        "test_mask_layout_sparse_csr_masked_prod_xpu_int16",
        "test_mask_layout_sparse_csr_masked_prod_xpu_int32",
        "test_mask_layout_sparse_csr_masked_prod_xpu_int64",
        "test_mask_layout_sparse_csr_masked_prod_xpu_int8",
        "test_mask_layout_sparse_csr_masked_prod_xpu_uint8",
        "test_mask_layout_sparse_csr_masked_sum_xpu_bfloat16",
        "test_mask_layout_sparse_csr_masked_sum_xpu_bool",
        "test_mask_layout_sparse_csr_masked_sum_xpu_complex128",
        "test_mask_layout_sparse_csr_masked_sum_xpu_complex64",
        "test_mask_layout_sparse_csr_masked_sum_xpu_float16",
        "test_mask_layout_sparse_csr_masked_sum_xpu_float32",
        "test_mask_layout_sparse_csr_masked_sum_xpu_float64",
        "test_mask_layout_sparse_csr_masked_sum_xpu_int16",
        "test_mask_layout_sparse_csr_masked_sum_xpu_int32",
        "test_mask_layout_sparse_csr_masked_sum_xpu_int64",
        "test_mask_layout_sparse_csr_masked_sum_xpu_int8",
        "test_mask_layout_sparse_csr_masked_sum_xpu_uint8",
        "test_mask_layout_strided_masked_mean_xpu_bfloat16",
        "test_mask_layout_strided_masked_mean_xpu_float16",
        "test_mask_layout_strided_masked_mean_xpu_float32",
        "test_mask_layout_strided_masked_mean_xpu_float64",
        # NotImplementedError: Could not run 'aten::_to_sparse_csr' with arguments from the 'SparseXPU' backend.
        "test_mask_layout_strided_masked_amax_xpu_bfloat16",
        "test_mask_layout_strided_masked_amax_xpu_float16",
        "test_mask_layout_strided_masked_amax_xpu_float32",
        "test_mask_layout_strided_masked_amax_xpu_float64",
        "test_mask_layout_strided_masked_amin_xpu_bfloat16",
        "test_mask_layout_strided_masked_amin_xpu_float16",
        "test_mask_layout_strided_masked_amin_xpu_float32",
        "test_mask_layout_strided_masked_amin_xpu_float64",
        "test_mask_layout_strided_masked_prod_xpu_bfloat16",
        "test_mask_layout_strided_masked_prod_xpu_bool",
        "test_mask_layout_strided_masked_prod_xpu_complex128",
        "test_mask_layout_strided_masked_prod_xpu_complex64",
        "test_mask_layout_strided_masked_prod_xpu_float16",
        "test_mask_layout_strided_masked_prod_xpu_float32",
        "test_mask_layout_strided_masked_prod_xpu_float64",
        "test_mask_layout_strided_masked_prod_xpu_int16",
        "test_mask_layout_strided_masked_prod_xpu_int32",
        "test_mask_layout_strided_masked_prod_xpu_int64",
        "test_mask_layout_strided_masked_prod_xpu_int8",
        "test_mask_layout_strided_masked_prod_xpu_uint8",
        "test_mask_layout_strided_masked_sum_xpu_bfloat16",
        "test_mask_layout_strided_masked_sum_xpu_bool",
        "test_mask_layout_strided_masked_sum_xpu_complex128",
        "test_mask_layout_strided_masked_sum_xpu_complex64",
        "test_mask_layout_strided_masked_sum_xpu_float16",
        "test_mask_layout_strided_masked_sum_xpu_float32",
        "test_mask_layout_strided_masked_sum_xpu_float64",
        "test_mask_layout_strided_masked_sum_xpu_int16",
        "test_mask_layout_strided_masked_sum_xpu_int32",
        "test_mask_layout_strided_masked_sum_xpu_int64",
        "test_mask_layout_strided_masked_sum_xpu_int8",
        "test_mask_layout_strided_masked_sum_xpu_uint8",
    ),
    "test_view_ops_xpu.py": (
        # Need quantization support, NotImplementedError: Could not run 'aten::_empty_affine_quantized' with arguments from the 'QuantizedXPU' backend.
        "test_flatten_xpu",
        "test_ravel_xpu",
    ),
    "test_shape_ops_xpu.py": (
        # Need quantization support.
        # https://github.com/intel/torch-xpu-ops/issues/275
        # NotImplementedError: Could not run 'aten::empty_quantized' with arguments from the 'QuantizedXPU' backend.
        "test_flip_xpu_float32",
    ),
    "test_content_store_xpu.py": None,
    "test_native_functions_xpu.py": None,
    "nn/test_init_xpu.py": None,
    "test_namedtensor_xpu.py": None,
    "nn/test_lazy_modules_xpu.py": None,
    "test_linalg_xpu.py": (
        # Summary:
        # All linear algebra related ops are not supported for XPU.
        # _convert_weight_to_int4pack not support
        "_int4_mm_m_",
        # RuntimeError: Double and complex datatype matmul is not supported in oneDNN
        "test_1_sized_with_0_strided_xpu_float64",
        "test_addbmm_xpu_complex128",
        "test_addbmm_xpu_complex64",
        "test_addbmm_xpu_float64",
        "test_addmm_gelu_xpu_float64",
        "test_addmm_relu_xpu_float64",
        "test_addmm_sizes_xpu_float64",
        "test_addmm_xpu_complex128",
        "test_addmm_xpu_complex64",
        "test_addmm_xpu_float64",
        "test_addmv_rowmajor_colmajor_incx_incy_lda_xpu_float64",
        "test_addmv_xpu_complex128",
        "test_addmv_xpu_complex64",
        "test_addmv_xpu_float64",
        "test_baddbmm_xpu_complex128",
        "test_baddbmm_xpu_complex64",
        "test_baddbmm_xpu_float64",
        "test_bmm_xpu_complex128",
        "test_bmm_xpu_complex64",
        "test_bmm_xpu_float64",
        "test_blas_alpha_beta_empty_xpu_float64",
        "test_cholesky_errors_and_warnings_xpu_complex128",
        "test_cholesky_errors_and_warnings_xpu_complex64",
        "test_cholesky_errors_and_warnings_xpu_float64",
        "test_cholesky_ex_xpu_complex128",
        "test_cholesky_ex_xpu_complex64",
        "test_cholesky_ex_xpu_float64",
        "test_cholesky_inverse_xpu_complex128",
        "test_cholesky_inverse_xpu_complex64",
        "test_cholesky_inverse_xpu_float64",
        "test_cholesky_solve_backward_xpu_float64",
        "test_cholesky_solve_batched_many_batches_xpu_complex128",
        "test_cholesky_solve_batched_many_batches_xpu_complex64",
        "test_cholesky_solve_batched_many_batches_xpu_float64",
        "test_cholesky_solve_batched_xpu_complex128",
        "test_cholesky_solve_batched_xpu_complex64",
        "test_cholesky_solve_batched_xpu_float64",
        "test_cholesky_solve_xpu_complex128",
        "test_cholesky_solve_xpu_complex64",
        "test_cholesky_solve_xpu_float64",
        "test_cholesky_xpu_complex128",
        "test_cholesky_xpu_complex64",
        "test_cholesky_xpu_float64",
        "test_corner_cases_of_cublasltmatmul_xpu_complex128",
        "test_corner_cases_of_cublasltmatmul_xpu_complex64",
        "test_corner_cases_of_cublasltmatmul_xpu_float64",
        "test_det_logdet_slogdet_batched_xpu_float64",
        "test_det_logdet_slogdet_xpu_float64",
        "test_eig_check_magma_xpu_float32",
        "test_einsum_random_xpu_complex128",
        "test_einsum_random_xpu_float64",
        "test_einsum_sublist_format_xpu_complex128",
        "test_einsum_sublist_format_xpu_float64",
        "test_einsum_xpu_complex128",
        "test_einsum_xpu_float64",
        "test_inner_xpu_complex64",
        "test_invariance_error_spectral_decompositions_xpu_complex128",
        "test_inverse_many_batches_xpu_complex128",
        "test_inverse_many_batches_xpu_complex64",
        "test_inverse_many_batches_xpu_float64",
        "test_inverse_xpu_complex128",
        "test_inverse_xpu_complex64",
        "test_inverse_xpu_float64",
        "test_ldl_factor_xpu_complex128",
        "test_ldl_factor_xpu_complex64",
        "test_ldl_factor_xpu_float64",
        "test_ldl_solve_xpu_complex128",
        "test_ldl_solve_xpu_complex64",
        "test_ldl_solve_xpu_float64",
        "test_linalg_lstsq_batch_broadcasting_xpu_complex128",
        "test_linalg_lstsq_batch_broadcasting_xpu_complex64",
        "test_linalg_lstsq_batch_broadcasting_xpu_float64",
        "test_linalg_lstsq_xpu_complex128",
        "test_linalg_lstsq_xpu_complex64",
        "test_linalg_lstsq_xpu_float64",
        "test_linalg_lu_family_xpu_complex128",
        "test_linalg_lu_family_xpu_complex64",
        "test_linalg_lu_family_xpu_float64",
        "test_linalg_lu_solve_xpu_complex128",
        "test_linalg_lu_solve_xpu_complex64",
        "test_linalg_lu_solve_xpu_float64",
        "test_linalg_solve_triangular_broadcasting_xpu_complex128",
        "test_linalg_solve_triangular_broadcasting_xpu_complex64",
        "test_linalg_solve_triangular_broadcasting_xpu_float64",
        "test_linalg_solve_triangular_large_xpu_complex128",
        "test_linalg_solve_triangular_large_xpu_complex64",
        "test_linalg_solve_triangular_large_xpu_float64",
        "test_linalg_solve_triangular_xpu_complex128",
        "test_linalg_solve_triangular_xpu_complex64",
        "test_linalg_solve_triangular_xpu_float64",
        "test_lobpcg_basic_xpu_float64",
        "test_lobpcg_ortho_xpu_float64",
        "test_lu_solve_batched_broadcasting_xpu_complex128",
        "test_lu_solve_batched_broadcasting_xpu_complex64",
        "test_lu_solve_batched_broadcasting_xpu_float64",
        "test_lu_solve_batched_many_batches_xpu_complex128",
        "test_lu_solve_batched_many_batches_xpu_complex64",
        "test_lu_solve_batched_many_batches_xpu_float64",
        "test_lu_solve_batched_xpu_complex128",
        "test_lu_solve_batched_xpu_complex64",
        "test_lu_solve_batched_xpu_float64",
        "test_lu_solve_large_matrices_xpu_complex128",
        "test_lu_solve_large_matrices_xpu_complex64",
        "test_lu_solve_large_matrices_xpu_float64",
        "test_lu_solve_xpu_complex128",
        "test_lu_solve_xpu_complex64",
        "test_lu_solve_xpu_float64",
        "test_matmul_out_kernel_errors_with_autograd_xpu_complex64",
        "test_matmul_small_brute_force_1d_Nd_xpu_complex64",
        "test_matmul_small_brute_force_2d_Nd_xpu_complex64",
        "test_matmul_small_brute_force_3d_Nd_xpu_complex64",
        "test_matrix_power_negative_xpu_complex128",
        "test_matrix_power_negative_xpu_float64",
        "test_matrix_power_non_negative_xpu_complex128",
        "test_matrix_power_non_negative_xpu_float64",
        "test_matrix_rank_atol_rtol_xpu_float64",
        "test_matrix_rank_xpu_complex128",
        "test_matrix_rank_xpu_complex64",
        "test_matrix_rank_xpu_float64",
        "test_mm_bmm_non_memory_dense_xpu",
        "test_mm_conjtranspose_xpu",
        "test_mm_xpu_complex128",
        "test_mm_xpu_complex64",
        "test_mm_xpu_float64",
        "test_multi_dot_xpu_complex128",
        "test_multi_dot_xpu_float64",
        "test_old_cholesky_batched_many_batches_xpu_float64",
        "test_old_cholesky_batched_upper_xpu_complex128",
        "test_old_cholesky_batched_upper_xpu_complex64",
        "test_old_cholesky_batched_upper_xpu_float64",
        "test_old_cholesky_batched_xpu_complex128",
        "test_old_cholesky_batched_xpu_complex64",
        "test_old_cholesky_batched_xpu_float64",
        "test_old_cholesky_xpu_complex128",
        "test_old_cholesky_xpu_complex64",
        "test_old_cholesky_xpu_float64",
        "test_ormqr_xpu_complex128",
        "test_ormqr_xpu_complex64",
        "test_ormqr_xpu_float64",
        "test_pca_lowrank_xpu",
        "test_pinv_errors_and_warnings_xpu_complex128",
        "test_pinv_errors_and_warnings_xpu_complex64",
        "test_pinv_errors_and_warnings_xpu_float64",
        "test_pinv_xpu_complex128",
        "test_pinv_xpu_complex64",
        "test_pinv_xpu_float64",
        "test_pinverse_xpu_complex128",
        "test_pinverse_xpu_complex64",
        "test_pinverse_xpu_float64",
        "test_slogdet_xpu_complex128",
        "test_slogdet_xpu_complex64",
        "test_slogdet_xpu_float64",
        "test_solve_batched_broadcasting_xpu_complex128",
        "test_solve_batched_broadcasting_xpu_complex64",
        "test_solve_batched_broadcasting_xpu_float64",
        "test_solve_xpu_complex128",
        "test_solve_xpu_complex64",
        "test_solve_xpu_float64",
        "test_strided_mm_bmm_xpu_float64",
        "test_svd_lowrank_xpu_complex128",
        "test_svd_lowrank_xpu_float64",
        "test_svd_xpu_complex128",
        "test_svd_xpu_complex64",
        "test_svd_xpu_float64",
        "test_triangular_solve_batched_broadcasting_xpu_complex128",
        "test_triangular_solve_batched_broadcasting_xpu_complex64",
        "test_triangular_solve_batched_broadcasting_xpu_float64",
        "test_triangular_solve_batched_many_batches_xpu_complex128",
        "test_triangular_solve_batched_many_batches_xpu_complex64",
        "test_triangular_solve_batched_many_batches_xpu_float64",
        "test_triangular_solve_batched_xpu_complex128",
        "test_triangular_solve_batched_xpu_complex64",
        "test_triangular_solve_batched_xpu_float64",
        "test_triangular_solve_xpu_complex128",
        "test_triangular_solve_xpu_complex64",
        "test_triangular_solve_xpu_float64",
        # https://github.com/intel/torch-xpu-ops/issues/821
        # addmm.out, addmv.out, linalg_lstsq, vdot&dot, _int_mm lack XPU support and fallback to CPU
        "test_addmm_sizes_xpu_complex128",
        "test_addmm_sizes_xpu_complex64",
        "test_blas_alpha_beta_empty_xpu_complex128",
        "test_blas_alpha_beta_empty_xpu_complex64",
        "test_linalg_lstsq_input_checks_xpu_complex128",
        "test_linalg_lstsq_input_checks_xpu_complex64",
        "test_linalg_lstsq_input_checks_xpu_float32",
        "test_linalg_lstsq_input_checks_xpu_float64",
        "test_dot_invalid_args_xpu",
        "test_vdot_invalid_args_xpu",
        "test__int_mm_errors_xpu",
        # https://github.com/intel/torch-xpu-ops/issues/821
        # RuntimeError: Fail to enable Kineto Profiler on XPU due to error code: 200
        "test_norm_fused_type_promotion_xpu_bfloat16",
        # AssertionError: True is not false
        "test_norm_fused_type_promotion_xpu_float16",
        # https://github.com/intel/torch-xpu-ops/issues/814
        # xpu does not have '_cuda_tunableop_is_enabled' API
        "test_matmul_small_brute_force_tunableop_xpu_float16",
        "test_matmul_small_brute_force_tunableop_xpu_float32",
        "test_matmul_small_brute_force_tunableop_xpu_float64",
        "test_matmul_offline_tunableop_xpu_float16",
        # XPU does not support tunable.
        "test_bmm_tunableop_rocm_xpu_float32",
        "test_numeric_check_leak_tunableop_rocm_xpu_float32",
        "test_dump_results_on_exit_tunableop_xpu_float32",
        "test_rotating_buffer_tunableop_xpu_float32",
        # CUDA-specific cases added in latest PyTorch
        # AttributeError: module 'torch._C' has no attribute '_cuda_tunableop_enable'
        "test_matmul_check_entries_tunableop_xpu_float16",
        "test_minimum_tuning_iteration_tunableop_xpu_float16",
        "test_validator_tunableop_rocm_xpu_float32",
        "test_addmm_relu_tunableop_rocm_xpu_float32",
        "test_addmm_relu_tunableop_rocm_xpu_float64",
        "_tuning_tunableop_",
        # TODO: align input data type for convert_weight_to_int4pack with CUDA
        # XPU expects weight to be kInt, while CUDA expects kByte
        # NOTE: these sixteen entries appeared twice in a row; the verbatim
        # duplicate copy has been removed — skip matching is by membership,
        # so listing each name once is sufficient.
        "test__int4_mm_m_32_k_32_n_48_xpu",
        "test__int4_mm_m_32_k_32_n_64_xpu",
        "test__int4_mm_m_32_k_64_n_48_xpu",
        "test__int4_mm_m_32_k_64_n_64_xpu",
        "test__int4_mm_m_64_k_32_n_48_xpu",
        "test__int4_mm_m_64_k_32_n_64_xpu",
        "test__int4_mm_m_64_k_64_n_48_xpu",
        "test__int4_mm_m_64_k_64_n_64_xpu",
        "test_compile_int4_mm_m_32_k_32_n_48_xpu",
        "test_compile_int4_mm_m_32_k_32_n_64_xpu",
        "test_compile_int4_mm_m_32_k_64_n_48_xpu",
        "test_compile_int4_mm_m_32_k_64_n_64_xpu",
        "test_compile_int4_mm_m_64_k_32_n_48_xpu",
        "test_compile_int4_mm_m_64_k_32_n_64_xpu",
        "test_compile_int4_mm_m_64_k_64_n_48_xpu",
        "test_compile_int4_mm_m_64_k_64_n_64_xpu",
    ),
    "test_ops_fwd_gradients_xpu.py": (
        # All of the following are oneDNN issues
        # RuntimeError: Double and complex datatype matmul is not supported in oneDNN
        "test_fn_fwgrad_bwgrad___rmatmul___xpu_complex128",
        "test_fn_fwgrad_bwgrad___rmatmul___xpu_float64",
        "test_fn_fwgrad_bwgrad_addbmm_xpu_float64",
        "test_fn_fwgrad_bwgrad_addmm_decomposed_xpu_complex128",
        "test_fn_fwgrad_bwgrad_addmm_decomposed_xpu_float64",
        "test_fn_fwgrad_bwgrad_addmm_xpu_complex128",
        "test_fn_fwgrad_bwgrad_addmm_xpu_float64",
        "test_fn_fwgrad_bwgrad_addmv_xpu_complex128",
        "test_fn_fwgrad_bwgrad_addmv_xpu_float64",
        "test_fn_fwgrad_bwgrad_addr_xpu_complex128",
        "test_fn_fwgrad_bwgrad_addr_xpu_float64",
        "test_fn_fwgrad_bwgrad_baddbmm_xpu_complex128",
        "test_fn_fwgrad_bwgrad_baddbmm_xpu_float64",
        "test_fn_fwgrad_bwgrad_bmm_xpu_complex128",
        "test_fn_fwgrad_bwgrad_bmm_xpu_float64",
        "test_fn_fwgrad_bwgrad_cholesky_inverse_xpu_complex128",
        "test_fn_fwgrad_bwgrad_cholesky_inverse_xpu_float64",
        "test_fn_fwgrad_bwgrad_cholesky_solve_xpu_complex128",
        "test_fn_fwgrad_bwgrad_cholesky_solve_xpu_float64",
        "test_fn_fwgrad_bwgrad_cholesky_xpu_complex128",
        "test_fn_fwgrad_bwgrad_cholesky_xpu_float64",
        "test_fn_fwgrad_bwgrad_corrcoef_xpu_complex128",
        "test_fn_fwgrad_bwgrad_corrcoef_xpu_float64",
        "test_fn_fwgrad_bwgrad_einsum_xpu_complex128",
        "test_fn_fwgrad_bwgrad_einsum_xpu_float64",
        "test_fn_fwgrad_bwgrad_inner_xpu_complex128",
        "test_fn_fwgrad_bwgrad_inner_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_cholesky_ex_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_cholesky_ex_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_cholesky_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_cholesky_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_cond_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_cond_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_det_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_det_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_eig_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_eig_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_eigh_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_eigh_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_eigvals_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_eigvals_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_eigvalsh_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_eigvalsh_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_householder_product_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_householder_product_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_inv_ex_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_inv_ex_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_inv_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_inv_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_lstsq_grad_oriented_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_lstsq_grad_oriented_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_lu_factor_ex_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_lu_factor_ex_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_lu_factor_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_lu_factor_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_lu_solve_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_lu_solve_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_lu_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_lu_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_matrix_norm_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_matrix_norm_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_matrix_power_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_matrix_power_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_multi_dot_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_multi_dot_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_norm_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_pinv_hermitian_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_pinv_hermitian_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_pinv_singular_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_pinv_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_pinv_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_qr_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_qr_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_slogdet_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_slogdet_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_solve_ex_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_solve_ex_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_solve_triangular_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_solve_triangular_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_solve_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_solve_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_svd_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_svd_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_svdvals_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_svdvals_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_tensorinv_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_tensorinv_xpu_float64",
        "test_fn_fwgrad_bwgrad_linalg_tensorsolve_xpu_complex128",
        "test_fn_fwgrad_bwgrad_linalg_tensorsolve_xpu_float64",
        "test_fn_fwgrad_bwgrad_logdet_xpu_complex128",
        "test_fn_fwgrad_bwgrad_logdet_xpu_float64",
        "test_fn_fwgrad_bwgrad_lu_solve_xpu_complex128",
        "test_fn_fwgrad_bwgrad_lu_solve_xpu_float64",
        "test_fn_fwgrad_bwgrad_lu_xpu_complex128",
        "test_fn_fwgrad_bwgrad_lu_xpu_float64",
        "test_fn_fwgrad_bwgrad_matmul_xpu_complex128",
        "test_fn_fwgrad_bwgrad_matmul_xpu_float64",
        "test_fn_fwgrad_bwgrad_mm_xpu_complex128",
        "test_fn_fwgrad_bwgrad_mm_xpu_float64",
        "test_fn_fwgrad_bwgrad_mv_xpu_complex128",
        "test_fn_fwgrad_bwgrad_mv_xpu_float64",
        "test_fn_fwgrad_bwgrad_nn_functional_bilinear_xpu_float64",
        "test_fn_fwgrad_bwgrad_nn_functional_linear_xpu_complex128",
        "test_fn_fwgrad_bwgrad_nn_functional_linear_xpu_float64",
        "test_fn_fwgrad_bwgrad_nn_functional_multi_head_attention_forward_xpu_float64",
        "test_fn_fwgrad_bwgrad_nn_functional_scaled_dot_product_attention_xpu_float64",
        "test_fn_fwgrad_bwgrad_norm_nuc_xpu_complex128",
        "test_fn_fwgrad_bwgrad_norm_nuc_xpu_float64",
        "test_fn_fwgrad_bwgrad_ormqr_xpu_complex128",
        "test_fn_fwgrad_bwgrad_ormqr_xpu_float64",
        "test_fn_fwgrad_bwgrad_pca_lowrank_xpu_float64",
        "test_fn_fwgrad_bwgrad_pinverse_xpu_complex128",
        "test_fn_fwgrad_bwgrad_pinverse_xpu_float64",
        "test_fn_fwgrad_bwgrad_qr_xpu_complex128",
        "test_fn_fwgrad_bwgrad_qr_xpu_float64",
        "test_fn_fwgrad_bwgrad_svd_lowrank_xpu_float64",
        "test_fn_fwgrad_bwgrad_svd_xpu_complex128",
        "test_fn_fwgrad_bwgrad_svd_xpu_float64",
        "test_fn_fwgrad_bwgrad_tensordot_xpu_complex128",
        "test_fn_fwgrad_bwgrad_tensordot_xpu_float64",
        "test_forward_mode_AD___rmatmul___xpu_complex128",
        "test_forward_mode_AD___rmatmul___xpu_float64",
        "test_forward_mode_AD_addbmm_xpu_float64",
        "test_forward_mode_AD_addmm_decomposed_xpu_complex128",
        "test_forward_mode_AD_addmm_decomposed_xpu_float64",
        "test_forward_mode_AD_addmm_xpu_complex128",
        "test_forward_mode_AD_addmm_xpu_float64",
        "test_forward_mode_AD_addmv_xpu_complex128",
        "test_forward_mode_AD_addmv_xpu_float64",
        "test_forward_mode_AD_baddbmm_xpu_complex128",
        "test_forward_mode_AD_baddbmm_xpu_float64",
        "test_forward_mode_AD_bmm_xpu_complex128",
        "test_forward_mode_AD_bmm_xpu_float64",
        "test_forward_mode_AD_cholesky_inverse_xpu_complex128",
        "test_forward_mode_AD_cholesky_inverse_xpu_float64",
        "test_forward_mode_AD_cholesky_solve_xpu_complex128",
        "test_forward_mode_AD_cholesky_solve_xpu_float64",
        "test_forward_mode_AD_cholesky_xpu_complex128",
        "test_forward_mode_AD_cholesky_xpu_float64",
        "test_forward_mode_AD_corrcoef_xpu_complex128",
        "test_forward_mode_AD_corrcoef_xpu_float64",
        "test_forward_mode_AD_dot_xpu_complex128",
        "test_forward_mode_AD_dot_xpu_float64",
        "test_forward_mode_AD_einsum_xpu_complex128",
        "test_forward_mode_AD_einsum_xpu_float64",
        "test_forward_mode_AD_inner_xpu_complex128",
        "test_forward_mode_AD_inner_xpu_float64",
        "test_forward_mode_AD_linalg_cholesky_ex_xpu_complex128",
        "test_forward_mode_AD_linalg_cholesky_ex_xpu_float64",
        "test_forward_mode_AD_linalg_cholesky_xpu_complex128",
        "test_forward_mode_AD_linalg_cholesky_xpu_float64",
        "test_forward_mode_AD_linalg_cond_xpu_complex128",
        "test_forward_mode_AD_linalg_cond_xpu_float64",
        "test_forward_mode_AD_linalg_det_singular_xpu_complex128",
        "test_forward_mode_AD_linalg_det_singular_xpu_float64",
        "test_forward_mode_AD_linalg_det_xpu_complex128",
        "test_forward_mode_AD_linalg_det_xpu_float64",
        "test_forward_mode_AD_linalg_eig_xpu_complex128",
        "test_forward_mode_AD_linalg_eig_xpu_float64",
        "test_forward_mode_AD_linalg_eigh_xpu_complex128",
        "test_forward_mode_AD_linalg_eigh_xpu_float64",
        "test_forward_mode_AD_linalg_eigvals_xpu_complex128",
        "test_forward_mode_AD_linalg_eigvals_xpu_float64",
        "test_forward_mode_AD_linalg_eigvalsh_xpu_complex128",
        "test_forward_mode_AD_linalg_eigvalsh_xpu_float64",
        "test_forward_mode_AD_linalg_householder_product_xpu_complex128",
        "test_forward_mode_AD_linalg_householder_product_xpu_float64",
        "test_forward_mode_AD_linalg_inv_ex_xpu_complex128",
        "test_forward_mode_AD_linalg_inv_ex_xpu_float64",
        "test_forward_mode_AD_linalg_inv_xpu_complex128",
        "test_forward_mode_AD_linalg_inv_xpu_float64",
        "test_forward_mode_AD_linalg_lstsq_grad_oriented_xpu_complex128",
        "test_forward_mode_AD_linalg_lstsq_grad_oriented_xpu_float64",
        "test_forward_mode_AD_linalg_lu_factor_ex_xpu_complex128",
        "test_forward_mode_AD_linalg_lu_factor_ex_xpu_float64",
        "test_forward_mode_AD_linalg_lu_factor_xpu_complex128",
        "test_forward_mode_AD_linalg_lu_factor_xpu_float64",
        "test_forward_mode_AD_linalg_lu_solve_xpu_complex128",
        "test_forward_mode_AD_linalg_lu_solve_xpu_float64",
        "test_forward_mode_AD_linalg_lu_xpu_complex128",
        "test_forward_mode_AD_linalg_lu_xpu_float64",
        "test_forward_mode_AD_linalg_matrix_norm_xpu_complex128",
        "test_forward_mode_AD_linalg_matrix_norm_xpu_float64",
        "test_forward_mode_AD_linalg_matrix_power_xpu_complex128",
        "test_forward_mode_AD_linalg_matrix_power_xpu_float64",
        "test_forward_mode_AD_linalg_multi_dot_xpu_complex128",
        "test_forward_mode_AD_linalg_multi_dot_xpu_float64",
        "test_forward_mode_AD_linalg_norm_xpu_float64",
        "test_forward_mode_AD_linalg_pinv_hermitian_xpu_complex128",
        "test_forward_mode_AD_linalg_pinv_hermitian_xpu_float64",
        "test_forward_mode_AD_linalg_pinv_singular_xpu_complex128",
        "test_forward_mode_AD_linalg_pinv_singular_xpu_float64",
        "test_forward_mode_AD_linalg_pinv_xpu_complex128",
        "test_forward_mode_AD_linalg_pinv_xpu_float64",
        "test_forward_mode_AD_linalg_qr_xpu_complex128",
        "test_forward_mode_AD_linalg_qr_xpu_float64",
        "test_forward_mode_AD_linalg_slogdet_xpu_complex128",
        "test_forward_mode_AD_linalg_slogdet_xpu_float64",
        "test_forward_mode_AD_linalg_solve_ex_xpu_complex128",
        "test_forward_mode_AD_linalg_solve_ex_xpu_float64",
        "test_forward_mode_AD_linalg_solve_triangular_xpu_complex128",
        "test_forward_mode_AD_linalg_solve_triangular_xpu_float64",
        "test_forward_mode_AD_linalg_solve_xpu_complex128",
        "test_forward_mode_AD_linalg_solve_xpu_float64",
        "test_forward_mode_AD_linalg_svd_xpu_complex128",
        "test_forward_mode_AD_linalg_svd_xpu_float64",
        "test_forward_mode_AD_linalg_svdvals_xpu_complex128",
        "test_forward_mode_AD_linalg_svdvals_xpu_float64",
        "test_forward_mode_AD_linalg_tensorinv_xpu_complex128",
        "test_forward_mode_AD_linalg_tensorinv_xpu_float64",
        "test_forward_mode_AD_linalg_tensorsolve_xpu_complex128",
        "test_forward_mode_AD_linalg_tensorsolve_xpu_float64",
        "test_forward_mode_AD_logdet_xpu_complex128",
        "test_forward_mode_AD_logdet_xpu_float64",
        "test_forward_mode_AD_lu_solve_xpu_complex128",
        "test_forward_mode_AD_lu_solve_xpu_float64",
        "test_forward_mode_AD_lu_xpu_complex128",
        "test_forward_mode_AD_lu_xpu_float64",
        "test_forward_mode_AD_matmul_xpu_complex128",
        "test_forward_mode_AD_matmul_xpu_float64",
        "test_forward_mode_AD_mm_xpu_complex128",
        "test_forward_mode_AD_mm_xpu_float64",
        "test_forward_mode_AD_mv_xpu_complex128",
        "test_forward_mode_AD_mv_xpu_float64",
        "test_forward_mode_AD_nn_functional_bilinear_xpu_float64",
        "test_forward_mode_AD_nn_functional_linear_xpu_complex128",
        "test_forward_mode_AD_nn_functional_linear_xpu_float64",
        "test_forward_mode_AD_norm_nuc_xpu_complex128",
        "test_forward_mode_AD_norm_nuc_xpu_float64",
        "test_forward_mode_AD_pca_lowrank_xpu_float64",
        "test_forward_mode_AD_pinverse_xpu_complex128",
        "test_forward_mode_AD_pinverse_xpu_float64",
        "test_forward_mode_AD_qr_xpu_complex128",
        "test_forward_mode_AD_qr_xpu_float64",
        "test_forward_mode_AD_svd_lowrank_xpu_float64",
        "test_forward_mode_AD_svd_xpu_complex128",
        "test_forward_mode_AD_svd_xpu_float64",
        "test_forward_mode_AD_tensordot_xpu_complex128",
        "test_forward_mode_AD_tensordot_xpu_float64",
        "test_forward_mode_AD_triangular_solve_xpu_complex128",
        "test_forward_mode_AD_triangular_solve_xpu_float64",
        "test_inplace_forward_mode_AD_addbmm_xpu_float64",
        "test_inplace_forward_mode_AD_addmm_decomposed_xpu_complex128",
        "test_inplace_forward_mode_AD_addmm_decomposed_xpu_float64",
        "test_inplace_forward_mode_AD_addmm_xpu_complex128",
        "test_inplace_forward_mode_AD_addmm_xpu_float64",
        "test_inplace_forward_mode_AD_addmv_xpu_complex128",
        "test_inplace_forward_mode_AD_addmv_xpu_float64",
        "test_inplace_forward_mode_AD_baddbmm_xpu_complex128",
        "test_inplace_forward_mode_AD_baddbmm_xpu_float64",
        "test_forward_mode_AD_pca_lowrank_xpu_complex128",
        "test_forward_mode_AD_svd_lowrank_xpu_complex128",
        # RuntimeError: value cannot be converted to type float without overflow
        "test_fn_fwgrad_bwgrad_addbmm_xpu_complex128",
        "test_forward_mode_AD_addbmm_xpu_complex128",
        "test_inplace_forward_mode_AD_addbmm_xpu_complex128",
        # torch.autograd.gradcheck.GradcheckError: While considering the real part of complex inputs only, Jacobian computed with forward mode mismatch for output 0 with respect to input 0 (message truncated)
        "test_fn_fwgrad_bwgrad_linalg_norm_xpu_complex128",
        # torch.autograd.gradcheck.GradcheckError: While considering the imaginary part of complex inputs only, Jacobian computed with forward mode mismatch for output 0 with respect to input 0 (message truncated)
        "test_forward_mode_AD_linalg_norm_xpu_complex128",
        # RuntimeError: could not create a primitive descriptor for a deconvolution forward propagation primitive
        "test_fn_fwgrad_bwgrad_nn_functional_conv_transpose2d_xpu_complex128",
        "test_fn_fwgrad_bwgrad_nn_functional_conv_transpose2d_xpu_float64",
        "test_fn_fwgrad_bwgrad_nn_functional_conv_transpose3d_xpu_complex128",
        "test_fn_fwgrad_bwgrad_nn_functional_conv_transpose3d_xpu_float64",
        "test_forward_mode_AD_nn_functional_conv_transpose2d_xpu_complex128",
        "test_forward_mode_AD_nn_functional_conv_transpose2d_xpu_float64",
        "test_forward_mode_AD_nn_functional_conv_transpose3d_xpu_complex128",
        "test_forward_mode_AD_nn_functional_conv_transpose3d_xpu_float64",
        # issue: https://github.com/intel/torch-xpu-ops/issues/809
        "test_fn_fwgrad_bwgrad_nn_functional_conv3d_xpu_complex128",
        "test_fn_fwgrad_bwgrad_nn_functional_conv3d_xpu_float64",
    ),
    # Skip list for test_matmul_cuda_xpu.py — mostly float8/_scaled_mm and
    # cuBLAS-specific tests that assume a CUDA backend.
    "test_matmul_cuda_xpu.py": (
        # AssertionError: "Bias is not supported when out_dtype is set to Float32" does not match "Could not run 'aten::_scaled_mm' with arguments from the 'CPU' backend.
        "test_float32_output_errors_with_bias_xpu",
        # RuntimeError: "eye" not implemented for 'Float8_e4m3fn'
        "test_float8_basics_xpu",
        # AssertionError: "For row-wise scaling, scale_a must be size 1024 but got 1 and scale_b must be size 2048 but got 2" does not match "Could not run 'aten::_scaled_mm' with arguments from the 'CPU' backend.
        "test_float8_error_messages_xpu",
        # NotImplementedError: Could not run 'aten::_scaled_mm' with arguments from the 'CPU' backend.
        "test_float8_bias_relu_edgecase_xpu",
        "test_float8_bias_xpu",
        "test_float8_rowwise_scaling_sanity_use_fast_accum_False_xpu",
        "test_float8_rowwise_scaling_sanity_use_fast_accum_True_xpu",
        "test_float8_scale_fast_accum_xpu",
        "test_float8_scale_xpu",
        "test_non_divisible_leading_dim_bias_False_xpu",
        "test_non_divisible_leading_dim_bias_True_xpu",
        "test_scaled_mm_change_stride_bfloat16_xpu",
        "test_scaled_mm_change_stride_float16_xpu",
        "test_scaled_mm_change_stride_float32_xpu",
        "test_scaled_mm_vs_emulated_bfloat16_xpu",
        "test_scaled_mm_vs_emulated_float16_xpu",
        "test_scaled_mm_vs_emulated_float32_xpu",
        "test_scaled_mm_vs_emulated_row_wise_bfloat16_xpu",
        # AssertionError: Torch not compiled with CUDA enabled
        "test_zero_dim_tensorwise_which_dim_zero",
        # Cases newly added in 2.7
        "test_cublas_addmm_reduced_precision_fp16_accumulate_size_10000_xpu_bfloat16",
        "test_cublas_addmm_reduced_precision_fp16_accumulate_size_10000_xpu_float16",
        "test_cublas_addmm_reduced_precision_fp16_accumulate_size_1000_xpu_bfloat16",
        "test_cublas_addmm_reduced_precision_fp16_accumulate_size_1000_xpu_float16",
        "test_cublas_addmm_reduced_precision_fp16_accumulate_size_100_xpu_bfloat16",
        "test_cublas_addmm_reduced_precision_fp16_accumulate_size_100_xpu_float16",
        "test_cublas_and_lt_reduced_precision_fp16_accumulate_xpu",
    ),
    # Skip list for test_maskedtensor_xpu.py.
    "test_maskedtensor_xpu.py": (
        # Summary: SparseCsrXPU OPs are not supported
        # NotImplementedError: Could not run 'aten::_to_sparse_csr' with arguments from the 'SparseXPU' backend.
        # https://github.com/intel/torch-xpu-ops/issues/357
        "test_to_dense_xpu",
        # RuntimeError: device type of values (xpu) must be CPU or CUDA or Meta
        "test_like_",
        "test_invalid_sparse_layout_xpu",
        "test_to_dense_and_sparse_csr_xpu",
        # NOTE(review): the layout2 entries below appear to share the sparse
        # limitation summarized above — confirm against the test parametrization.
        "test_binary_core_add_layout2_xpu_float16",
        "test_binary_core_add_layout2_xpu_float32",
        "test_binary_core_add_layout2_xpu_float64",
        "test_binary_core_atan2_layout2_xpu_float16",
        "test_binary_core_atan2_layout2_xpu_float32",
        "test_binary_core_atan2_layout2_xpu_float64",
        "test_binary_core_div_floor_rounding_layout2_xpu_float16",
        "test_binary_core_div_floor_rounding_layout2_xpu_float32",
        "test_binary_core_div_floor_rounding_layout2_xpu_float64",
        "test_binary_core_div_no_rounding_mode_layout2_xpu_float16",
        "test_binary_core_div_no_rounding_mode_layout2_xpu_float32",
        "test_binary_core_div_no_rounding_mode_layout2_xpu_float64",
        "test_binary_core_div_trunc_rounding_layout2_xpu_float16",
        "test_binary_core_div_trunc_rounding_layout2_xpu_float32",
        "test_binary_core_div_trunc_rounding_layout2_xpu_float64",
        "test_binary_core_eq_layout2_xpu_float16",
        "test_binary_core_eq_layout2_xpu_float32",
        "test_binary_core_eq_layout2_xpu_float64",
        "test_binary_core_floor_divide_layout2_xpu_float16",
        "test_binary_core_floor_divide_layout2_xpu_float32",
        "test_binary_core_floor_divide_layout2_xpu_float64",
        "test_binary_core_fmax_layout2_xpu_float16",
        "test_binary_core_fmax_layout2_xpu_float32",
        "test_binary_core_fmax_layout2_xpu_float64",
        "test_binary_core_fmin_layout2_xpu_float16",
        "test_binary_core_fmin_layout2_xpu_float32",
        "test_binary_core_fmin_layout2_xpu_float64",
        "test_binary_core_fmod_layout2_xpu_float16",
        "test_binary_core_fmod_layout2_xpu_float32",
        "test_binary_core_fmod_layout2_xpu_float64",
        "test_binary_core_ge_layout2_xpu_float16",
        "test_binary_core_ge_layout2_xpu_float32",
        "test_binary_core_ge_layout2_xpu_float64",
        "test_binary_core_gt_layout2_xpu_float16",
        "test_binary_core_gt_layout2_xpu_float32",
        "test_binary_core_gt_layout2_xpu_float64",
        "test_binary_core_le_layout2_xpu_float16",
        "test_binary_core_le_layout2_xpu_float32",
        "test_binary_core_le_layout2_xpu_float64",
        "test_binary_core_logaddexp_layout2_xpu_float16",
        "test_binary_core_logaddexp_layout2_xpu_float32",
        "test_binary_core_logaddexp_layout2_xpu_float64",
        "test_binary_core_lt_layout2_xpu_float16",
        "test_binary_core_lt_layout2_xpu_float32",
        "test_binary_core_lt_layout2_xpu_float64",
        "test_binary_core_maximum_layout2_xpu_float16",
        "test_binary_core_maximum_layout2_xpu_float32",
        "test_binary_core_maximum_layout2_xpu_float64",
        "test_binary_core_minimum_layout2_xpu_float16",
        "test_binary_core_minimum_layout2_xpu_float32",
        "test_binary_core_minimum_layout2_xpu_float64",
        "test_binary_core_mul_layout2_xpu_float16",
        "test_binary_core_mul_layout2_xpu_float32",
        "test_binary_core_mul_layout2_xpu_float64",
        "test_binary_core_ne_layout2_xpu_float16",
        "test_binary_core_ne_layout2_xpu_float32",
        "test_binary_core_ne_layout2_xpu_float64",
        "test_binary_core_nextafter_layout2_xpu_float16",
        "test_binary_core_nextafter_layout2_xpu_float32",
        "test_binary_core_nextafter_layout2_xpu_float64",
        "test_binary_core_remainder_layout2_xpu_float16",
        "test_binary_core_remainder_layout2_xpu_float32",
        "test_binary_core_remainder_layout2_xpu_float64",
        "test_binary_core_sub_layout2_xpu_float16",
        "test_binary_core_sub_layout2_xpu_float32",
        "test_binary_core_sub_layout2_xpu_float64",
        "test_binary_core_true_divide_layout2_xpu_float16",
        "test_binary_core_true_divide_layout2_xpu_float32",
        "test_binary_core_true_divide_layout2_xpu_float64",
        "test_reduction_all_amax_layout2_xpu_float16",
        "test_reduction_all_amax_layout2_xpu_float32",
        "test_reduction_all_amax_layout2_xpu_float64",
        "test_reduction_all_amin_layout2_xpu_float16",
        "test_reduction_all_amin_layout2_xpu_float32",
        "test_reduction_all_amin_layout2_xpu_float64",
        "test_reduction_all_prod_layout2_xpu_float32",
        "test_reduction_all_prod_layout2_xpu_float64",
        "test_reduction_all_sum_layout2_xpu_float16",
        "test_reduction_all_sum_layout2_xpu_float64",
    ),
    # Skip list for quantization/core/test_quantized_op_xpu.py.
    "quantization/core/test_quantized_op_xpu.py": (
        # AssertionError: Torch not compiled with CUDA enabled
        "test_qgelu_xpu",
        "test_qrelu_xpu",
        # AttributeError: 'TestQuantizedOpsXPU' object has no attribute 'test_qsoftmax'
        "test_qsoftmax_qnnpack_xpu",
    ),
    # Skip list for quantization/core/test_workflow_ops_xpu.py.
    "quantization/core/test_workflow_ops_xpu.py": (
        # AssertionError: Not equal to tolerance rtol=1e-06, atol=1e-06
        # Max absolute difference among violations: 1.731507e+10
        # Max relative difference among violations: 0.01587304
        # ACTUAL: array([-1.108163e+12,  1.108163e+12], dtype=float32)
        # DESIRED: array([-1.108163e+12,  1.090847e+12], dtype=float32)
        "test_fq_module_per_tensor_xpu",
    ),
    # No individual skip entries recorded for this file.
    # NOTE(review): assuming the consumer of skip_dict treats None the same as
    # an empty tuple (no tests skipped) — confirm against the reader code.
    "quantization/core/test_workflow_module_xpu.py": None,
    # Skip list for quantization/core/test_quantized_tensor_xpu.py.
    "quantization/core/test_quantized_tensor_xpu.py": (
        # Summary: Quantized OPs are not supported for XPU
        # NotImplementedError: Could not run 'aten::dequantize.self' with arguments from the 'QuantizedXPU' backend
        "test_compare_per_channel_device_numerics_xpu",
        # NotImplementedError: Could not run 'aten::dequantize.self' with arguments from the 'QuantizedXPU' backend.
        "test_compare_per_tensor_device_numerics_xpu",
        # NotImplementedError: Could not run 'aten::empty_quantized' with arguments from the 'QuantizedXPU' backend.
        "test_cuda_quantization_does_not_pin_memory_xpu",
        # NotImplementedError: Could not run 'aten::_empty_per_channel_affine_quantized' with arguments from the 'QuantizedXPU' backend.
        "test_per_channel_qtensor_creation_cuda_xpu",
        # NotImplementedError: Could not run 'aten::empty_quantized' with arguments from the 'QuantizedXPU' backend.
        "test_per_channel_to_device_xpu",
        # NotImplementedError: Could not run 'aten::empty_quantized' with arguments from the 'QuantizedXPU' backend.
        "test_per_tensor_to_device_xpu",
        # NotImplementedError: Could not run 'aten::q_scale' with arguments from the 'QuantizedXPU' backend.
        "test_qtensor_cuda_xpu",
        # NotImplementedError: Could not run 'aten::_index_put_impl_' with arguments from the 'QuantizedXPU' backend.
        "test_qtensor_index_put_cuda_xpu",
        # NotImplementedError: Could not run 'aten::index_select' with arguments from the 'QuantizedXPU' backend.
        "test_qtensor_index_select_cuda_xpu",
        # NotImplementedError: Could not run 'aten::_empty_affine_quantized' with arguments from the 'QuantizedXPU' backend.
        "test_qtensor_masked_fill_cuda_xpu",
    ),
    # Skip list for nn/test_packed_sequence_xpu.py.
    "nn/test_packed_sequence_xpu.py": (
        # Test case porting issue.
        # NOTE(review): this entry is a boolean match expression (it contains
        # "and not"), not a plain test-name substring — presumably it is passed
        # through as a pytest -k filter; confirm with the skip-list consumer.
        "test_to and not test_to_memory and not test_total",
    ),
    "test_ops_gradients_xpu.py": (
        # All are oneDNN issues
        ### Error #0 in TestBwdGradientsXPU (271 cases in total): RuntimeError: Double and complex datatype matmul is not supported in oneDNN
        "test_fn_grad_index_reduce_prod_xpu_float64",
        "test_inplace_grad_index_reduce_prod_xpu_float64",
        "test_fn_grad___rmatmul___xpu_complex128",
        "test_fn_grad___rmatmul___xpu_float64",
        "test_fn_grad_addbmm_xpu_float64",
        "test_fn_grad_addmm_decomposed_xpu_complex128",
        "test_fn_grad_addmm_decomposed_xpu_float64",
        "test_fn_grad_addmm_xpu_complex128",
        "test_fn_grad_addmm_xpu_float64",
        "test_fn_grad_addmv_xpu_complex128",
        "test_fn_grad_addmv_xpu_float64",
        "test_fn_grad_addr_xpu_complex128",
        "test_fn_grad_addr_xpu_float64",
        "test_fn_grad_baddbmm_xpu_complex128",
        "test_fn_grad_baddbmm_xpu_float64",
        "test_fn_grad_bmm_xpu_complex128",
        "test_fn_grad_bmm_xpu_float64",
        "test_fn_grad_cdist_xpu_float64",
        "test_fn_grad_cholesky_inverse_xpu_complex128",
        "test_fn_grad_cholesky_inverse_xpu_float64",
        "test_fn_grad_cholesky_solve_xpu_complex128",
        "test_fn_grad_cholesky_solve_xpu_float64",
        "test_fn_grad_cholesky_xpu_complex128",
        "test_fn_grad_cholesky_xpu_float64",
        "test_fn_grad_corrcoef_xpu_complex128",
        "test_fn_grad_corrcoef_xpu_float64",
        "test_fn_grad_einsum_xpu_complex128",
        "test_fn_grad_einsum_xpu_float64",
        "test_fn_grad_inner_xpu_complex128",
        "test_fn_grad_inner_xpu_float64",
        "test_fn_grad_linalg_cholesky_ex_xpu_complex128",
        "test_fn_grad_linalg_cholesky_ex_xpu_float64",
        "test_fn_grad_linalg_cholesky_xpu_complex128",
        "test_fn_grad_linalg_cholesky_xpu_float64",
        "test_fn_grad_linalg_cond_xpu_complex128",
        "test_fn_grad_linalg_cond_xpu_float64",
        "test_fn_grad_linalg_det_singular_xpu_complex128",
        "test_fn_grad_linalg_det_singular_xpu_float64",
        "test_fn_grad_linalg_det_xpu_complex128",
        "test_fn_grad_linalg_det_xpu_float64",
        "test_fn_grad_linalg_eig_xpu_complex128",
        "test_fn_grad_linalg_eig_xpu_float64",
        "test_fn_grad_linalg_eigh_xpu_complex128",
        "test_fn_grad_linalg_eigh_xpu_float64",
        "test_fn_grad_linalg_eigvals_xpu_complex128",
        "test_fn_grad_linalg_eigvals_xpu_float64",
        "test_fn_grad_linalg_eigvalsh_xpu_complex128",
        "test_fn_grad_linalg_eigvalsh_xpu_float64",
        "test_fn_grad_linalg_householder_product_xpu_complex128",
        "test_fn_grad_linalg_householder_product_xpu_float64",
        "test_fn_grad_linalg_inv_ex_xpu_complex128",
        "test_fn_grad_linalg_inv_ex_xpu_float64",
        "test_fn_grad_linalg_inv_xpu_complex128",
        "test_fn_grad_linalg_inv_xpu_float64",
        "test_fn_grad_linalg_lstsq_grad_oriented_xpu_complex128",
        "test_fn_grad_linalg_lstsq_grad_oriented_xpu_float64",
        "test_fn_grad_linalg_lu_factor_ex_xpu_complex128",
        "test_fn_grad_linalg_lu_factor_ex_xpu_float64",
        "test_fn_grad_linalg_lu_factor_xpu_complex128",
        "test_fn_grad_linalg_lu_factor_xpu_float64",
        "test_fn_grad_linalg_lu_solve_xpu_complex128",
        "test_fn_grad_linalg_lu_solve_xpu_float64",
        "test_fn_grad_linalg_lu_xpu_complex128",
        "test_fn_grad_linalg_lu_xpu_float64",
        "test_fn_grad_linalg_matrix_norm_xpu_complex128",
        "test_fn_grad_linalg_matrix_norm_xpu_float64",
        "test_fn_grad_linalg_matrix_power_xpu_complex128",
        "test_fn_grad_linalg_matrix_power_xpu_float64",
        "test_fn_grad_linalg_multi_dot_xpu_complex128",
        "test_fn_grad_linalg_multi_dot_xpu_float64",
        "test_fn_grad_linalg_norm_xpu_float64",
        "test_fn_grad_linalg_pinv_hermitian_xpu_complex128",
        "test_fn_grad_linalg_pinv_hermitian_xpu_float64",
        "test_fn_grad_linalg_pinv_singular_xpu_complex128",
        "test_fn_grad_linalg_pinv_singular_xpu_float64",
        "test_fn_grad_linalg_pinv_xpu_complex128",
        "test_fn_grad_linalg_pinv_xpu_float64",
        "test_fn_grad_linalg_qr_xpu_complex128",
        "test_fn_grad_linalg_qr_xpu_float64",
        "test_fn_grad_linalg_slogdet_xpu_complex128",
        "test_fn_grad_linalg_slogdet_xpu_float64",
        "test_fn_grad_linalg_solve_ex_xpu_complex128",
        "test_fn_grad_linalg_solve_ex_xpu_float64",
        "test_fn_grad_linalg_solve_triangular_xpu_complex128",
        "test_fn_grad_linalg_solve_triangular_xpu_float64",
        "test_fn_grad_linalg_solve_xpu_complex128",
        "test_fn_grad_linalg_solve_xpu_float64",
        "test_fn_grad_linalg_svd_xpu_complex128",
        "test_fn_grad_linalg_svd_xpu_float64",
        "test_fn_grad_linalg_svdvals_xpu_complex128",
        "test_fn_grad_linalg_svdvals_xpu_float64",
        "test_fn_grad_linalg_tensorinv_xpu_complex128",
        "test_fn_grad_linalg_tensorinv_xpu_float64",
        "test_fn_grad_linalg_tensorsolve_xpu_complex128",
        "test_fn_grad_linalg_tensorsolve_xpu_float64",
        "test_fn_grad_logdet_xpu_complex128",
        "test_fn_grad_logdet_xpu_float64",
        "test_fn_grad_lu_solve_xpu_complex128",
        "test_fn_grad_lu_solve_xpu_float64",
        "test_fn_grad_lu_xpu_complex128",
        "test_fn_grad_lu_xpu_float64",
        "test_fn_grad_matmul_xpu_complex128",
        "test_fn_grad_matmul_xpu_float64",
        "test_fn_grad_mm_xpu_complex128",
        "test_fn_grad_mm_xpu_float64",
        "test_fn_grad_mv_xpu_complex128",
        "test_fn_grad_mv_xpu_float64",
        "test_fn_grad_nn_functional_bilinear_xpu_float64",
        "test_fn_grad_nn_functional_linear_xpu_complex128",
        "test_fn_grad_nn_functional_linear_xpu_float64",
        "test_fn_grad_nn_functional_multi_head_attention_forward_xpu_float64",
        "test_fn_grad_nn_functional_scaled_dot_product_attention_xpu_float64",
        "test_fn_grad_norm_nuc_xpu_complex128",
        "test_fn_grad_norm_nuc_xpu_float64",
        "test_fn_grad_ormqr_xpu_complex128",
        "test_fn_grad_ormqr_xpu_float64",
        "test_fn_grad_pca_lowrank_xpu_float64",
        "test_fn_grad_pinverse_xpu_complex128",
        "test_fn_grad_pinverse_xpu_float64",
        "test_fn_grad_qr_xpu_complex128",
        "test_fn_grad_qr_xpu_float64",
        "test_fn_grad_svd_lowrank_xpu_float64",
        "test_fn_grad_svd_xpu_complex128",
        "test_fn_grad_svd_xpu_float64",
        "test_fn_grad_tensordot_xpu_complex128",
        "test_fn_grad_tensordot_xpu_float64",
        "test_fn_grad_triangular_solve_xpu_complex128",
        "test_fn_grad_triangular_solve_xpu_float64",
        "test_fn_gradgrad___rmatmul___xpu_complex128",
        "test_fn_gradgrad___rmatmul___xpu_float64",
        "test_fn_gradgrad_addbmm_xpu_float64",
        "test_fn_gradgrad_addmm_decomposed_xpu_complex128",
        "test_fn_gradgrad_addmm_decomposed_xpu_float64",
        "test_fn_gradgrad_addmm_xpu_complex128",
        "test_fn_gradgrad_addmm_xpu_float64",
        "test_fn_gradgrad_addmv_xpu_complex128",
        "test_fn_gradgrad_addmv_xpu_float64",
        "test_fn_gradgrad_addr_xpu_complex128",
        "test_fn_gradgrad_addr_xpu_float64",
        "test_fn_gradgrad_baddbmm_xpu_complex128",
        "test_fn_gradgrad_baddbmm_xpu_float64",
        "test_fn_gradgrad_bmm_xpu_complex128",
        "test_fn_gradgrad_bmm_xpu_float64",
        "test_fn_gradgrad_cholesky_inverse_xpu_complex128",
        "test_fn_gradgrad_cholesky_inverse_xpu_float64",
        "test_fn_gradgrad_cholesky_solve_xpu_complex128",
        "test_fn_gradgrad_cholesky_solve_xpu_float64",
        "test_fn_gradgrad_cholesky_xpu_complex128",
        "test_fn_gradgrad_cholesky_xpu_float64",
        "test_fn_gradgrad_corrcoef_xpu_complex128",
        "test_fn_gradgrad_corrcoef_xpu_float64",
        "test_fn_gradgrad_einsum_xpu_complex128",
        "test_fn_gradgrad_einsum_xpu_float64",
        "test_fn_gradgrad_inner_xpu_complex128",
        "test_fn_gradgrad_inner_xpu_float64",
        "test_fn_gradgrad_linalg_cholesky_ex_xpu_complex128",
        "test_fn_gradgrad_linalg_cholesky_ex_xpu_float64",
        "test_fn_gradgrad_linalg_cholesky_xpu_complex128",
        "test_fn_gradgrad_linalg_cholesky_xpu_float64",
        "test_fn_gradgrad_linalg_cond_xpu_complex128",
        "test_fn_gradgrad_linalg_cond_xpu_float64",
        "test_fn_gradgrad_linalg_det_xpu_complex128",
        "test_fn_gradgrad_linalg_det_xpu_float64",
        "test_fn_gradgrad_linalg_eig_xpu_complex128",
        "test_fn_gradgrad_linalg_eig_xpu_float64",
        "test_fn_gradgrad_linalg_eigh_xpu_complex128",
        "test_fn_gradgrad_linalg_eigh_xpu_float64",
        "test_fn_gradgrad_linalg_eigvals_xpu_complex128",
        "test_fn_gradgrad_linalg_eigvals_xpu_float64",
        "test_fn_gradgrad_linalg_eigvalsh_xpu_complex128",
        "test_fn_gradgrad_linalg_eigvalsh_xpu_float64",
        "test_fn_gradgrad_linalg_householder_product_xpu_complex128",
        "test_fn_gradgrad_linalg_householder_product_xpu_float64",
        "test_fn_gradgrad_linalg_inv_ex_xpu_complex128",
        "test_fn_gradgrad_linalg_inv_ex_xpu_float64",
        "test_fn_gradgrad_linalg_inv_xpu_complex128",
        "test_fn_gradgrad_linalg_inv_xpu_float64",
        "test_fn_gradgrad_linalg_lstsq_grad_oriented_xpu_complex128",
        "test_fn_gradgrad_linalg_lstsq_grad_oriented_xpu_float64",
        "test_fn_gradgrad_linalg_lu_factor_ex_xpu_complex128",
        "test_fn_gradgrad_linalg_lu_factor_ex_xpu_float64",
        "test_fn_gradgrad_linalg_lu_factor_xpu_complex128",
        "test_fn_gradgrad_linalg_lu_factor_xpu_float64",
        "test_fn_gradgrad_linalg_lu_solve_xpu_complex128",
        "test_fn_gradgrad_linalg_lu_solve_xpu_float64",
        "test_fn_gradgrad_linalg_lu_xpu_complex128",
        "test_fn_gradgrad_linalg_lu_xpu_float64",
        "test_fn_gradgrad_linalg_matrix_norm_xpu_complex128",
        "test_fn_gradgrad_linalg_matrix_norm_xpu_float64",
        "test_fn_gradgrad_linalg_matrix_power_xpu_complex128",
        "test_fn_gradgrad_linalg_matrix_power_xpu_float64",
        "test_fn_gradgrad_linalg_multi_dot_xpu_complex128",
        "test_fn_gradgrad_linalg_multi_dot_xpu_float64",
        "test_fn_gradgrad_linalg_pinv_hermitian_xpu_complex128",
        "test_fn_gradgrad_linalg_pinv_hermitian_xpu_float64",
        "test_fn_gradgrad_linalg_pinv_singular_xpu_float64",
        "test_fn_gradgrad_linalg_pinv_xpu_complex128",
        "test_fn_gradgrad_linalg_pinv_xpu_float64",
        "test_fn_gradgrad_linalg_qr_xpu_complex128",
        "test_fn_gradgrad_linalg_qr_xpu_float64",
        "test_fn_gradgrad_linalg_slogdet_xpu_complex128",
        "test_fn_gradgrad_linalg_slogdet_xpu_float64",
        "test_fn_gradgrad_linalg_solve_ex_xpu_complex128",
        "test_fn_gradgrad_linalg_solve_ex_xpu_float64",
        "test_fn_gradgrad_linalg_solve_triangular_xpu_complex128",
        "test_fn_gradgrad_linalg_solve_triangular_xpu_float64",
        "test_fn_gradgrad_linalg_solve_xpu_complex128",
        "test_fn_gradgrad_linalg_solve_xpu_float64",
        "test_fn_gradgrad_linalg_svd_xpu_complex128",
        "test_fn_gradgrad_linalg_svd_xpu_float64",
        "test_fn_gradgrad_linalg_svdvals_xpu_complex128",
        "test_fn_gradgrad_linalg_svdvals_xpu_float64",
        "test_fn_gradgrad_linalg_tensorinv_xpu_complex128",
        "test_fn_gradgrad_linalg_tensorinv_xpu_float64",
        "test_fn_gradgrad_linalg_tensorsolve_xpu_complex128",
        "test_fn_gradgrad_linalg_tensorsolve_xpu_float64",
        "test_fn_gradgrad_logdet_xpu_complex128",
        "test_fn_gradgrad_logdet_xpu_float64",
        "test_fn_gradgrad_lu_solve_xpu_complex128",
        "test_fn_gradgrad_lu_solve_xpu_float64",
        "test_fn_gradgrad_lu_xpu_complex128",
        "test_fn_gradgrad_lu_xpu_float64",
        "test_fn_gradgrad_matmul_xpu_complex128",
        "test_fn_gradgrad_matmul_xpu_float64",
        "test_fn_gradgrad_mm_xpu_complex128",
        "test_fn_gradgrad_mm_xpu_float64",
        "test_fn_gradgrad_mv_xpu_complex128",
        "test_fn_gradgrad_mv_xpu_float64",
        "test_fn_gradgrad_nn_functional_bilinear_xpu_float64",
        "test_fn_gradgrad_nn_functional_linear_xpu_complex128",
        "test_fn_gradgrad_nn_functional_linear_xpu_float64",
        "test_fn_gradgrad_nn_functional_multi_head_attention_forward_xpu_float64",
        "test_fn_gradgrad_nn_functional_scaled_dot_product_attention_xpu_float64",
        "test_fn_gradgrad_norm_nuc_xpu_complex128",
        "test_fn_gradgrad_norm_nuc_xpu_float64",
        "test_fn_gradgrad_ormqr_xpu_complex128",
        "test_fn_gradgrad_ormqr_xpu_float64",
        "test_fn_gradgrad_pca_lowrank_xpu_float64",
        "test_fn_gradgrad_pinverse_xpu_complex128",
        "test_fn_gradgrad_pinverse_xpu_float64",
        "test_fn_gradgrad_qr_xpu_complex128",
        "test_fn_gradgrad_qr_xpu_float64",
        "test_fn_gradgrad_svd_lowrank_xpu_float64",
        "test_fn_gradgrad_svd_xpu_complex128",
        "test_fn_gradgrad_svd_xpu_float64",
        "test_fn_gradgrad_tensordot_xpu_complex128",
        "test_fn_gradgrad_tensordot_xpu_float64",
        "test_fn_gradgrad_triangular_solve_xpu_complex128",
        "test_fn_gradgrad_triangular_solve_xpu_float64",
        "test_inplace_grad_addbmm_xpu_float64",
        "test_inplace_grad_addmm_decomposed_xpu_complex128",
        "test_inplace_grad_addmm_decomposed_xpu_float64",
        "test_inplace_grad_addmm_xpu_complex128",
        "test_inplace_grad_addmm_xpu_float64",
        "test_inplace_grad_addmv_xpu_complex128",
        "test_inplace_grad_addmv_xpu_float64",
        "test_inplace_grad_addr_xpu_complex128",
        "test_inplace_grad_addr_xpu_float64",
        "test_inplace_grad_baddbmm_xpu_complex128",
        "test_inplace_grad_baddbmm_xpu_float64",
        "test_inplace_gradgrad_addbmm_xpu_float64",
        "test_inplace_gradgrad_addmm_decomposed_xpu_complex128",
        "test_inplace_gradgrad_addmm_decomposed_xpu_float64",
        "test_inplace_gradgrad_addmm_xpu_complex128",
        "test_inplace_gradgrad_addmm_xpu_float64",
        "test_inplace_gradgrad_addmv_xpu_complex128",
        "test_inplace_gradgrad_addmv_xpu_float64",
        "test_inplace_gradgrad_addr_xpu_complex128",
        "test_inplace_gradgrad_addr_xpu_float64",
        "test_inplace_gradgrad_baddbmm_xpu_complex128",
        "test_inplace_gradgrad_baddbmm_xpu_float64",
        "test_fn_grad_pca_lowrank_xpu_complex128",
        "test_fn_grad_svd_lowrank_xpu_complex128",
        "test_fn_gradgrad_pca_lowrank_xpu_complex128",
        "test_fn_gradgrad_svd_lowrank_xpu_complex128",
        "test_fn_grad_linalg_norm_xpu_complex128",
        ### Error #1 in TestBwdGradientsXPU , totally 4 , RuntimeError: value cannot be converted to type float without overflow
        "test_fn_grad_addbmm_xpu_complex128",
        "test_fn_gradgrad_addbmm_xpu_complex128",
        "test_inplace_grad_addbmm_xpu_complex128",
        "test_inplace_gradgrad_addbmm_xpu_complex128",
        ### Error #4 in TestBwdGradientsXPU , totally 8 , RuntimeError: could not create a primitive descriptor for a deconvolution forward propagation primitive
        "test_fn_grad_nn_functional_conv_transpose2d_xpu_complex128",
        "test_fn_grad_nn_functional_conv_transpose2d_xpu_float64",
        "test_fn_grad_nn_functional_conv_transpose3d_xpu_complex128",
        "test_fn_grad_nn_functional_conv_transpose3d_xpu_float64",
        "test_fn_gradgrad_nn_functional_conv_transpose2d_xpu_complex128",
        "test_fn_gradgrad_nn_functional_conv_transpose2d_xpu_float64",
        "test_fn_gradgrad_nn_functional_conv_transpose3d_xpu_complex128",
        "test_fn_gradgrad_nn_functional_conv_transpose3d_xpu_float64",
        "test_fn_gradgrad_index_reduce_mean_xpu_float64",
        "test_fn_gradgrad_index_reduce_prod_xpu_float64",
        "test_inplace_gradgrad_index_reduce_mean_xpu_float64",
        "test_inplace_gradgrad_index_reduce_prod_xpu_float64",
        # issue: https://github.com/intel/torch-xpu-ops/issues/809
        "test_fn_gradgrad_nn_functional_conv3d_xpu_complex128",
        "test_fn_gradgrad_nn_functional_conv3d_xpu_float64",
    ),
    "test_torch_xpu.py": (
        # 'torch.xpu' has no attribute ...
        ### Error #1 in TestTorchDeviceTypeXPU , totally 2 , AttributeError: module 'torch.xpu' has no attribute 'FloatTensor'
        "test_grad_scaling_state_dict_xpu",
        ### Error #2 in TestTorchDeviceTypeXPU , totally 1 , AttributeError: 'torch.storage.TypedStorage' object has no attribute 'is_xpu'
        ### Error #3 in TestTorchDeviceTypeXPU , totally 3 , AttributeError: module 'torch.xpu' has no attribute 'ByteStorage'
        "test_storage_setitem_xpu_uint8",
        "test_tensor_storage_type_xpu_uint8",
        ### Error #4 in TestTorchDeviceTypeXPU , totally 4 , AttributeError: module 'torch.xpu' has no attribute 'FloatStorage'
        "test_storage_setitem_xpu_float32",
        "test_tensor_storage_type_xpu_float32",
        ### Error #7 in TestTorchDeviceTypeXPU , totally 1 , TypeError: map2_ is only implemented on CPU tensors
        "test_broadcast_fn_map2_xpu",
        ### Error #8 in TestTorchDeviceTypeXPU , totally 1 , TypeError: map_ is only implemented on CPU tensors
        "test_broadcast_fn_map_xpu",
        ### Error #9 in TestTorchDeviceTypeXPU , totally 1 , RuntimeError: Double and complex datatype matmul is not supported in oneDNN
        "test_corrcoef_xpu_complex64",
        ### Error #12 in TestTorchDeviceTypeXPU , totally 2 , AttributeError: module 'torch.xpu' has no attribute 'amp'
        "test_grad_scaler_pass_itself_xpu",
        "test_pickle_gradscaler_xpu",
        ### Error #15 in TestTorchDeviceTypeXPU , totally 2 , AssertionError: Tensor-likes are not close!
        "test_index_put_non_accumulate_deterministic_xpu",
        ### Error #17 in TestTorchDeviceTypeXPU , totally 2 , AssertionError: False is not true
        "test_sync_warning_xpu",
        ### Error #19 in TestTorchDeviceTypeXPU , totally 1 , RuntimeError: _share_fd_: only available on CPU
        "test_module_share_memory_xpu",
        # 'torch.xpu' has no attribute ...
        ### Error #30 in TestTorchDeviceTypeXPU , totally 2 , AttributeError: module 'torch.xpu' has no attribute 'BoolStorage'
        "test_storage_setitem_xpu_bool",
        "test_tensor_storage_type_xpu_bool",
        ### Error #31 in TestTorchDeviceTypeXPU , totally 2 , AttributeError: module 'torch.xpu' has no attribute 'ComplexDoubleStorage'
        "test_storage_setitem_xpu_complex128",
        "test_tensor_storage_type_xpu_complex128",
        ### Error #32 in TestTorchDeviceTypeXPU , totally 2 , AttributeError: module 'torch.xpu' has no attribute 'ComplexFloatStorage'
        "test_storage_setitem_xpu_complex64",
        "test_tensor_storage_type_xpu_complex64",
        ### Error #33 in TestTorchDeviceTypeXPU , totally 2 , AttributeError: module 'torch.xpu' has no attribute 'DoubleStorage'
        "test_storage_setitem_xpu_float64",
        "test_tensor_storage_type_xpu_float64",
        ### Error #34 in TestTorchDeviceTypeXPU , totally 2 , AttributeError: module 'torch.xpu' has no attribute 'ShortStorage'
        "test_storage_setitem_xpu_int16",
        "test_tensor_storage_type_xpu_int16",
        ### Error #35 in TestTorchDeviceTypeXPU , totally 2 , AttributeError: module 'torch.xpu' has no attribute 'IntStorage'
        "test_storage_setitem_xpu_int32",
        "test_tensor_storage_type_xpu_int32",
        ### Error #36 in TestTorchDeviceTypeXPU , totally 2 , AttributeError: module 'torch.xpu' has no attribute 'LongStorage'
        "test_storage_setitem_xpu_int64",
        "test_tensor_storage_type_xpu_int64",
        ### Error #37 in TestTorchDeviceTypeXPU , totally 2 , AttributeError: module 'torch.xpu' has no attribute 'CharStorage'
        "test_storage_setitem_xpu_int8",
        "test_tensor_storage_type_xpu_int8",
        ### Error #38 in TestTorchDeviceTypeXPU , totally 1 , AttributeError: module 'torch.xpu' has no attribute 'BFloat16Storage'
        "test_tensor_storage_type_xpu_bfloat16",
        ### Error #39 in TestTorchDeviceTypeXPU , totally 1 , AttributeError: module 'torch.xpu' has no attribute 'HalfStorage'
        "test_tensor_storage_type_xpu_float16",
        ### Module 'torch.xpu' has no attribute 'ByteStorage'
        "test_tensor_storage_type_xpu_uint8",
        # issue 302 , 8
        "test_print",
        "test_storage_error",
        "test_storage_error_no_attribute",
        # issue 302, 6
        "test_storage_error",
        "test_typed_storage_deprecation_warning",
        "test_typed_storage_internal_no_warning",
        # issue 302, 11
        "test_cuda_vitals_gpu_only_xpu",
        # torch.utils.swap_tensors AssertionError: RuntimeError not raised
        "test_swap_basic",
        # internally uses index_put deterministic implementation
        # dependent on "test_index_put_non_accumulate_deterministic"
        "test_index_copy_deterministic",
        # scatter_add needs handle XPU deterministic
        # https://github.com/intel/torch-xpu-ops/issues/906
        "test_gather_backward_deterministic_path_xpu",
        "test_scatter_add_one_dim_deterministic_xpu",
        # Precision error
        # Fail occasionally
        # Mismatched elements: 1 / 60 (1.7%)
        # Greatest absolute difference: 0.0625 at index (2, 1, 4) (up to 1e-05 allowed)
        # Greatest relative difference: 0.001125335693359375 at index (2, 1, 4) (up to 0.001 allowed)
        "test_index_reduce_reduce_mean_xpu_bfloat16",
        "test_index_reduce_reduce_mean_xpu_float16",
        "test_index_reduce_reduce_prod_xpu_float16",
    ),
    "nn/test_multihead_attention_xpu.py": (
        # known oneDNN issue
        # RuntimeError: Double and complex datatype matmul is not supported in oneDNN
        "test_multihead_attention_dtype_batch_first_xpu_float64",
        "test_multihead_attention_dtype_xpu_float64",
        "test_multihead_attn_fast_path_query_and_bias_have_different_dtypes_xpu_float64",
        "test_multihead_attn_fast_path_small_test_xpu_float64",
        "test_multihead_attn_in_proj_bias_none_xpu_float64",
        "test_multihead_attn_in_proj_weight_none_xpu_float64",
    ),
    "test_native_mha_xpu.py": (
        # NestedTensorXPU related OPs
        # NotImplementedError: Could not run 'aten::_native_multi_head_attention' with arguments from the 'NestedTensorXPU' backend.
        "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_False_fused_False_xpu_float16",
        "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_False_fused_False_xpu_float32",
        "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_False_fused_True_xpu_float16",
        "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_False_fused_True_xpu_float32",
        "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_True_fused_False_xpu_float16",
        "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_True_fused_False_xpu_float32",
        "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_True_fused_True_xpu_float16",
        "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_True_fused_True_xpu_float32",
        "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_False_fused_False_xpu_float16",
        "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_False_fused_False_xpu_float32",
        "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_False_fused_True_xpu_float16",
        "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_False_fused_True_xpu_float32",
        "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_True_fused_False_xpu_float16",
        "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_True_fused_False_xpu_float32",
        "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_True_fused_True_xpu_float16",
        "test_native_multihead_self_attention_use_nt_False_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_True_fused_True_xpu_float32",
        "test_native_multihead_self_attention_use_nt_True_use_padding_False_pad_all_False_need_weights_False_average_attn_weights_False_fused_False_xpu_float16",
        "test_native_multihead_self_attention_use_nt_True_use_padding_False_pad_all_False_need_weights_False_average_attn_weights_False_fused_False_xpu_float32",
        "test_native_multihead_self_attention_use_nt_True_use_padding_False_pad_all_False_need_weights_False_average_attn_weights_False_fused_True_xpu_float16",
        "test_native_multihead_self_attention_use_nt_True_use_padding_False_pad_all_False_need_weights_False_average_attn_weights_False_fused_True_xpu_float32",
        "test_native_multihead_self_attention_use_nt_True_use_padding_False_pad_all_False_need_weights_False_average_attn_weights_True_fused_False_xpu_float16",
        "test_native_multihead_self_attention_use_nt_True_use_padding_False_pad_all_False_need_weights_False_average_attn_weights_True_fused_False_xpu_float32",
        "test_native_multihead_self_attention_use_nt_True_use_padding_False_pad_all_False_need_weights_False_average_attn_weights_True_fused_True_xpu_float16",
        "test_native_multihead_self_attention_use_nt_True_use_padding_False_pad_all_False_need_weights_False_average_attn_weights_True_fused_True_xpu_float32",
        "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_False_fused_False_xpu_float16",
        "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_False_fused_False_xpu_float32",
        "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_False_fused_True_xpu_float16",
        "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_False_fused_True_xpu_float32",
        "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_True_fused_False_xpu_float16",
        "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_True_fused_False_xpu_float32",
        "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_True_fused_True_xpu_float16",
        "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_False_need_weights_False_average_attn_weights_True_fused_True_xpu_float32",
        "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_False_fused_False_xpu_float16",
        "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_False_fused_False_xpu_float32",
        "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_False_fused_True_xpu_float16",
        "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_False_fused_True_xpu_float32",
        "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_True_fused_False_xpu_float16",
        "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_True_fused_False_xpu_float32",
        "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_True_fused_True_xpu_float16",
        "test_native_multihead_self_attention_use_nt_True_use_padding_True_pad_all_True_need_weights_False_average_attn_weights_True_fused_True_xpu_float32",
        "test_transform_bias_rescale_qkv_nested_xpu_float32",
    ),
    "test_comparison_utils_xpu.py": None,
    "test_segment_reductions_xpu.py": None,
    "nn/test_pruning_xpu.py": None,
    "test_foreach_xpu.py": (
        # RuntimeError: Tried to instantiate dummy base class CUDAGraph
        "use_cuda_graph_True",
    ),
    "nn/test_convolution_xpu.py": (
        # Summary: all of them are oneDNN related issues
        # XPU unsupport ops, skip.
        # https://github.com/intel/torch-xpu-ops/issues/348
        "test_cudnn_convolution_relu_xpu_float16",
        "test_cudnn_convolution_relu_xpu_float32",
        "test_cudnn_convolution_add_relu_xpu_float16",
        "test_cudnn_convolution_add_relu_xpu_float32",
        # accuracy issue, TODO
        "test_Conv2d_naive_groups_xpu_float16",
        "test_Conv2d_groups_nobias",
        # issue: https://github.com/intel/torch-xpu-ops/issues/809
        "test_thnn_conv_strided_padded_dilated",
    ),
    "test_dynamic_shapes_xpu.py": None,
    "nn/test_load_state_dict_xpu.py": None,
    "nn/test_module_hooks_xpu.py": (
        # TypeError: TestStateDictHooks.test_register_state_dict_post_hook() missing 1 required positional argument: 'private'
        # https://github.com/intel/torch-xpu-ops/issues/658
        "test_register_state_dict_post_hook",
    ),
    "nn/test_parametrization_xpu.py": None,
    "test_meta_xpu.py": (
        # https://github.com/intel/torch-xpu-ops/issues/774
        "_jiterator_",
        # RuntimeError: Short is not supported in oneDNN! Requires oneDNN support; keep skipped.
        "test_dispatch_meta_outplace_nn_functional_linear_xpu_int16",
        "test_dispatch_symbolic_meta_outplace_nn_functional_linear_xpu_int16",
        "test_meta_outplace_nn_functional_linear_xpu_int16",
        # RuntimeError: Long is not supported in oneDNN! Requires oneDNN support; keep skipped.
        "test_dispatch_meta_outplace_nn_functional_linear_xpu_int64",
        "test_dispatch_symbolic_meta_outplace_nn_functional_linear_xpu_int64",
        "test_meta_outplace_nn_functional_linear_xpu_int64",
        # RuntimeError: Double and complex datatype matmul is not supported in oneDNN
        "test_dispatch_meta_inplace_addbmm_xpu_complex",
        "test_dispatch_meta_outplace_addbmm_xpu_complex",
        "test_dispatch_symbolic_meta_inplace_addbmm_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_addbmm_xpu_complex",
        "test_meta_inplace_addbmm_xpu_complex",
        "test_meta_outplace_addbmm_xpu_complex",
        "test_dispatch_meta_inplace_addbmm_xpu_float64",
        "test_dispatch_meta_inplace_addmm_decomposed_xpu_complex",
        "test_dispatch_meta_inplace_addmm_decomposed_xpu_float64",
        "test_dispatch_meta_inplace_addmm_xpu_complex",
        "test_dispatch_meta_inplace_addmm_xpu_float64",
        "test_dispatch_meta_inplace_addmv_xpu_complex",
        "test_dispatch_meta_inplace_addmv_xpu_float64",
        "test_dispatch_meta_inplace_baddbmm_xpu_complex",
        "test_dispatch_meta_inplace_baddbmm_xpu_float64",
        "test_dispatch_meta_outplace___rmatmul___xpu_complex",
        "test_dispatch_meta_outplace___rmatmul___xpu_float64",
        "test_dispatch_meta_outplace_addbmm_xpu_float64",
        "test_dispatch_meta_outplace_addmm_decomposed_xpu_complex",
        "test_dispatch_meta_outplace_addmm_decomposed_xpu_float64",
        "test_dispatch_meta_outplace_addmm_xpu_complex",
        "test_dispatch_meta_outplace_addmm_xpu_float64",
        "test_dispatch_meta_outplace_addmv_xpu_complex",
        "test_dispatch_meta_outplace_addmv_xpu_float64",
        "test_dispatch_meta_outplace_baddbmm_xpu_complex",
        "test_dispatch_meta_outplace_baddbmm_xpu_float64",
        "test_dispatch_meta_outplace_bmm_xpu_complex",
        "test_dispatch_meta_outplace_bmm_xpu_float64",
        "test_dispatch_meta_outplace_cdist_xpu_float64",
        "test_dispatch_meta_outplace_cholesky_inverse_xpu_complex",
        "test_dispatch_meta_outplace_cholesky_inverse_xpu_float64",
        "test_dispatch_meta_outplace_cholesky_solve_xpu_complex",
        "test_dispatch_meta_outplace_cholesky_solve_xpu_float64",
        "test_dispatch_meta_outplace_cholesky_xpu_complex",
        "test_dispatch_meta_outplace_cholesky_xpu_float64",
        "test_dispatch_meta_outplace_corrcoef_xpu_complex",
        "test_dispatch_meta_outplace_corrcoef_xpu_float64",
        "test_dispatch_meta_outplace_cov_xpu_complex",
        "test_dispatch_meta_outplace_cov_xpu_float64",
        "test_dispatch_meta_outplace_einsum_xpu_complex",
        "test_dispatch_meta_outplace_einsum_xpu_float64",
        "test_dispatch_meta_outplace_geqrf_xpu_complex",
        "test_dispatch_meta_outplace_geqrf_xpu_float64",
        "test_dispatch_meta_outplace_inner_xpu_complex",
        "test_dispatch_meta_outplace_inner_xpu_float64",
        "test_dispatch_meta_outplace_linalg_cholesky_ex_xpu_complex",
        "test_dispatch_meta_outplace_linalg_cholesky_ex_xpu_float64",
        "test_dispatch_meta_outplace_linalg_cholesky_xpu_complex",
        "test_dispatch_meta_outplace_linalg_cholesky_xpu_float64",
        "test_dispatch_meta_outplace_linalg_det_singular_xpu_complex",
        "test_dispatch_meta_outplace_linalg_det_singular_xpu_float64",
        "test_dispatch_meta_outplace_linalg_det_xpu_complex",
        "test_dispatch_meta_outplace_linalg_det_xpu_float64",
        "test_dispatch_meta_outplace_linalg_eig_xpu_complex",
        "test_dispatch_meta_outplace_linalg_eig_xpu_float64",
        "test_dispatch_meta_outplace_linalg_eigh_xpu_complex",
        "test_dispatch_meta_outplace_linalg_eigh_xpu_float64",
        "test_dispatch_meta_outplace_linalg_eigvals_xpu_complex",
        "test_dispatch_meta_outplace_linalg_eigvals_xpu_float64",
        "test_dispatch_meta_outplace_linalg_eigvalsh_xpu_complex",
        "test_dispatch_meta_outplace_linalg_eigvalsh_xpu_float64",
        "test_dispatch_meta_outplace_linalg_inv_ex_xpu_complex",
        "test_dispatch_meta_outplace_linalg_inv_ex_xpu_float64",
        "test_dispatch_meta_outplace_linalg_inv_xpu_complex",
        "test_dispatch_meta_outplace_linalg_inv_xpu_float64",
        "test_dispatch_meta_outplace_linalg_ldl_factor_ex_xpu_complex",
        "test_dispatch_meta_outplace_linalg_ldl_factor_ex_xpu_float64",
        "test_dispatch_meta_outplace_linalg_ldl_factor_xpu_complex",
        "test_dispatch_meta_outplace_linalg_ldl_factor_xpu_float64",
        "test_dispatch_meta_outplace_linalg_ldl_solve_xpu_complex",
        "test_dispatch_meta_outplace_linalg_ldl_solve_xpu_float64",
        "test_dispatch_meta_outplace_linalg_lstsq_grad_oriented_xpu_complex",
        "test_dispatch_meta_outplace_linalg_lstsq_grad_oriented_xpu_float64",
        "test_dispatch_meta_outplace_linalg_lstsq_xpu_complex",
        "test_dispatch_meta_outplace_linalg_lstsq_xpu_float64",
        "test_dispatch_meta_outplace_linalg_lu_factor_xpu_complex",
        "test_dispatch_meta_outplace_linalg_lu_factor_xpu_float64",
        "test_dispatch_meta_outplace_linalg_lu_solve_xpu_complex",
        "test_dispatch_meta_outplace_linalg_lu_solve_xpu_float64",
        "test_dispatch_meta_outplace_linalg_matrix_power_xpu_complex",
        "test_dispatch_meta_outplace_linalg_matrix_power_xpu_float64",
        "test_dispatch_meta_outplace_linalg_matrix_rank_hermitian_xpu_complex",
        "test_dispatch_meta_outplace_linalg_matrix_rank_hermitian_xpu_float64",
        "test_dispatch_meta_outplace_linalg_matrix_rank_xpu_complex",
        "test_dispatch_meta_outplace_linalg_matrix_rank_xpu_float64",
        "test_dispatch_meta_outplace_linalg_multi_dot_xpu_complex",
        "test_dispatch_meta_outplace_linalg_multi_dot_xpu_float64",
        "test_dispatch_meta_outplace_linalg_pinv_hermitian_xpu_complex",
        "test_dispatch_meta_outplace_linalg_pinv_hermitian_xpu_float64",
        "test_dispatch_meta_outplace_linalg_pinv_singular_xpu_complex",
        "test_dispatch_meta_outplace_linalg_pinv_singular_xpu_float64",
        "test_dispatch_meta_outplace_linalg_pinv_xpu_complex",
        "test_dispatch_meta_outplace_linalg_pinv_xpu_float64",
        "test_dispatch_meta_outplace_linalg_qr_xpu_complex",
        "test_dispatch_meta_outplace_linalg_qr_xpu_float64",
        "test_dispatch_meta_outplace_linalg_slogdet_xpu_complex",
        "test_dispatch_meta_outplace_linalg_slogdet_xpu_float64",
        "test_dispatch_meta_outplace_linalg_solve_ex_xpu_complex",
        "test_dispatch_meta_outplace_linalg_solve_ex_xpu_float64",
        "test_dispatch_meta_outplace_linalg_solve_xpu_complex",
        "test_dispatch_meta_outplace_linalg_solve_xpu_float64",
        "test_dispatch_meta_outplace_linalg_svd_xpu_complex",
        "test_dispatch_meta_outplace_linalg_svd_xpu_float64",
        "test_dispatch_meta_outplace_linalg_tensorinv_xpu_complex",
        "test_dispatch_meta_outplace_linalg_tensorinv_xpu_float64",
        "test_dispatch_meta_outplace_logdet_xpu_complex",
        "test_dispatch_meta_outplace_logdet_xpu_float64",
        "test_dispatch_meta_outplace_lu_solve_xpu_complex",
        "test_dispatch_meta_outplace_lu_solve_xpu_float64",
        "test_dispatch_meta_outplace_lu_xpu_complex",
        "test_dispatch_meta_outplace_lu_xpu_float64",
        "test_dispatch_meta_outplace_matmul_xpu_complex",
        "test_dispatch_meta_outplace_matmul_xpu_float64",
        "test_dispatch_meta_outplace_mm_xpu_complex",
        "test_dispatch_meta_outplace_mm_xpu_float64",
        "test_dispatch_meta_outplace_mv_xpu_complex",
        "test_dispatch_meta_outplace_mv_xpu_float64",
        "test_dispatch_meta_outplace_nn_functional_bilinear_xpu_float64",
        "test_dispatch_meta_outplace_nn_functional_linear_xpu_complex",
        "test_dispatch_meta_outplace_nn_functional_linear_xpu_float64",
        "test_dispatch_meta_outplace_nn_functional_multi_head_attention_forward_xpu_float64",
        "test_dispatch_meta_outplace_nn_functional_scaled_dot_product_attention_xpu_float64",
        "test_dispatch_meta_outplace_pca_lowrank_xpu_complex",
        "test_dispatch_meta_outplace_pca_lowrank_xpu_float64",
        "test_dispatch_meta_outplace_pinverse_xpu_complex",
        "test_dispatch_meta_outplace_pinverse_xpu_float64",
        "test_dispatch_meta_outplace_qr_xpu_complex",
        "test_dispatch_meta_outplace_qr_xpu_float64",
        "test_dispatch_meta_outplace_svd_lowrank_xpu_complex",
        "test_dispatch_meta_outplace_svd_lowrank_xpu_float64",
        "test_dispatch_meta_outplace_svd_xpu_complex",
        "test_dispatch_meta_outplace_svd_xpu_float64",
        "test_dispatch_meta_outplace_tensordot_xpu_complex",
        "test_dispatch_meta_outplace_tensordot_xpu_float64",
        "test_dispatch_meta_outplace_triangular_solve_xpu_complex",
        "test_dispatch_meta_outplace_triangular_solve_xpu_float64",
        "test_dispatch_symbolic_meta_inplace_addbmm_xpu_float64",
        "test_dispatch_symbolic_meta_inplace_addmm_decomposed_xpu_complex",
        "test_dispatch_symbolic_meta_inplace_addmm_decomposed_xpu_float64",
        "test_dispatch_symbolic_meta_inplace_addmm_xpu_complex",
        "test_dispatch_symbolic_meta_inplace_addmm_xpu_float64",
        "test_dispatch_symbolic_meta_inplace_addmv_xpu_complex",
        "test_dispatch_symbolic_meta_inplace_addmv_xpu_float64",
        "test_dispatch_symbolic_meta_inplace_baddbmm_xpu_complex",
        "test_dispatch_symbolic_meta_inplace_baddbmm_xpu_float64",
        "test_dispatch_symbolic_meta_outplace___rmatmul___xpu_complex",
        "test_dispatch_symbolic_meta_outplace___rmatmul___xpu_float64",
        "test_dispatch_symbolic_meta_outplace_addbmm_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_addmm_decomposed_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_addmm_decomposed_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_addmm_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_addmm_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_addmv_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_addmv_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_baddbmm_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_baddbmm_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_bmm_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_bmm_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_cdist_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_cholesky_inverse_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_cholesky_inverse_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_cholesky_solve_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_cholesky_solve_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_cholesky_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_cholesky_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_corrcoef_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_corrcoef_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_cov_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_cov_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_einsum_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_einsum_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_geqrf_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_geqrf_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_inner_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_inner_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_cholesky_ex_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_cholesky_ex_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_cholesky_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_cholesky_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_det_singular_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_det_singular_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_det_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_det_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_eig_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_eig_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_eigh_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_eigh_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_eigvals_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_eigvals_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_eigvalsh_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_eigvalsh_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_inv_ex_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_inv_ex_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_inv_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_inv_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_ldl_factor_ex_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_ldl_factor_ex_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_ldl_factor_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_ldl_factor_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_ldl_solve_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_ldl_solve_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_lstsq_grad_oriented_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_lstsq_grad_oriented_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_lstsq_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_lstsq_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_lu_factor_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_lu_factor_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_lu_solve_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_lu_solve_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_matrix_power_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_matrix_power_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_matrix_rank_hermitian_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_matrix_rank_hermitian_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_matrix_rank_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_matrix_rank_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_multi_dot_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_multi_dot_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_pinv_hermitian_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_pinv_hermitian_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_pinv_singular_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_pinv_singular_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_pinv_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_pinv_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_qr_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_qr_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_slogdet_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_slogdet_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_solve_ex_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_solve_ex_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_solve_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_solve_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_svd_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_svd_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_linalg_tensorinv_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_linalg_tensorinv_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_logdet_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_logdet_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_lu_solve_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_lu_solve_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_lu_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_lu_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_matmul_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_matmul_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_mm_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_mm_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_mv_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_mv_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_nn_functional_bilinear_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_nn_functional_linear_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_nn_functional_linear_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_nn_functional_multi_head_attention_forward_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_nn_functional_scaled_dot_product_attention_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_pca_lowrank_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_pca_lowrank_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_pinverse_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_pinverse_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_qr_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_qr_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_svd_lowrank_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_svd_lowrank_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_svd_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_svd_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_tensordot_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_tensordot_xpu_float64",
        "test_dispatch_symbolic_meta_outplace_triangular_solve_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_triangular_solve_xpu_float64",
        "test_meta_inplace_addbmm_xpu_float64",
        "test_meta_inplace_addmm_decomposed_xpu_complex",
        "test_meta_inplace_addmm_decomposed_xpu_float64",
        "test_meta_inplace_addmm_xpu_complex",
        "test_meta_inplace_addmm_xpu_float64",
        "test_meta_inplace_addmv_xpu_complex",
        "test_meta_inplace_addmv_xpu_float64",
        "test_meta_inplace_baddbmm_xpu_complex",
        "test_meta_inplace_baddbmm_xpu_float64",
        "test_meta_outplace___rmatmul___xpu_complex",
        "test_meta_outplace___rmatmul___xpu_float64",
        "test_meta_outplace_addbmm_xpu_float64",
        "test_meta_outplace_addmm_decomposed_xpu_complex",
        "test_meta_outplace_addmm_decomposed_xpu_float64",
        "test_meta_outplace_addmm_xpu_complex",
        "test_meta_outplace_addmm_xpu_float64",
        "test_meta_outplace_addmv_xpu_complex",
        "test_meta_outplace_addmv_xpu_float64",
        "test_meta_outplace_baddbmm_xpu_complex",
        "test_meta_outplace_baddbmm_xpu_float64",
        "test_meta_outplace_bmm_xpu_complex",
        "test_meta_outplace_bmm_xpu_float64",
        "test_meta_outplace_cdist_xpu_float64",
        "test_meta_outplace_cholesky_inverse_xpu_complex",
        "test_meta_outplace_cholesky_inverse_xpu_float64",
        "test_meta_outplace_cholesky_solve_xpu_complex",
        "test_meta_outplace_cholesky_solve_xpu_float64",
        "test_meta_outplace_cholesky_xpu_complex",
        "test_meta_outplace_cholesky_xpu_float64",
        "test_meta_outplace_corrcoef_xpu_complex",
        "test_meta_outplace_corrcoef_xpu_float64",
        "test_meta_outplace_cov_xpu_complex",
        "test_meta_outplace_cov_xpu_float64",
        "test_meta_outplace_einsum_xpu_complex",
        "test_meta_outplace_einsum_xpu_float64",
        "test_meta_outplace_geqrf_xpu_complex",
        "test_meta_outplace_geqrf_xpu_float64",
        "test_meta_outplace_inner_xpu_complex",
        "test_meta_outplace_inner_xpu_float64",
        "test_meta_outplace_linalg_cholesky_ex_xpu_complex",
        "test_meta_outplace_linalg_cholesky_ex_xpu_float64",
        "test_meta_outplace_linalg_cholesky_xpu_complex",
        "test_meta_outplace_linalg_cholesky_xpu_float64",
        "test_meta_outplace_linalg_det_singular_xpu_complex",
        "test_meta_outplace_linalg_det_singular_xpu_float64",
        "test_meta_outplace_linalg_det_xpu_complex",
        "test_meta_outplace_linalg_det_xpu_float64",
        "test_meta_outplace_linalg_eig_xpu_complex",
        "test_meta_outplace_linalg_eig_xpu_float64",
        "test_meta_outplace_linalg_eigh_xpu_complex",
        "test_meta_outplace_linalg_eigh_xpu_float64",
        "test_meta_outplace_linalg_eigvals_xpu_complex",
        "test_meta_outplace_linalg_eigvals_xpu_float64",
        "test_meta_outplace_linalg_eigvalsh_xpu_complex",
        "test_meta_outplace_linalg_eigvalsh_xpu_float64",
        "test_meta_outplace_linalg_inv_ex_xpu_complex",
        "test_meta_outplace_linalg_inv_ex_xpu_float64",
        "test_meta_outplace_linalg_inv_xpu_complex",
        "test_meta_outplace_linalg_inv_xpu_float64",
        "test_meta_outplace_linalg_ldl_factor_ex_xpu_complex",
        "test_meta_outplace_linalg_ldl_factor_ex_xpu_float64",
        "test_meta_outplace_linalg_ldl_factor_xpu_complex",
        "test_meta_outplace_linalg_ldl_factor_xpu_float64",
        "test_meta_outplace_linalg_ldl_solve_xpu_complex",
        "test_meta_outplace_linalg_ldl_solve_xpu_float64",
        "test_meta_outplace_linalg_lstsq_grad_oriented_xpu_complex",
        "test_meta_outplace_linalg_lstsq_grad_oriented_xpu_float64",
        "test_meta_outplace_linalg_lstsq_xpu_complex",
        "test_meta_outplace_linalg_lstsq_xpu_float64",
        "test_meta_outplace_linalg_lu_factor_xpu_complex",
        "test_meta_outplace_linalg_lu_factor_xpu_float64",
        "test_meta_outplace_linalg_lu_solve_xpu_complex",
        "test_meta_outplace_linalg_lu_solve_xpu_float64",
        "test_meta_outplace_linalg_matrix_power_xpu_complex",
        "test_meta_outplace_linalg_matrix_power_xpu_float64",
        "test_meta_outplace_linalg_matrix_rank_hermitian_xpu_complex",
        "test_meta_outplace_linalg_matrix_rank_hermitian_xpu_float64",
        "test_meta_outplace_linalg_matrix_rank_xpu_complex",
        "test_meta_outplace_linalg_matrix_rank_xpu_float64",
        "test_meta_outplace_linalg_multi_dot_xpu_complex",
        "test_meta_outplace_linalg_multi_dot_xpu_float64",
        "test_meta_outplace_linalg_pinv_hermitian_xpu_complex",
        "test_meta_outplace_linalg_pinv_hermitian_xpu_float64",
        "test_meta_outplace_linalg_pinv_singular_xpu_complex",
        "test_meta_outplace_linalg_pinv_singular_xpu_float64",
        "test_meta_outplace_linalg_pinv_xpu_complex",
        "test_meta_outplace_linalg_pinv_xpu_float64",
        "test_meta_outplace_linalg_qr_xpu_complex",
        "test_meta_outplace_linalg_qr_xpu_float64",
        "test_meta_outplace_linalg_slogdet_xpu_complex",
        "test_meta_outplace_linalg_slogdet_xpu_float64",
        "test_meta_outplace_linalg_solve_ex_xpu_complex",
        "test_meta_outplace_linalg_solve_ex_xpu_float64",
        "test_meta_outplace_linalg_solve_xpu_complex",
        "test_meta_outplace_linalg_solve_xpu_float64",
        "test_meta_outplace_linalg_svd_xpu_complex",
        "test_meta_outplace_linalg_svd_xpu_float64",
        "test_meta_outplace_linalg_tensorinv_xpu_complex",
        "test_meta_outplace_linalg_tensorinv_xpu_float64",
        "test_meta_outplace_logdet_xpu_complex",
        "test_meta_outplace_logdet_xpu_float64",
        "test_meta_outplace_lu_solve_xpu_complex",
        "test_meta_outplace_lu_solve_xpu_float64",
        "test_meta_outplace_lu_xpu_complex",
        "test_meta_outplace_lu_xpu_float64",
        "test_meta_outplace_matmul_xpu_complex",
        "test_meta_outplace_matmul_xpu_float64",
        "test_meta_outplace_mm_xpu_complex",
        "test_meta_outplace_mm_xpu_float64",
        "test_meta_outplace_mv_xpu_complex",
        "test_meta_outplace_mv_xpu_float64",
        "test_meta_outplace_nn_functional_bilinear_xpu_float64",
        "test_meta_outplace_nn_functional_linear_xpu_complex",
        "test_meta_outplace_nn_functional_linear_xpu_float64",
        "test_meta_outplace_nn_functional_multi_head_attention_forward_xpu_float64",
        "test_meta_outplace_nn_functional_scaled_dot_product_attention_xpu_float64",
        "test_meta_outplace_pca_lowrank_xpu_complex",
        "test_meta_outplace_pca_lowrank_xpu_float64",
        "test_meta_outplace_pinverse_xpu_complex",
        "test_meta_outplace_pinverse_xpu_float64",
        "test_meta_outplace_qr_xpu_complex",
        "test_meta_outplace_qr_xpu_float64",
        "test_meta_outplace_svd_lowrank_xpu_complex",
        "test_meta_outplace_svd_lowrank_xpu_float64",
        "test_meta_outplace_svd_xpu_complex",
        "test_meta_outplace_svd_xpu_float64",
        "test_meta_outplace_tensordot_xpu_complex",
        "test_meta_outplace_tensordot_xpu_float64",
        "test_meta_outplace_triangular_solve_xpu_complex",
        "test_meta_outplace_triangular_solve_xpu_float64",
        # RuntimeError: Short is not supported in oneDNN!
        "test_dispatch_meta_inplace_addbmm_xpu_int16",
        "test_dispatch_meta_inplace_addmm_decomposed_xpu_int16",
        "test_dispatch_meta_inplace_addmm_xpu_int16",
        "test_dispatch_meta_inplace_addmv_xpu_int16",
        "test_dispatch_meta_inplace_baddbmm_xpu_int16",
        "test_dispatch_meta_outplace___rmatmul___xpu_int16",
        "test_dispatch_meta_outplace_addbmm_xpu_int16",
        "test_dispatch_meta_outplace_addmm_decomposed_xpu_int16",
        "test_dispatch_meta_outplace_addmm_xpu_int16",
        "test_dispatch_meta_outplace_addmv_xpu_int16",
        "test_dispatch_meta_outplace_baddbmm_xpu_int16",
        "test_dispatch_meta_outplace_bmm_xpu_int16",
        "test_dispatch_meta_outplace_einsum_xpu_int16",
        "test_dispatch_meta_outplace_inner_xpu_int16",
        "test_dispatch_meta_outplace_linalg_multi_dot_xpu_int16",
        "test_dispatch_meta_outplace_matmul_xpu_int16",
        "test_dispatch_meta_outplace_mm_xpu_int16",
        "test_dispatch_meta_outplace_mv_xpu_int16",
        "test_dispatch_meta_outplace_nn_functional_bilinear_xpu_int16",
        "test_dispatch_meta_outplace_tensordot_xpu_int16",
        "test_dispatch_symbolic_meta_inplace_addbmm_xpu_int16",
        "test_dispatch_symbolic_meta_inplace_addmm_decomposed_xpu_int16",
        "test_dispatch_symbolic_meta_inplace_addmm_xpu_int16",
        "test_dispatch_symbolic_meta_inplace_addmv_xpu_int16",
        "test_dispatch_symbolic_meta_inplace_baddbmm_xpu_int16",
        "test_dispatch_symbolic_meta_outplace___rmatmul___xpu_int16",
        "test_dispatch_symbolic_meta_outplace_addbmm_xpu_int16",
        "test_dispatch_symbolic_meta_outplace_addmm_decomposed_xpu_int16",
        "test_dispatch_symbolic_meta_outplace_addmm_xpu_int16",
        "test_dispatch_symbolic_meta_outplace_addmv_xpu_int16",
        "test_dispatch_symbolic_meta_outplace_baddbmm_xpu_int16",
        "test_dispatch_symbolic_meta_outplace_bmm_xpu_int16",
        "test_dispatch_symbolic_meta_outplace_einsum_xpu_int16",
        "test_dispatch_symbolic_meta_outplace_inner_xpu_int16",
        "test_dispatch_symbolic_meta_outplace_linalg_multi_dot_xpu_int16",
        "test_dispatch_symbolic_meta_outplace_matmul_xpu_int16",
        "test_dispatch_symbolic_meta_outplace_mm_xpu_int16",
        "test_dispatch_symbolic_meta_outplace_mv_xpu_int16",
        "test_dispatch_symbolic_meta_outplace_nn_functional_bilinear_xpu_int16",
        "test_dispatch_symbolic_meta_outplace_tensordot_xpu_int16",
        "test_meta_inplace_addbmm_xpu_int16",
        "test_meta_inplace_addmm_decomposed_xpu_int16",
        "test_meta_inplace_addmm_xpu_int16",
        "test_meta_inplace_addmv_xpu_int16",
        "test_meta_inplace_baddbmm_xpu_int16",
        "test_meta_outplace___rmatmul___xpu_int16",
        "test_meta_outplace_addbmm_xpu_int16",
        "test_meta_outplace_addmm_decomposed_xpu_int16",
        "test_meta_outplace_addmm_xpu_int16",
        "test_meta_outplace_addmv_xpu_int16",
        "test_meta_outplace_baddbmm_xpu_int16",
        "test_meta_outplace_bmm_xpu_int16",
        "test_meta_outplace_einsum_xpu_int16",
        "test_meta_outplace_inner_xpu_int16",
        "test_meta_outplace_linalg_multi_dot_xpu_int16",
        "test_meta_outplace_matmul_xpu_int16",
        "test_meta_outplace_mm_xpu_int16",
        "test_meta_outplace_mv_xpu_int16",
        "test_meta_outplace_nn_functional_bilinear_xpu_int16",
        "test_meta_outplace_tensordot_xpu_int16",
        # RuntimeError: could not create a primitive descriptor for a matmul primitive
        "test_dispatch_meta_inplace_addbmm_xpu_int32",
        "test_dispatch_meta_inplace_addbmm_xpu_uint8",
        "test_dispatch_meta_inplace_addmm_decomposed_xpu_int32",
        "test_dispatch_meta_inplace_addmm_decomposed_xpu_uint8",
        "test_dispatch_meta_inplace_addmm_xpu_int32",
        "test_dispatch_meta_inplace_addmm_xpu_uint8",
        "test_dispatch_meta_inplace_addmv_xpu_int32",
        "test_dispatch_meta_inplace_addmv_xpu_uint8",
        "test_dispatch_meta_inplace_baddbmm_xpu_int32",
        "test_dispatch_meta_inplace_baddbmm_xpu_uint8",
        "test_dispatch_meta_outplace___rmatmul___xpu_int32",
        "test_dispatch_meta_outplace___rmatmul___xpu_uint8",
        "test_dispatch_meta_outplace_addbmm_xpu_int32",
        "test_dispatch_meta_outplace_addbmm_xpu_uint8",
        "test_dispatch_meta_outplace_addmm_decomposed_xpu_int32",
        "test_dispatch_meta_outplace_addmm_decomposed_xpu_uint8",
        "test_dispatch_meta_outplace_addmm_xpu_int32",
        "test_dispatch_meta_outplace_addmm_xpu_uint8",
        "test_dispatch_meta_outplace_addmv_xpu_int32",
        "test_dispatch_meta_outplace_addmv_xpu_uint8",
        "test_dispatch_meta_outplace_baddbmm_xpu_int32",
        "test_dispatch_meta_outplace_baddbmm_xpu_uint8",
        "test_dispatch_meta_outplace_bmm_xpu_int32",
        "test_dispatch_meta_outplace_bmm_xpu_uint8",
        "test_dispatch_meta_outplace_einsum_xpu_int32",
        "test_dispatch_meta_outplace_einsum_xpu_uint8",
        "test_dispatch_meta_outplace_inner_xpu_int32",
        "test_dispatch_meta_outplace_inner_xpu_uint8",
        "test_dispatch_meta_outplace_linalg_multi_dot_xpu_int32",
        "test_dispatch_meta_outplace_linalg_multi_dot_xpu_uint8",
        "test_dispatch_meta_outplace_matmul_xpu_int32",
        "test_dispatch_meta_outplace_matmul_xpu_uint8",
        "test_dispatch_meta_outplace_mm_xpu_int32",
        "test_dispatch_meta_outplace_mm_xpu_uint8",
        "test_dispatch_meta_outplace_mv_xpu_int32",
        "test_dispatch_meta_outplace_mv_xpu_uint8",
        "test_dispatch_meta_outplace_nn_functional_bilinear_xpu_int32",
        "test_dispatch_meta_outplace_nn_functional_bilinear_xpu_uint8",
        "test_dispatch_meta_outplace_nn_functional_linear_xpu_int32",
        "test_dispatch_meta_outplace_nn_functional_linear_xpu_uint8",
        "test_dispatch_meta_outplace_tensordot_xpu_int32",
        "test_dispatch_meta_outplace_tensordot_xpu_uint8",
        "test_dispatch_symbolic_meta_inplace_addbmm_xpu_int32",
        "test_dispatch_symbolic_meta_inplace_addbmm_xpu_uint8",
        "test_dispatch_symbolic_meta_inplace_addmm_decomposed_xpu_int32",
        "test_dispatch_symbolic_meta_inplace_addmm_decomposed_xpu_uint8",
        "test_dispatch_symbolic_meta_inplace_addmm_xpu_int32",
        "test_dispatch_symbolic_meta_inplace_addmm_xpu_uint8",
        "test_dispatch_symbolic_meta_inplace_addmv_xpu_int32",
        "test_dispatch_symbolic_meta_inplace_addmv_xpu_uint8",
        "test_dispatch_symbolic_meta_inplace_baddbmm_xpu_int32",
        "test_dispatch_symbolic_meta_inplace_baddbmm_xpu_uint8",
        "test_dispatch_symbolic_meta_outplace___rmatmul___xpu_int32",
        "test_dispatch_symbolic_meta_outplace___rmatmul___xpu_uint8",
        "test_dispatch_symbolic_meta_outplace_addbmm_xpu_int32",
        "test_dispatch_symbolic_meta_outplace_addbmm_xpu_uint8",
        "test_dispatch_symbolic_meta_outplace_addmm_decomposed_xpu_int32",
        "test_dispatch_symbolic_meta_outplace_addmm_decomposed_xpu_uint8",
        "test_dispatch_symbolic_meta_outplace_addmm_xpu_int32",
        "test_dispatch_symbolic_meta_outplace_addmm_xpu_uint8",
        "test_dispatch_symbolic_meta_outplace_addmv_xpu_int32",
        "test_dispatch_symbolic_meta_outplace_addmv_xpu_uint8",
        "test_dispatch_symbolic_meta_outplace_baddbmm_xpu_int32",
        "test_dispatch_symbolic_meta_outplace_baddbmm_xpu_uint8",
        "test_dispatch_symbolic_meta_outplace_bmm_xpu_int32",
        "test_dispatch_symbolic_meta_outplace_bmm_xpu_uint8",
        "test_dispatch_symbolic_meta_outplace_einsum_xpu_int32",
        "test_dispatch_symbolic_meta_outplace_einsum_xpu_uint8",
        "test_dispatch_symbolic_meta_outplace_inner_xpu_int32",
        "test_dispatch_symbolic_meta_outplace_inner_xpu_uint8",
        "test_dispatch_symbolic_meta_outplace_linalg_multi_dot_xpu_int32",
        "test_dispatch_symbolic_meta_outplace_linalg_multi_dot_xpu_uint8",
        "test_dispatch_symbolic_meta_outplace_matmul_xpu_int32",
        "test_dispatch_symbolic_meta_outplace_matmul_xpu_uint8",
        "test_dispatch_symbolic_meta_outplace_mm_xpu_int32",
        "test_dispatch_symbolic_meta_outplace_mm_xpu_uint8",
        "test_dispatch_symbolic_meta_outplace_mv_xpu_int32",
        "test_dispatch_symbolic_meta_outplace_mv_xpu_uint8",
        "test_dispatch_symbolic_meta_outplace_nn_functional_bilinear_xpu_int32",
        "test_dispatch_symbolic_meta_outplace_nn_functional_bilinear_xpu_uint8",
        "test_dispatch_symbolic_meta_outplace_nn_functional_linear_xpu_int32",
        "test_dispatch_symbolic_meta_outplace_nn_functional_linear_xpu_uint8",
        "test_dispatch_symbolic_meta_outplace_tensordot_xpu_int32",
        "test_dispatch_symbolic_meta_outplace_tensordot_xpu_uint8",
        "test_meta_inplace_addbmm_xpu_int32",
        "test_meta_inplace_addbmm_xpu_uint8",
        "test_meta_inplace_addmm_decomposed_xpu_int32",
        "test_meta_inplace_addmm_decomposed_xpu_uint8",
        "test_meta_inplace_addmm_xpu_int32",
        "test_meta_inplace_addmm_xpu_uint8",
        "test_meta_inplace_addmv_xpu_int32",
        "test_meta_inplace_addmv_xpu_uint8",
        "test_meta_inplace_baddbmm_xpu_int32",
        "test_meta_inplace_baddbmm_xpu_uint8",
        "test_meta_outplace___rmatmul___xpu_int32",
        "test_meta_outplace___rmatmul___xpu_uint8",
        "test_meta_outplace_addbmm_xpu_int32",
        "test_meta_outplace_addbmm_xpu_uint8",
        "test_meta_outplace_addmm_decomposed_xpu_int32",
        "test_meta_outplace_addmm_decomposed_xpu_uint8",
        "test_meta_outplace_addmm_xpu_int32",
        "test_meta_outplace_addmm_xpu_uint8",
        "test_meta_outplace_addmv_xpu_int32",
        "test_meta_outplace_addmv_xpu_uint8",
        "test_meta_outplace_baddbmm_xpu_int32",
        "test_meta_outplace_baddbmm_xpu_uint8",
        "test_meta_outplace_bmm_xpu_int32",
        "test_meta_outplace_bmm_xpu_uint8",
        "test_meta_outplace_einsum_xpu_int32",
        "test_meta_outplace_einsum_xpu_uint8",
        "test_meta_outplace_inner_xpu_int32",
        "test_meta_outplace_inner_xpu_uint8",
        "test_meta_outplace_linalg_multi_dot_xpu_int32",
        "test_meta_outplace_linalg_multi_dot_xpu_uint8",
        "test_meta_outplace_matmul_xpu_int32",
        "test_meta_outplace_matmul_xpu_uint8",
        "test_meta_outplace_mm_xpu_int32",
        "test_meta_outplace_mm_xpu_uint8",
        "test_meta_outplace_mv_xpu_int32",
        "test_meta_outplace_mv_xpu_uint8",
        "test_meta_outplace_nn_functional_bilinear_xpu_int32",
        "test_meta_outplace_nn_functional_bilinear_xpu_uint8",
        "test_meta_outplace_nn_functional_linear_xpu_int32",
        "test_meta_outplace_nn_functional_linear_xpu_uint8",
        "test_meta_outplace_tensordot_xpu_int32",
        "test_meta_outplace_tensordot_xpu_uint8",
        # RuntimeError: Long is not supported in oneDNN!
        "test_dispatch_meta_inplace_addbmm_xpu_int64",
        "test_dispatch_meta_inplace_addmm_decomposed_xpu_int64",
        "test_dispatch_meta_inplace_addmm_xpu_int64",
        "test_dispatch_meta_inplace_addmv_xpu_int64",
        "test_dispatch_meta_inplace_baddbmm_xpu_int64",
        "test_dispatch_meta_outplace___rmatmul___xpu_int64",
        "test_dispatch_meta_outplace_addbmm_xpu_int64",
        "test_dispatch_meta_outplace_addmm_decomposed_xpu_int64",
        "test_dispatch_meta_outplace_addmm_xpu_int64",
        "test_dispatch_meta_outplace_addmv_xpu_int64",
        "test_dispatch_meta_outplace_baddbmm_xpu_int64",
        "test_dispatch_meta_outplace_bmm_xpu_int64",
        "test_dispatch_meta_outplace_einsum_xpu_int64",
        "test_dispatch_meta_outplace_inner_xpu_int64",
        "test_dispatch_meta_outplace_linalg_multi_dot_xpu_int64",
        "test_dispatch_meta_outplace_matmul_xpu_int64",
        "test_dispatch_meta_outplace_mm_xpu_int64",
        "test_dispatch_meta_outplace_mv_xpu_int64",
        "test_dispatch_meta_outplace_nn_functional_bilinear_xpu_int64",
        "test_dispatch_meta_outplace_nn_functional_conv1d_xpu_int64",
        "test_dispatch_meta_outplace_nn_functional_conv2d_xpu_int64",
        "test_dispatch_meta_outplace_nn_functional_conv3d_xpu_int64",
        "test_dispatch_meta_outplace_nn_functional_conv_transpose1d_xpu_int64",
        "test_dispatch_meta_outplace_nn_functional_conv_transpose2d_xpu_int64",
        "test_dispatch_meta_outplace_nn_functional_conv_transpose3d_xpu_int64",
        "test_dispatch_meta_outplace_tensordot_xpu_int64",
        "test_dispatch_symbolic_meta_inplace_addbmm_xpu_int64",
        "test_dispatch_symbolic_meta_inplace_addmm_decomposed_xpu_int64",
        "test_dispatch_symbolic_meta_inplace_addmm_xpu_int64",
        "test_dispatch_symbolic_meta_inplace_addmv_xpu_int64",
        "test_dispatch_symbolic_meta_inplace_baddbmm_xpu_int64",
        "test_dispatch_symbolic_meta_outplace___rmatmul___xpu_int64",
        "test_dispatch_symbolic_meta_outplace_addbmm_xpu_int64",
        "test_dispatch_symbolic_meta_outplace_addmm_decomposed_xpu_int64",
        "test_dispatch_symbolic_meta_outplace_addmm_xpu_int64",
        "test_dispatch_symbolic_meta_outplace_addmv_xpu_int64",
        "test_dispatch_symbolic_meta_outplace_baddbmm_xpu_int64",
        "test_dispatch_symbolic_meta_outplace_bmm_xpu_int64",
        "test_dispatch_symbolic_meta_outplace_einsum_xpu_int64",
        "test_dispatch_symbolic_meta_outplace_inner_xpu_int64",
        "test_dispatch_symbolic_meta_outplace_linalg_multi_dot_xpu_int64",
        "test_dispatch_symbolic_meta_outplace_matmul_xpu_int64",
        "test_dispatch_symbolic_meta_outplace_mm_xpu_int64",
        "test_dispatch_symbolic_meta_outplace_mv_xpu_int64",
        "test_dispatch_symbolic_meta_outplace_nn_functional_bilinear_xpu_int64",
        "test_dispatch_symbolic_meta_outplace_nn_functional_conv1d_xpu_int64",
        "test_dispatch_symbolic_meta_outplace_nn_functional_conv2d_xpu_int64",
        "test_dispatch_symbolic_meta_outplace_nn_functional_conv3d_xpu_int64",
        "test_dispatch_symbolic_meta_outplace_nn_functional_conv_transpose1d_xpu_int64",
        "test_dispatch_symbolic_meta_outplace_nn_functional_conv_transpose2d_xpu_int64",
        "test_dispatch_symbolic_meta_outplace_nn_functional_conv_transpose3d_xpu_int64",
        "test_dispatch_symbolic_meta_outplace_tensordot_xpu_int64",
        "test_meta_inplace_addbmm_xpu_int64",
        "test_meta_inplace_addmm_decomposed_xpu_int64",
        "test_meta_inplace_addmm_xpu_int64",
        "test_meta_inplace_addmv_xpu_int64",
        "test_meta_inplace_baddbmm_xpu_int64",
        "test_meta_outplace___rmatmul___xpu_int64",
        "test_meta_outplace_addbmm_xpu_int64",
        "test_meta_outplace_addmm_decomposed_xpu_int64",
        "test_meta_outplace_addmm_xpu_int64",
        "test_meta_outplace_addmv_xpu_int64",
        "test_meta_outplace_baddbmm_xpu_int64",
        "test_meta_outplace_bmm_xpu_int64",
        "test_meta_outplace_einsum_xpu_int64",
        "test_meta_outplace_inner_xpu_int64",
        "test_meta_outplace_linalg_multi_dot_xpu_int64",
        "test_meta_outplace_matmul_xpu_int64",
        "test_meta_outplace_mm_xpu_int64",
        "test_meta_outplace_mv_xpu_int64",
        "test_meta_outplace_nn_functional_bilinear_xpu_int64",
        "test_meta_outplace_nn_functional_conv1d_xpu_int64",
        "test_meta_outplace_nn_functional_conv2d_xpu_int64",
        "test_meta_outplace_nn_functional_conv3d_xpu_int64",
        "test_meta_outplace_nn_functional_conv_transpose1d_xpu_int64",
        "test_meta_outplace_nn_functional_conv_transpose2d_xpu_int64",
        "test_meta_outplace_nn_functional_conv_transpose3d_xpu_int64",
        "test_meta_outplace_tensordot_xpu_int64",
        # RuntimeError: could not create a primitive
        "test_dispatch_meta_outplace_addbmm_xpu_bfloat16",
        "test_dispatch_meta_outplace_addbmm_xpu_float16",
        "test_dispatch_meta_outplace_addbmm_xpu_float32",
        "test_dispatch_meta_outplace_addbmm_xpu_int8",
        "test_dispatch_meta_outplace_addmm_xpu_bfloat16",
        "test_dispatch_meta_outplace_addmm_xpu_float16",
        "test_dispatch_meta_outplace_addmm_xpu_float32",
        "test_dispatch_meta_outplace_addmm_xpu_int8",
        "test_dispatch_meta_outplace_addmv_xpu_bfloat16",
        "test_dispatch_meta_outplace_addmv_xpu_float16",
        "test_dispatch_meta_outplace_addmv_xpu_float32",
        "test_dispatch_meta_outplace_addmv_xpu_int8",
        "test_dispatch_symbolic_meta_outplace_addbmm_xpu_bfloat16",
        "test_dispatch_symbolic_meta_outplace_addbmm_xpu_float16",
        "test_dispatch_symbolic_meta_outplace_addbmm_xpu_float32",
        "test_dispatch_symbolic_meta_outplace_addbmm_xpu_int8",
        "test_dispatch_symbolic_meta_outplace_addmm_xpu_bfloat16",
        "test_dispatch_symbolic_meta_outplace_addmm_xpu_float16",
        "test_dispatch_symbolic_meta_outplace_addmm_xpu_float32",
        "test_dispatch_symbolic_meta_outplace_addmm_xpu_int8",
        "test_dispatch_symbolic_meta_outplace_addmv_xpu_bfloat16",
        "test_dispatch_symbolic_meta_outplace_addmv_xpu_float16",
        "test_dispatch_symbolic_meta_outplace_addmv_xpu_float32",
        "test_dispatch_symbolic_meta_outplace_addmv_xpu_int8",
        "test_dispatch_symbolic_meta_outplace_all_strides_addbmm_xpu_float32",
        "test_dispatch_symbolic_meta_outplace_all_strides_addmm_xpu_float32",
        "test_dispatch_symbolic_meta_outplace_all_strides_addmv_xpu_float32",
        "test_meta_outplace_addbmm_xpu_bfloat16",
        "test_meta_outplace_addbmm_xpu_float16",
        "test_meta_outplace_addbmm_xpu_float32",
        "test_meta_outplace_addbmm_xpu_int8",
        "test_meta_outplace_addmm_xpu_bfloat16",
        "test_meta_outplace_addmm_xpu_float16",
        "test_meta_outplace_addmm_xpu_float32",
        "test_meta_outplace_addmm_xpu_int8",
        "test_meta_outplace_addmv_xpu_bfloat16",
        "test_meta_outplace_addmv_xpu_float16",
        "test_meta_outplace_addmv_xpu_float32",
        "test_meta_outplace_addmv_xpu_int8",
        # RuntimeError: could not create a primitive descriptor for a deconvolution forward propagation primitive
        "test_dispatch_meta_outplace_nn_functional_conv_transpose2d_xpu_bfloat16",
        "test_dispatch_meta_outplace_nn_functional_conv_transpose2d_xpu_complex",
        "test_dispatch_meta_outplace_nn_functional_conv_transpose2d_xpu_float",
        "test_dispatch_meta_outplace_nn_functional_conv_transpose3d_xpu_bfloat16",
        "test_dispatch_meta_outplace_nn_functional_conv_transpose3d_xpu_complex",
        "test_dispatch_meta_outplace_nn_functional_conv_transpose3d_xpu_float",
        "test_dispatch_symbolic_meta_outplace_all_strides_nn_functional_conv_transpose2d_xpu_float32",
        "test_dispatch_symbolic_meta_outplace_all_strides_nn_functional_conv_transpose3d_xpu_float32",
        "test_dispatch_symbolic_meta_outplace_nn_functional_conv_transpose2d_xpu_bfloat16",
        "test_dispatch_symbolic_meta_outplace_nn_functional_conv_transpose2d_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_nn_functional_conv_transpose2d_xpu_float",
        "test_dispatch_symbolic_meta_outplace_nn_functional_conv_transpose3d_xpu_bfloat16",
        "test_dispatch_symbolic_meta_outplace_nn_functional_conv_transpose3d_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_nn_functional_conv_transpose3d_xpu_float",
        "test_meta_outplace_nn_functional_conv_transpose2d_xpu_bfloat16",
        "test_meta_outplace_nn_functional_conv_transpose2d_xpu_complex",
        "test_meta_outplace_nn_functional_conv_transpose2d_xpu_float",
        "test_meta_outplace_nn_functional_conv_transpose3d_xpu_bfloat16",
        "test_meta_outplace_nn_functional_conv_transpose3d_xpu_complex",
        "test_meta_outplace_nn_functional_conv_transpose3d_xpu_float",
        # Not implemented, try these cases after implementing vdot
        "test_dispatch_meta_outplace_vdot_xpu_complex",
        "test_dispatch_symbolic_meta_outplace_vdot_xpu_complex",
        "test_meta_outplace_vdot_xpu_complex",
        # Unexpected success:
        "test_dispatch_symbolic_meta_outplace_all_strides_narrow_copy_xpu_float32",
        # Newly added case in 2.7
        "test_nonzero_xpu",
    ),
    "test_type_promotion_xpu.py": None,
    "test_distributions_xpu.py": (
        # TODO: Passes with the LTS driver version, but fails with the rolling driver version
        "test_gamma_gpu_sample_xpu",
    ),
    "test_optim_xpu.py": (
        # oneDNN issues
        # RuntimeError: Double and complex datatype matmul is not supported in oneDNN
        "test_foreach_matches_forloop_ASGD_xpu_float64",
        "test_foreach_matches_forloop_Adadelta_xpu_float64",
        "test_foreach_matches_forloop_Adafactor_xpu_float64",
        "test_foreach_matches_forloop_Adagrad_xpu_float64",
        "test_foreach_matches_forloop_AdamW_xpu_float64",
        "test_foreach_matches_forloop_Adam_xpu_float64",
        "test_foreach_matches_forloop_Adamax_xpu_float64",
        "test_foreach_matches_forloop_NAdam_xpu_float64",
        "test_foreach_matches_forloop_RAdam_xpu_float64",
        "test_foreach_matches_forloop_RMSprop_xpu_float64",
        "test_foreach_matches_forloop_Rprop_xpu_float64",
        "test_foreach_matches_forloop_SGD_xpu_float64",
        "test_fused_cpu_matches_cuda_AdamW_xpu_float64",
        "test_fused_cpu_matches_cuda_Adam_xpu_float64",
        "test_fused_cpu_matches_cuda_SGD_xpu_float64",
        "test_fused_matches_forloop_AdamW_xpu_float64",
        "test_fused_matches_forloop_Adam_xpu_float64",
        "test_fused_matches_forloop_SGD_xpu_float64",
        "test_set_default_dtype_works_with_foreach_ASGD_xpu_float64",
        "test_set_default_dtype_works_with_foreach_Adadelta_xpu_float64",
        "test_set_default_dtype_works_with_foreach_Adafactor_xpu_float64",
        "test_set_default_dtype_works_with_foreach_Adagrad_xpu_float64",
        "test_set_default_dtype_works_with_foreach_AdamW_xpu_float64",
        "test_set_default_dtype_works_with_foreach_Adam_xpu_float64",
        "test_set_default_dtype_works_with_foreach_Adamax_xpu_float64",
        "test_set_default_dtype_works_with_foreach_NAdam_xpu_float64",
        "test_set_default_dtype_works_with_foreach_RAdam_xpu_float64",
        "test_set_default_dtype_works_with_foreach_RMSprop_xpu_float64",
        "test_set_default_dtype_works_with_foreach_Rprop_xpu_float64",
        "test_set_default_dtype_works_with_foreach_SGD_xpu_float64",
    ),
    "test_spectral_ops_xpu.py": (
        # CUDA specific case
        "test_cufft_plan_cache_xpu_float64",
    ),
    "test_sparse_xpu.py": (
        "test_bmm_deterministic_xpu_float64",  # - AssertionError: Torch not compiled with CUDA enabled
        "test_bmm_oob_xpu",  # - NotImplementedError: Could not run 'aten::bmm' with arguments from the 'SparseXPU' backend. This could be because the operator doesn't exist for this backend, or was ...
        "test_bmm_xpu_float64",  # - NotImplementedError: Could not run 'aten::bmm' with arguments from the 'SparseXPU' backend. This could be because the operator doesn't exist for this backend, or was ...
        "test_dsmm_xpu_float64",  # - NotImplementedError: Could not run 'aten::mm' with arguments from the 'SparseXPU' backend. This could be because the operator doesn't exist for this backend, or was o...
        "test_empty_like_xpu_complex128",  # - AssertionError: "Could not run 'aten::empty_strided' with arguments from the 'Sparse(CPU|CUDA)' backend" does not match "Could not run 'aten::empty_strided' with argu...
        "test_empty_like_xpu_float64",  # - AssertionError: "Could not run 'aten::empty_strided' with arguments from the 'Sparse(CPU|CUDA)' backend" does not match "Could not run 'aten::empty_strided' with argu...
        "test_factory_device_type_inference_xpu",  # - RuntimeError: PyTorch is not linked with support for cuda devices
        "test_hsmm_xpu_float64",  # - NotImplementedError: Could not run 'aten::hspmm' with arguments from the 'SparseXPU' backend. This could be because the operator doesn't exist for this backend, or wa...
        "test_log_softmax_zero_nnz_xpu_float32",  # - NotImplementedError: Could not run 'aten::_sparse_log_softmax' with arguments from the 'SparseXPU' backend. This could be because the operator doesn't exist for this ...
        "test_log_softmax_zero_nnz_xpu_float64",  # - NotImplementedError: Could not run 'aten::_sparse_log_softmax' with arguments from the 'SparseXPU' backend. This could be because the operator doesn't exist for this ...
        "test_mv_xpu_float64",  # - NotImplementedError: Could not run 'aten::mm' with arguments from the 'SparseXPU' backend. This could be because the operator doesn't exist for this backend, or was o...
        "test_new_device_single_gpu_xpu",  # - RuntimeError: PyTorch was compiled without CUDA support
        "test_print_coalesced_xpu_float64",  # - RuntimeError: I got this output for TestSparseXPU.test_print_coalesced_xpu_float64:
        "test_print_uncoalesced_xpu_float64",  # - RuntimeError: I got this output for TestSparseXPU.test_print_uncoalesced_xpu_float64:
        "test_softmax_xpu_float64",  # - NotImplementedError: Could not run 'aten::_sparse_softmax' with arguments from the 'SparseXPU' backend. This could be because the operator doesn't exist for this back...
        "test_softmax_zero_nnz_xpu_float32",  # - NotImplementedError: Could not run 'aten::_sparse_softmax' with arguments from the 'SparseXPU' backend. This could be because the operator doesn't exist for this back...
        "test_softmax_zero_nnz_xpu_float64",  # - NotImplementedError: Could not run 'aten::_sparse_softmax' with arguments from the 'SparseXPU' backend. This could be because the operator doesn't exist for this back...
        "test_sparse_addmm_xpu_bfloat16",  # - NotImplementedError: Could not run 'aten::addmm' with arguments from the 'SparseXPU' backend. This could be because the operator doesn't exist for this backend, or wa...
        "test_sparse_addmm_xpu_complex128",  # - NotImplementedError: Could not run 'aten::addmm' with arguments from the 'SparseXPU' backend. This could be because the operator doesn't exist for this backend, or wa...
        "test_sparse_addmm_xpu_float16",  # - NotImplementedError: Could not run 'aten::addmm' with arguments from the 'SparseXPU' backend. This could be because the operator doesn't exist for this backend, or wa...
        "test_sparse_addmm_xpu_float64",  # - NotImplementedError: Could not run 'aten::addmm' with arguments from the 'SparseXPU' backend. This could be because the operator doesn't exist for this backend, or wa...
        "test_sparse_matmul_xpu_complex128",  # - RuntimeError: Double and complex datatype matmul is not supported in oneDNN
        "test_sparse_matmul_xpu_complex64",  # - RuntimeError: Double and complex datatype matmul is not supported in oneDNN
        "test_sparse_matmul_xpu_float32",  # - NotImplementedError: Could not run 'aten::_sparse_sparse_matmul' with arguments from the 'SparseXPU' backend. This could be because the operator doesn't exist for thi...
        "test_sparse_matmul_xpu_float64",  # - RuntimeError: Double and complex datatype matmul is not supported in oneDNN
        "test_sparse_mm_xpu_float64",  # - NotImplementedError: Could not run 'aten::addmm' with arguments from the 'SparseXPU' backend. This could be because the operator doesn't exist for this backend, or wa...
        "test_sparse_sum_xpu_float64",  # - NotImplementedError: Could not run 'aten::_sparse_sum_backward' with arguments from the 'SparseXPU' backend. This could be because the operator doesn't exist for this...
    ),
}
