# import torch
# import torch_directml
# a = torch.cuda.is_available()
# print(a)
# b = torch_directml.is_available()
# print(b)
import time

import numpy as np
import torch

# import torch_directml
# print(dir(torch_directml))

# import torch
# import torch_directml
#
# print("CUDA available:", torch.cuda.is_available())
# print("torch_directml available:", torch_directml.is_available())

import numpy as np
import torch
import torch_directml
import time

# Matrix size for the benchmark (size x size).
size = 10000

# Random input matrix. NOTE(review): np.random.rand yields float64, while the
# GPU tensor below is cast to float32 — so NumPy and PyTorch are benchmarked
# at different precisions. Confirm whether that asymmetry is intended.
numpy_array = np.random.rand(size, size)

# Convert the NumPy array to a PyTorch tensor, ensuring a float dtype
# (DirectML kernels operate on float32).
pytorch_tensor = torch.from_numpy(numpy_array).float()

# Create the DirectML device and move the tensor onto it.
dml = torch_directml.device()
pytorch_tensor = pytorch_tensor.to(dml)

# Warm-up: the first GPU matmul pays one-time kernel compilation and
# allocation costs that must not be part of the measurement. Copying the
# result to the CPU forces the queued work to actually finish.
torch.mm(pytorch_tensor, pytorch_tensor).to("cpu")

# NumPy matrix multiplication (synchronous, runs on the CPU).
start_time = time.time()
result_numpy = np.dot(numpy_array, numpy_array)
numpy_time = time.time() - start_time

# PyTorch matrix multiplication on the DirectML device. GPU ops are queued
# asynchronously — torch.mm returns before the computation completes — so we
# copy the result back to the CPU inside the timed region to synchronize.
# (The timing therefore includes the device-to-host transfer.)
start_time = time.time()
result_torch = torch.mm(pytorch_tensor, pytorch_tensor).to("cpu")
torch_time = time.time() - start_time

# Report both wall-clock times.
print("Numpy time:", numpy_time, "s")
print("PyTorch time:", torch_time, "s")