import torch

# 1. Creating tensors
shape = (2, 3)

# A 2x3 tensor whose entries are drawn uniformly from [0, 1)
tensor_a = torch.rand(shape)
print("Tensor A:\n", tensor_a)

# A 2x3 tensor filled with zeros
tensor_b = torch.zeros(shape)
print("\nTensor B:\n", tensor_b)

# A 2x3 tensor of ones, with the dtype explicitly set to long (int64)
tensor_c = torch.ones(shape, dtype=torch.long)
print("\nTensor C:\n", tensor_c)

# A tensor built directly from literal nested-list data
tensor_d = torch.tensor([[1.0, 2.0, 3.0], [3.0, 4.0, 5.0]])
print("\nTensor D:\n", tensor_d)

# 2. Tensor arithmetic
# Element-wise addition (same shape, so no broadcasting is involved)
tensor_sum = torch.add(tensor_a, tensor_d)
print("\nSum of Tensor A and Tensor D:\n", tensor_sum)

# Element-wise (Hadamard) product — not matrix multiplication
tensor_product = tensor_a.mul(tensor_d)
print("\nProduct of Tensor A and Tensor D:\n", tensor_product)

# True matrix multiplication: D (2x3) @ D^T (3x2) -> 2x2
tensor_matmul = tensor_d @ tensor_d.T
print("\nMatrix Multiplication of Tensor D:\n", tensor_matmul)

# 3. Automatic differentiation
# A scalar leaf tensor that records operations for gradient computation
x = torch.tensor(2.0, requires_grad=True)
y = x ** 2 + 3 * x + 4
print("\nFunction y = x^2 + 3x + 4, where x = 2.0")
print("y =", y.item())

# Backpropagate; dy/dx = 2x + 3, so x.grad becomes 7.0 at x = 2.0
y.backward()
print("Gradient of y with respect to x:", x.grad.item())

# 4. Moving tensors between CPU and GPU
# Only attempt the transfer when a CUDA device is actually present
if torch.cuda.is_available():
    tensor_gpu = tensor_d.cuda()
    print("\nTensor D moved to GPU:")
    print(tensor_gpu)
    # Copy the tensor back into host (CPU) memory
    tensor_cpu = tensor_gpu.cpu()
    print("\nTensor D moved back to CPU:")
    print(tensor_cpu)
else:
    print("\nCUDA is not available. Tensor operations are performed on CPU.")

# 5. Shape manipulation
# Flatten the 2x3 tensor into a 1-D view of its 6 elements
# (-1 lets PyTorch infer the single remaining dimension)
tensor_reshaped = tensor_d.view(-1)
print("\nTensor D reshaped to 1D:\n", tensor_reshaped)

# Broadcast-expand: add a leading axis, then repeat it twice without copying data
tensor_expanded = tensor_d.unsqueeze(0).expand(2, 2, 3)
print("\nTensor D expanded to shape (2, 2, 3):\n", tensor_expanded)
