import torch

if __name__ == '__main__':
    # Report whether this torch build can see a working CUDA GPU.
    cuda_available = torch.cuda.is_available()
    print(cuda_available)  # True on a machine with a functional CUDA setup

if __name__ == '__main__':
    # Demonstrate torch.cat(): appending one row of a 2-D tensor back onto it.
    mat = torch.randn(3, 4)
    print(mat.shape)  # torch.Size([3, 4])
    print(mat)
    print(mat[0])
    print(mat[0].shape)  # torch.Size([4]) -- integer indexing drops the row dim
    print(mat[2:3])
    print(mat[2:3].shape)  # torch.Size([1, 4]) -- slicing keeps the row dim
    # torch.cat((mat, mat[2])) would raise: cat requires tensors of the same
    # rank, and mat[2] is 1-D while mat is 2-D. Slice with [2:3] instead.
    joined = torch.cat((mat, mat[2:3]))
    print(joined)
    print(joined.shape)  # torch.Size([4, 4])

# What happens when nn.Linear() is given a 3-D input tensor?
if __name__ == '__main__':
    # nn.Linear transforms only the last dimension; all leading dimensions
    # are treated as batch dimensions and pass through unchanged.
    layer = torch.nn.Linear(4, 8)
    batch = torch.randn(16, 12, 4)
    print("input_v.size()", batch.size())  # torch.Size([16, 12, 4])
    out = layer(batch)
    print("y.size()", out.size())  # torch.Size([16, 12, 8])

# What does split() return for a 3-D tensor whose last dimension is 2?
if __name__ == '__main__':
    data = torch.randn(16, 10, 2)
    # split(1, dim=-1): chunks of size 1 along the last axis -> two tensors.
    left, right = data.split(1, dim=-1)
    print(left.shape)  # torch.Size([16, 10, 1])
    print(right.shape)  # torch.Size([16, 10, 1])

# Exercise torch.max()
if __name__ == '__main__':
    cube = torch.randn(3, 4, 5)
    print(cube[:-1].shape)  # torch.Size([2, 4, 5]) -- [:-1] drops the last slice of dim 0
    print(cube.shape)  # torch.Size([3, 4, 5])
    # Without a dim argument, max() reduces over ALL elements to a 0-D tensor.
    print("torch.max()", torch.max(cube))
    # With dim=1 it returns (values, indices); values has dim 1 reduced away.
    print("torch.max()", torch.max(cube, dim=1)[0].shape)  # torch.Size([3, 5])

# Exercise torch.topk()
if __name__ == '__main__':
    print("torch.topk()")
    a_tensor = torch.tensor([
        [1, 2, 4, 6],
        [7, 3, 5, 8],
    ])
    # topk operates along the LAST dim by default, i.e. per row here, and
    # returns a (values, indices) pair, each of shape (rows, k).
    # NOTE: the previous comments claimed 1-D results like tensor([8]);
    # that was wrong -- both outputs keep the row dimension.
    top_k1 = torch.topk(a_tensor, 1)
    print(top_k1[0], top_k1[1])  # tensor([[6], [8]]) tensor([[3], [3]])
    top_k2 = torch.topk(a_tensor, 2)
    print(top_k2[0], top_k2[1])  # tensor([[6, 4], [8, 7]]) tensor([[3, 2], [3, 0]])
    top_k3 = torch.topk(a_tensor, 3)
    print(top_k3[0], top_k3[1])  # tensor([[6, 4, 2], [8, 7, 5]]) tensor([[3, 2, 1], [3, 0, 2]])
    pass


# Try out torch.squeeze()
if __name__ == '__main__':
    a_tensor = torch.randn(3, 1, 4, 1)
    b_tensor = a_tensor.squeeze()
    # FIX: the old comment said squeeze removes dims that are NOT 1 -- it is
    # the opposite: with no argument, ALL size-1 dims are removed.
    print(b_tensor.shape)  # torch.Size([3, 4])
    c_tensor = a_tensor.squeeze(-1)
    print(c_tensor.shape)  # torch.Size([3, 1, 4]) -- only the given dim is removed, and only if its size is 1
    d_tensor = a_tensor.squeeze(0)
    print(d_tensor.shape)  # torch.Size([3, 1, 4, 1]) -- dim 0 has size 3, so squeeze(0) is a no-op
    pass
