import torch

# Two demo tensors sharing the same values 0..11:
# a 1-D vector and an independent 3x4 matrix (separate storage, so the
# in-place fill_() demo at the end of the file mutates each independently).
a_1d = torch.arange(0, 12)
b_2d = torch.arange(0, 12).reshape(3, -1)
print(a_1d)
# tensor([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11])
print(b_2d)
# tensor([[ 0,  1,  2,  3],
#         [ 4,  5,  6,  7],
#         [ 8,  9, 10, 11]])

"""⭐  x.shape 或 x.size(): 返回张量的维度（形状）。  """
print(f"a_1d.shape: {a_1d.shape}, a_1d.size(): {a_1d.size()}")
# a_1d.shape: torch.Size([12]), a_1d.size(): torch.Size([12])
print(f"b_2d.shape: {b_2d.shape}, b_2d.size(): {b_2d.size()}")
# b_2d.shape: torch.Size([3, 4]), b_2d.size(): torch.Size([3, 4])

"""⭐  x.ndimension(): 返回张量的维度数量。  """
print(f"a_1d.ndimension(): {a_1d.ndimension()}")
# a_1d.ndimension(): 1
print(f"b_2d.ndimension(): {b_2d.ndimension()}")
# b_2d.ndimension(): 2

"""⭐  x.device: 返回张量所在的设备（CPU 或 GPU）。  """
print(f"a_1d.device: {a_1d.device}")
# a_1d.device: cpu
print(f"b_2d.device: {b_2d.device}")
# b_2d.device: cpu

"""⭐  x.is_cuda: 判断张量是否在 GPU 上。  """
print(f"a_1d.is_cuda: {a_1d.is_cuda}")
# a_1d.is_cuda: False
print(f"b_2d.is_cuda: {b_2d.is_cuda}")
# b_2d.is_cuda: False

"""⭐  x.dtype: 返回张量的数据类型。  """
print(f"a_1d.dtype: {a_1d.dtype}")
# a_1d.dtype: torch.int64
print(f"b_2d.dtype: {b_2d.dtype}")
# b_2d.dtype: torch.int64

"""⭐  x.numel():  返回张量中的元素总数。  """
print(f"a_1d.numel(): {a_1d.numel()}, len(a_1d): {len(a_1d)}")
# a_1d.numel(): 12, len(a_1d): 12
print(f"b_2d.numel(): {b_2d.numel()}, len(b_2d): {len(b_2d)}")
# b_2d.numel(): 12, len(b_2d): 3

"""⭐  x.clone(): 创建一个 x 的副本。  """
print(f"a_1d.clone(): {a_1d.clone()}")
# a_1d.clone(): tensor([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11])
print(f"b_2d.clone(): {b_2d.clone()}")
# b_2d.clone(): tensor([[ 0,  1,  2,  3],
#                       [ 4,  5,  6,  7],
#                       [ 8,  9, 10, 11]])

"""⭐  x.numpy(): 将张量转换为 NumPy 数组。  """
print(f"a_1d.numpy(): {a_1d}")
# a_1d.numpy(): tensor([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11])
print(f"b_2d.numpy(): {b_2d}")
# b_2d.numpy(): tensor([[ 0,  1,  2,  3],
#         [ 4,  5,  6,  7],
#         [ 8,  9, 10, 11]])

"""⭐  x.tolist(): 将张量转换为 Python 列表。  """
print(f"a_1d.tolist(): {a_1d}")
# a_1d.tolist(): tensor([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11])
print(f"b_2d.tolist(): {b_2d}")
# b_2d.tolist(): tensor([[ 0,  1,  2,  3],
#                        [ 4,  5,  6,  7],
#                        [ 8,  9, 10, 11]])

"""⭐  x.T 或 x.transpose(): 返回张量的转置。  transpose可以接受两个参数dim0和dim1，用于指定要交换的两个维度。在处理高维张量时可以交换任意两个维度。  """
# print(f"a_1d.T: {a_1d.T}, a_1d.transpose(): {torch.transpose(a_1d, 1, 0)}")  # 一维无法交换维度
print(f"b_2d.T: {b_2d.T}, \nb_2d.transpose(): {torch.transpose(b_2d, 1, 0)}")
# b_2d.T: tensor([[ 0,  4,  8],
#                 [ 1,  5,  9],
#                 [ 2,  6, 10],
#                 [ 3,  7, 11]]),
# b_2d.transpose(): tensor([[ 0,  4,  8],
#                           [ 1,  5,  9],
#                           [ 2,  6, 10],
#                           [ 3,  7, 11]])

"""⭐  x.view(): 改变张量的形状。  """
print(f"a_1d.view(): {a_1d.view(2, 6)}")
# a_1d.view(): tensor([[ 0,  1,  2,  3,  4,  5],
#         [ 6,  7,  8,  9, 10, 11]])
print(f"b_2d.view(): {b_2d.view(2, 6)}")
# b_2d.view(): tensor([[ 0,  1,  2,  3,  4,  5],
#         [ 6,  7,  8,  9, 10, 11]])

"""⭐  x.unsqueeze(): 在指定位置插入一个维度。  """
print(f"a_1d.unsqueeze(1): {a_1d.unsqueeze(1)}")
# a_1d.unsqueeze(1): tensor([[ 0],
#                            [ 1],
#                            [ 2],
#                            [ 3],
#                            [ 4],
#                            [ 5],
#                            [ 6],
#                            [ 7],
#                            [ 8],
#                            [ 9],
#                            [10],
#                            [11]])
print(f"b_2d.unsqueeze(1): {b_2d.unsqueeze(1)}")
# b_2d.unsqueeze(1): tensor([[[ 0,  1,  2,  3]],
#                            [[ 4,  5,  6,  7]],
#                            [[ 8,  9, 10, 11]]])

"""⭐  x.squeeze(): 移除张量中维度大小为 1 的维度，没有则不执行。  """
# print(f"a_1d: \n{a_1d}\nb_2d: \n{b_2d}")
print(f"a_1d.squeeze(): {a_1d.squeeze(1)}")  # 没有维度大小为 1  的维度
# a_1d.squeeze(): tensor([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11])
print(f"b_2d.squeeze(1): {b_2d.squeeze(1)}")  # 没有维度大小为 1  的维度
# b_2d.squeeze(): tensor([[ 0,  1,  2,  3],
#                         [ 4,  5,  6,  7],
#                         [ 8,  9, 10, 11]])

"""⭐  x.sum(): 返回张量中所有元素的和。  """
print(f"a_1d.sum(): {a_1d.sum()}")
# a_1d.sum(): 66
print(f"b_2d.sum(): {b_2d.sum()}")
# b_2d.sum(): 66

"""⭐  x.mean(): 返回张量中所有元素的平均值。【mean() 函数需要浮点数类型（如 float32 或 float64）来计算均值。】  """
print(f"a_1d.mean(): {a_1d.float().mean(dim=0)}")
# a_1d.mean(): 5.5
print(f"b_2d.mean(): {b_2d.float().mean(dim=0)}")
# b_2d.mean(): tensor([4., 5., 6., 7.])

"""⭐  x.min()` 和 `x.max(): 返回张量中的最小值和最大值。  """
print(f"a_1d.min(): {a_1d.min()}")
# a_1d.min(): 0
print(f"b_2d.min(): {b_2d.min()}")
# b_2d.min(): 0

"""⭐  x.eq(): 判断张量中的元素是否等于指定值。  """
print(f"a_1d.eq(0): {a_1d.eq(0)}")
# a_1d.eq(0): tensor([ True, False, False, False, False, False, False, False, False, False, False, False])
print(f"b_2d.eq(0): {b_2d.eq(0)}")
# b_2d.eq(0): tensor([[ True, False, False, False],
#                     [False, False, False, False],
#                     [False, False, False, False]])


"""⭐  x.requires_grad: 判断张量是否需要计算梯度。  """
print(f"a_1d.requires_grad: {a_1d.requires_grad}")
# a_1d.requires_grad: False
print(f"b_2d.requires_grad: {b_2d.requires_grad}")
# b_2d.requires_grad: False

"""⭐  x.is_leaf: 判断张量是否为叶子张量（不依赖于其他张量）。  """
print(f"a_1d.is_leaf: {a_1d.is_leaf}")
# a_1d.is_leaf: True
print(f"b_2d.is_leaf: {b_2d.is_leaf}")
# b_2d.is_leaf: True

"""⭐  x.fill_(): 用指定的值填充张量。  """
print(f"a_1d.fill_(0): {a_1d.fill_(0)}")
# a_1d.fill_(0): tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
print(f"b_2d.fill_(0): {b_2d.fill_(0)}")
# b_2d.fill_(0): tensor([[0, 0, 0, 0],
#                        [0, 0, 0, 0],
#                        [0, 0, 0, 0]])























