import torch

# Tensor concatenation and splitting

# Concatenate tensors along an EXISTING dimension.
# torch.cat(tensors,  # sequence of tensors to join
#           dim=0,    # dimension to concatenate along
#           out=None)
flag = False  # flip to True to run this demo
if flag:
    ones = torch.ones((2, 3))

    # dim=0 stacks rows -> (4, 3); dim=1 appends columns -> (2, 6)
    cat_rows = torch.cat([ones, ones], dim=0)
    cat_cols = torch.cat([ones, ones], dim=1)

    print("t:{} shape:{}\nt_0:{} shape:{}\nt_1:{} shape:{}\n".format(
        ones, ones.shape,
        cat_rows, cat_rows.shape,
        cat_cols, cat_cols.shape))

# Concatenate along a NEWLY CREATED dimension.
# torch.stack(tensors,
#             dim=0,
#             out=None)
flag = False  # flip to True to run this demo
if flag:
    base = torch.ones((3, 4))

    # dim=0 inserts the new axis in front -> (2, 3, 4);
    # dim=2 appends it at the back -> (3, 4, 2)
    stacked_front = torch.stack([base, base], dim=0)
    stacked_back = torch.stack([base, base], dim=2)

    print("t:{} shape:{}\nt_stack_0:{} shape:{}\nt_stack_1:{} shape:{}\n".format(
        base, base.shape,
        stacked_front, stacked_front.shape,
        stacked_back, stacked_back.shape))

# Split a tensor into (roughly) equal chunks along one dimension.
# torch.chunk(input,  # tensor to split
#             chunks, # number of chunks
#             dim=0)  # dimension to split along
# Returns a tuple of tensors.
# If the size is not evenly divisible, the last chunk is smaller.
flag = False  # flip to True to run this demo
if flag:
    src = torch.ones((2, 5))
    # 5 columns into 3 chunks -> sizes 2, 2, 1
    pieces = torch.chunk(src, chunks=3, dim=1)
    print(src)

    for i, piece in enumerate(pieces, start=1):
        print("第{}个张量：{}，shape is {}".format(i, piece, piece.shape))

# Split a tensor along a dimension with explicit section lengths.
# torch.split(tensor,
#             split_size_or_sections,  # int: length of every piece;
#                                      # list: one length per piece
#             dim=0)
flag = False  # flip to True to run this demo
if flag:
    src = torch.ones((2, 5))
    print(src)

    # int form:
    # pieces = torch.split(src, 3, dim=1)
    # list form: the lengths must sum to the size of the split
    # dimension, otherwise torch raises an error
    pieces = torch.split(src, [1, 3, 1], dim=1)
    for i, piece in enumerate(pieces, start=1):
        print("第{}个张量：{}，shape is {}".format(i, piece, piece.shape))

# Tensor indexing
# torch.index_select(input, dim, index, out=None)
flag = True  # this demo runs on import
if flag:
    a = torch.randint(0, 9, size=(3, 3))

    # index_select requires a long (int64) index tensor
    inx = torch.tensor([0, 2], dtype=torch.long)
    # select along dim 0: keep rows 0 and 2 (method form of
    # torch.index_select)
    t_select = a.index_select(0, inx)
    print("{}\n{}".format(a, t_select))

# torch.masked_select(input,
#                     mask,  # boolean tensor, same shape as input
#                     out=None)
# Commonly used to filter data: returns a flat 1-D tensor holding
# every element where the mask is True.
flag = False  # flip to True to run this demo
if flag:
    data = torch.randint(0, 9, size=(3, 3))
    # mark entries >= 5 (ge: greater-or-equal; gt, le, lt are the
    # related comparison helpers)
    keep = data.ge(5)
    selected = torch.masked_select(data, keep)
    print("a:{}\nmask:{}\nt_select:{}".format(data, keep, selected))

# Change a tensor's shape.
# torch.reshape(input, shape)
# When the input is contiguous in memory, the result shares its storage.
flag = False  # flip to True to run this demo
if flag:
    original = torch.randint(2, 20, size=(2, 8))
    # -1 lets torch infer that dimension: (2, 8) -> (4, 4)
    reshaped = torch.reshape(original, (-1, 4))
    print("t:{}\nt_reshape:{}".format(original, reshaped))

    # demonstrate the shared storage: a write through one tensor
    # is visible through the other
    original[0][0] = 45
    print("t:{}\nt_reshape:{}".format(original, reshaped))

# Swap two dimensions of a tensor.
# Used e.g. when loading images (RGB -> BGR channel reordering).
# torch.transpose(input,  # tensor to transform
#                 dim0,   # first dimension to swap
#                 dim1)   # second dimension to swap
# torch.t(input) transposes a 2-D tensor; for a matrix it is
# equivalent to torch.transpose(input, 0, 1)
flag = False  # flip to True to run this demo
if flag:
    t = torch.rand((2, 3, 4))
    # swapping dims 1 and 2: (2, 3, 4) -> (2, 4, 3)
    t_transpose = torch.transpose(t, dim0=1, dim1=2)
    # FIX: label typo in the printed message ("t_transpoe" -> "t_transpose")
    print("t:{}\nt_transpose:{}".format(t, t_transpose))

# Remove axes of length 1.
# torch.squeeze(input,
#               dim=None,  # None: drop every length-1 axis
#               out=None)
flag = False  # flip to True to run this demo
if flag:
    t = torch.rand((1, 2, 3, 1))
    squeezed_all = torch.squeeze(t)        # (2, 3): both unit axes dropped
    squeezed_d0 = torch.squeeze(t, dim=0)  # (2, 3, 1): only axis 0 dropped
    squeezed_d2 = torch.squeeze(t, dim=2)  # axis 2 has length 3 -> unchanged
    print("t:{}\nt_sq:{}\nt_0:{}\nt_1:{}".format(
        t.shape, squeezed_all.shape, squeezed_d0.shape, squeezed_d2.shape))

# Insert a new axis of length 1 at position dim.
# torch.unsqueeze(input,
#                 dim,
#                 out=None)
flag = False  # flip to True to run this demo
if flag:
    base = torch.rand((1, 2, 3))
    # new trailing axis: (1, 2, 3) -> (1, 2, 3, 1)
    expanded = torch.unsqueeze(base, 3)
    print(expanded)
