# import torch
# a = torch.rand((2, 3))
# a_list = [a for _ in range(3)]
# A = torch.stack(a_list).unsqueeze(1).unsqueeze(2)
#
# print('list = ', a_list)
# print('type of a_list : ', type(a_list))
# print('size of a_list : ', [len(a_list), len(a_list[0]), len(a_list[0][0])])
# print('type of transform: ', type(A))
# print('shape of transform: ', A.shape)
# print('A', A)


# import torch
#
# # Assume the input tensor is x, with shape (128, 1, 32)
# x = torch.randn(128, 1, 32)
# x = torch.nn.functional.softmax(x, dim=-1)
#
# keep_masks = []
# for i in range(x.shape[0]):
#     values, indices = torch.topk(x[i, :, :], k=x.shape[-1] // 2, sorted=True)
#     values = values.squeeze(dim=0)
#     threshold = values[-1]
#     keep_masks.append((x[i, :, :].squeeze(dim=0) > threshold).float())
# keep_masks = torch.stack(keep_masks)
# keep_masks = keep_masks.unsqueeze(dim=1)
#
# x = keep_masks * x

# import torch
#
# x = torch.randn(128, 32, 32)
# x = torch.nn.functional.softmax(x)
# x = torch.abs(x)
# print(x)
# count = 0
# values, indices = torch.topk(x, k=int(x.shape[-1] * 0.5),dim=-1, sorted=True)
# threshold = values[:, :, -1]  # per-row threshold (smallest value kept by topk)
# print('threshold', threshold, threshold.shape)
# keep_masks = (x > threshold.unsqueeze(-1)).float()
# print(x * keep_masks)



# print((x[0, :, :])[0, :])
# for i in range(x.shape[0]):
#     for j in range(x.shape[1]):
#         values, indices = torch.topk(x[i, :, :][j, :], k=x.shape[-1] // 2, sorted=True)
#         threshold = values[-1]
#         keep_masks.append((x[i, :, :][j, :] > threshold).float())
# keep_masks = torch.stack(keep_masks)
# keep_masks = keep_masks.view(128, 32, 32)
# print(x * keep_masks)

# print(x[0, :, :, ], x[0, :, :, ].shape)

# keep_masks = []
# for i in range(x.shape[0]):
#         for x1 in x[i, :, :]:
#             values, indices = torch.topk(x1, k=x.shape[-1] // 2, sorted=True)
#             print('value', values)
#             print('indices', indices)
#             threshold = values[-1]
#             print('threshold', threshold)
#             print('x1', x1)
#             keep_masks.append((x1 > threshold).float())

# print(keep_masks)
# keep_masks = torch.stack(keep_masks)
# print(keep_masks.shape)
# keep_masks = keep_masks.unsqueeze(dim=1)

# for x1 in x[0, :, :, ]:
#     print(x1)





# import torch as t
# x = t.randn(128, 32, 32)
# x = t.nn.functional.softmax(x, dim=-1)
# print('x', x)
# count = 0
# keep_masks = []
# for l_x in x:
#     # print(f'{count}', l_x)
#     for l_x_x in l_x:
#         values, indices = t.topk(l_x_x, k=len(l_x_x) // 2, sorted=True)
#         threshold = values[-1]
#         mask = (l_x_x > threshold).float()
#         keep_masks.append(mask)
#         count += 1
# keep_masks = t.stack(keep_masks)
# keep_masks = keep_masks.view(128, -1, 32)
# x = x * keep_masks

# import torch
# from torch import nn
# torch.manual_seed(0)
#
# vocab_size = 4  # vocabulary size is 4
# embedding_dim = 3  # embedding dimension is 3
# weight = torch.randn(4, 3)  # randomly initialized weight matrix
#
# print('weight', weight)
#
# # Keep the linear layer and the embedding layer sharing the same weights
# linear_layer = nn.Linear(4, 3, bias=False)
# linear_layer.weight.data = weight.T  # note the transpose
# emb_layer = nn.Embedding(4, 3)
# emb_layer.weight.data = weight
#
# print('emb_layer', emb_layer.weight.data)
# print('emb_layer', emb_layer)
# idx = torch.tensor([2, 3])  # assume these words have indices 2 and 3 in word2idx
#
# word = torch.tensor([0, 0, 1, 0]).to(torch.float)  # one-hot representation of the word at index 2
# word1 = torch.tensor([0, 0, 0, 1]).to(torch.float)
# output = emb_layer(idx)
# print(emb_layer(idx))
# # tensor([ 0.4033,  0.8380, -0.7193], grad_fn=<EmbeddingBackward0>)
# print(linear_layer(word))
# # tensor([ 0.4033,  0.8380, -0.7193], grad_fn=<SqueezeBackward3>)
# print(linear_layer(word1))
#
#
# print('output', output)
#
# def print_grad(grad):
#     print('grad', grad)
#
# emb_layer.weight.register_hook(print_grad)
#
# output.sum().backward()
#
# print('out_put2', output)