import torch
from torch import nn
from d2l import torch as d2l


# @save
def get_tokens_and_segments(tokens_a, tokens_b=None):
    """Build a BERT input sequence and its segment IDs.

    Wraps ``tokens_a`` as ``<cls> ... <sep>`` (segment 0) and, when given,
    appends ``tokens_b`` plus a trailing ``<sep>`` (segment 1).

    Returns:
        (tokens, segments) — the token list and a parallel list of 0/1
        segment indices of the same length.
    """
    tokens = ['<cls>', *tokens_a, '<sep>']
    # Segment A covers <cls>, tokens_a, and the first <sep>
    segments = [0] * len(tokens)
    if tokens_b is not None:
        part_b = [*tokens_b, '<sep>']
        tokens = tokens + part_b
        # Segment B covers tokens_b and its closing <sep>
        segments = segments + [1] * len(part_b)
    return tokens, segments

"""
token_a = ["a","b","c"]
tokens_b = ["a1","b1","c1"]
print(get_tokens_and_segments(token_a))
print(get_tokens_and_segments(token_a,tokens_b))
(['<cls>', 'a', 'b', 'c', '<sep>'], [0, 0, 0, 0, 0])
(['<cls>', 'a', 'b', 'c', '<sep>', 'a1', 'b1', 'c1', '<sep>'], [0, 0, 0, 0, 0, 1, 1, 1, 1])
1、cls 是序列的开头
2、sep 是序列的分割符号
3、片段分为0片段和1片段
4、图14.8.2 BERT输入序列的嵌入是词元嵌入、片段嵌入和位置嵌入的和
"""

# @save
class BERTEncoder(nn.Module):
    """BERT encoder.

    Sums token, segment, and learnable positional embeddings, then passes
    the result through ``num_layers`` transformer encoder blocks
    (``d2l.EncoderBlock``).

    Args:
        vocab_size: size of the token vocabulary.
        num_hiddens: hidden size of every embedding and encoder block.
        norm_shape, ffn_num_input, ffn_num_hiddens, num_heads, num_layers,
        dropout, key_size, query_size, value_size: forwarded to each
        ``d2l.EncoderBlock``.
        max_len: maximum sequence length covered by the positional
            embedding parameter.
    """
    def __init__(self, vocab_size, num_hiddens, norm_shape, ffn_num_input,
                 ffn_num_hiddens, num_heads, num_layers, dropout,
                 max_len=1000, key_size=768, query_size=768, value_size=768,
                 **kwargs):
        super(BERTEncoder, self).__init__(**kwargs)
        self.token_embedding = nn.Embedding(vocab_size, num_hiddens)
        # Segment IDs are only ever 0 or 1, hence a 2-row embedding table
        self.segment_embedding = nn.Embedding(2, num_hiddens)
        self.blks = nn.Sequential()
        for i in range(num_layers):
            self.blks.add_module(f"{i}", d2l.EncoderBlock(
                key_size, query_size, value_size, num_hiddens, norm_shape,
                ffn_num_input, ffn_num_hiddens, num_heads, dropout, True))
        # In BERT, positional embeddings are learnable, so we create a
        # long-enough positional embedding parameter
        self.pos_embedding = nn.Parameter(torch.randn(1, max_len,
                                                      num_hiddens))

    def forward(self, tokens, segments, valid_lens):
        # X keeps shape (batch_size, max_sequence_length, num_hiddens)
        # throughout this method
        X = self.token_embedding(tokens) + self.segment_embedding(segments)
        # BUG FIX: index the parameter itself, not `.data`. Indexing
        # `.data` detaches the slice from autograd, so the supposedly
        # learnable positional embeddings would never receive gradients.
        X = X + self.pos_embedding[:, :X.shape[1], :]
        for blk in self.blks:
            X = blk(X, valid_lens)
        return X
#
# Smoke test: run a small BERTEncoder on a batch of random token IDs.
vocab_size, num_hiddens, ffn_num_hiddens, num_heads = 10000, 768, 1024, 4
norm_shape, ffn_num_input, num_layers, dropout = [768], 768, 2, 0.2
encoder = BERTEncoder(vocab_size, num_hiddens, norm_shape, ffn_num_input,
                      ffn_num_hiddens, num_heads, num_layers, dropout)
tokens = torch.randint(0, vocab_size, (4, 8))
# Two sentence-pair segment layouts, repeated to fill the batch of 4
segments = torch.tensor(2 * [[0] * 4 + [1] * 4, [0] * 3 + [1] * 5])
print(tokens.shape, segments.shape)
encoded_X = encoder(tokens, segments, None)
# encoded_X holds one num_hiddens-dimensional vector per input token
print(encoded_X.shape)
#
#
# # @save
# class MaskLM(nn.Module):
#     """BERT的掩蔽语言模型任务"""
#
#     def __init__(self, vocab_size, num_hiddens, num_inputs=768, **kwargs):
#         super(MaskLM, self).__init__(**kwargs)
#         self.mlp = nn.Sequential(nn.Linear(num_inputs, num_hiddens),
#                                  nn.ReLU(),
#                                  nn.LayerNorm(num_hiddens),
#                                  nn.Linear(num_hiddens, vocab_size))
#
#     def forward(self, X, pred_positions):
#         num_pred_positions = pred_positions.shape[1]
#         pred_positions = pred_positions.reshape(-1)
#         batch_size = X.shape[0]
#         batch_idx = torch.arange(0, batch_size)
#         # 假设batch_size=2，num_pred_positions=3
#         # 那么batch_idx是np.array（[0,0,0,1,1,1]）
#         batch_idx = torch.repeat_interleave(batch_idx, num_pred_positions)
#         masked_X = X[batch_idx, pred_positions]
#         masked_X = masked_X.reshape((batch_size, num_pred_positions, -1))
#         mlm_Y_hat = self.mlp(masked_X)
#         return mlm_Y_hat
#
#
# mlm = MaskLM(vocab_size, num_hiddens)
# mlm_positions = torch.tensor([[1, 5, 2], [6, 1, 5]])
# mlm_Y_hat = mlm(encoded_X, mlm_positions)
# mlm_Y_hat.shape
#
# mlm_Y = torch.tensor([[7, 8, 9], [10, 20, 30]])
# loss = nn.CrossEntropyLoss(reduction='none')
# mlm_l = loss(mlm_Y_hat.reshape((-1, vocab_size)), mlm_Y.reshape(-1))
# mlm_l.shape
#
#
# # @save
# class NextSentencePred(nn.Module):
#     """BERT的下一句预测任务"""
#
#     def __init__(self, num_inputs, **kwargs):
#         super(NextSentencePred, self).__init__(**kwargs)
#         self.output = nn.Linear(num_inputs, 2)
#
#     def forward(self, X):
#         # X的形状：(batchsize,num_hiddens)
#         return self.output(X)
#
#
# encoded_X = torch.flatten(encoded_X, start_dim=1)
# # NSP的输入形状:(batchsize，num_hiddens)
# nsp = NextSentencePred(encoded_X.shape[-1])
# nsp_Y_hat = nsp(encoded_X)
# print(nsp_Y_hat.shape)
#
# nsp_y = torch.tensor([0, 1])
# nsp_l = loss(nsp_Y_hat, nsp_y)
# print(nsp_l.shape)
