import torch.nn as nn
from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler
import torch
import pickle
from stanfordcorenlp import StanfordCoreNLP
import torch.nn.functional as F
import math
import torch.optim as optim
# NOTE(review): this import duplicates the one at the top of the file — safe,
# but one of the two should be removed.
from stanfordcorenlp import StanfordCoreNLP
# Connects to a local Stanford CoreNLP installation, used only for English
# tokenization in translate() below.
# NOTE(review): the hard-coded Windows path makes the script non-portable —
# consider reading the CoreNLP location from an env var or config file.
nlp = StanfordCoreNLP('E:/stanford-corenlp-full-2016-10-31', lang='en')

class TransformerSeq2seq(nn.Module):
    """Encoder-decoder Transformer whose forward() returns the training loss.

    forward(scr, tgt) runs teacher forcing: the decoder input is ``tgt``
    without its last token, the label is ``tgt`` shifted left by one, and the
    cross-entropy loss over the flattened predictions is returned.

    Args:
        scr_vocab_size: source (English) vocabulary size.
        tgt_vocab_size: target (Chinese) vocabulary size.
        d_model: embedding / hidden dimension.
        num_heads: attention heads per layer.
        num_layers: number of stacked encoder and decoder layers.
        ff_dim: hidden width of the position-wise feed-forward blocks.
        dropout: dropout probability.
        scr_pad_idx: <PAD> index in the source vocabulary (default 67077,
            the value previously hard-coded for the English vocab).
        tgt_pad_idx: <PAD> index in the target vocabulary (default 88577,
            the value previously hard-coded for the Chinese vocab).
    """

    def __init__(self, scr_vocab_size, tgt_vocab_size, d_model=256, num_heads=4,
                 num_layers=3, ff_dim=1024, dropout=0.2,
                 scr_pad_idx=67077, tgt_pad_idx=88577):
        super(TransformerSeq2seq, self).__init__()
        self.scr_pad_idx = scr_pad_idx
        self.tgt_pad_idx = tgt_pad_idx
        # padding_idx keeps the <PAD> embedding at zero and excludes it from updates.
        self.encoder_embedding = nn.Embedding(scr_vocab_size, d_model, padding_idx=scr_pad_idx)
        self.decoder_embedding = nn.Embedding(tgt_vocab_size, d_model, padding_idx=tgt_pad_idx)
        self.pos_encoder = PositionalCodeing(d_model)

        # nn.ModuleList registers each of the num_layers identical layers as a
        # sub-module so their parameters are tracked.
        self.encoder_layers = nn.ModuleList(
            [Encoder_Layer(d_model, num_heads, ff_dim, dropout) for _ in range(num_layers)])
        self.decoder_layers = nn.ModuleList(
            [Decoder_Layer(d_model, num_heads, ff_dim, dropout) for _ in range(num_layers)])

        self.fc_out = nn.Linear(d_model, tgt_vocab_size)

        # ignore_index drops <PAD> label positions from the loss.
        self.cross_entropy_loss = nn.CrossEntropyLoss(ignore_index=tgt_pad_idx)

        # Merge (batch, seq) into one axis so logits/labels line up for the
        # loss. (The original also bound a stray local alias `flatten`; removed.)
        self.flatten_prediction = nn.Flatten(0, 1)
        self.flatten_label = nn.Flatten(0, 1)

    def encoder(self, scr, scr_mask):
        """Embed + positionally encode the source, then run the encoder stack."""
        scr_embed = self.pos_encoder(self.encoder_embedding(scr))
        for layer in self.encoder_layers:
            scr_embed = layer(scr_embed, scr_mask)
        return scr_embed

    def decoder(self, tgt, encoder_output, selfattention_mask, cross_mask):
        """Embed + positionally encode the target, then run the decoder stack."""
        tgt_embed = self.pos_encoder(self.decoder_embedding(tgt))
        for layer in self.decoder_layers:
            tgt_embed = layer(tgt_embed, encoder_output, selfattention_mask, cross_mask)
        return tgt_embed

    def generate_mask(self, q_pad: torch.Tensor, k_pad: torch.Tensor, with_left_mask: bool = False):
        """Build a boolean attention mask (True = position is blocked).

        Args:
            q_pad: [n, q_len] bool, True where the query position is padding.
            k_pad: [n, k_len] bool, True where the key position is padding.
            with_left_mask: additionally block attention to future positions
                (causal mask for decoder self-attention).

        Returns:
            [n, 1, q_len, k_len] bool mask, broadcastable over the head axis.
        """
        n, q_len = q_pad.shape
        k_len = k_pad.shape[1]

        # Padding mask: block if either the query or the key position is <PAD>.
        q_mask = q_pad.unsqueeze(2)  # [n, q_len, 1]
        k_mask = k_pad.unsqueeze(1)  # [n, 1, k_len]
        pad_mask = q_mask | k_mask   # [n, q_len, k_len]

        if with_left_mask:
            # Strict upper triangle blocks each position from seeing the future.
            causal_mask = torch.triu(torch.ones(q_len, k_len, device=q_pad.device), diagonal=1).bool()
            causal_mask = causal_mask.unsqueeze(0).unsqueeze(0)  # [1, 1, q_len, k_len]
        else:
            causal_mask = torch.zeros(1, 1, q_len, k_len, device=q_pad.device, dtype=torch.bool)

        # Combine padding and causal masks; result broadcasts over heads.
        return pad_mask.unsqueeze(1) | causal_mask  # [n, 1, q_len, k_len]

    def forward(self, scr, tgt):
        """Teacher-forced forward pass; returns the scalar training loss.

        Args:
            scr: [n, src_len] source token ids.
            tgt: [n, tgt_len] target token ids (<BOS> ... <EOS>, padded).
        """
        tgt_input = tgt[:, :-1]  # decoder input: everything except the last token
        label = tgt[:, 1:]       # labels: everything except the first token

        # getattr fallback keeps compatibility with checkpoints pickled before
        # the pad indices became instance attributes.
        scr_pad_idx = getattr(self, 'scr_pad_idx', 67077)
        tgt_pad_idx = getattr(self, 'tgt_pad_idx', 88577)
        encoder_padmask = scr == scr_pad_idx
        decoder_padmask = tgt_input == tgt_pad_idx
        scr_mask = self.generate_mask(encoder_padmask, encoder_padmask, with_left_mask=False)
        selfattention_mask = self.generate_mask(decoder_padmask, decoder_padmask, with_left_mask=True)
        cross_mask = self.generate_mask(decoder_padmask, encoder_padmask, with_left_mask=False)

        encoder_output = self.encoder(scr, scr_mask)
        decoder_output = self.decoder(tgt_input, encoder_output, selfattention_mask, cross_mask)
        pre = self.fc_out(decoder_output)

        # [n, t, vocab] -> [n*t, vocab] and [n, t] -> [n*t] for cross-entropy.
        pre = self.flatten_prediction(pre)
        label = self.flatten_label(label)

        return self.cross_entropy_loss(pre, label)

class PositionalCodeing(nn.Module):
    """Sinusoidal positional encoding added to token embeddings.

    Precomputes a (1, max_len, d_model) table at construction time;
    forward() adds the first seq_len rows to its input.

    NOTE(review): the exponent divides by ``d_model // 2`` rather than the
    ``d_model`` of "Attention Is All You Need". Kept as-is because any trained
    weights depend on this exact table — confirm before "fixing".
    """

    def __init__(self, d_model, max_len=1000):
        super(PositionalCodeing, self).__init__()
        assert d_model % 2 == 0, "d_model must be even for positional encoding"
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)  # (max_len, 1)
        exponent_scale = -math.log(10000.0) / (d_model // 2)
        inv_freq = torch.exp(torch.arange(0, d_model, 2).float() * exponent_scale)
        angles = positions * inv_freq  # (max_len, d_model // 2)
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(angles)  # even columns: sine
        table[:, 1::2] = torch.cos(angles)  # odd columns: cosine
        # Buffer: moves with the module across devices, never updated by grads.
        self.register_buffer('pe', table.unsqueeze(0))

    def forward(self, x):
        # x: (batch, seq_len, d_model) — add the matching slice of the table.
        return x + self.pe[:, :x.size(1)]


class Decoder_Layer(nn.Module):
    """Pre-norm Transformer decoder layer.

    Three sublayers — masked self-attention, cross-attention over the encoder
    output, and a position-wise feed-forward block — each applied as
    LayerNorm -> sublayer -> dropout -> residual add.
    """

    def __init__(self, d_model, num_heads, ff_dim, dropout=0.1):
        super(Decoder_Layer, self).__init__()
        self.selfattn = Attention_module(d_model, num_heads)
        self.cross_attn = Attention_module(d_model, num_heads)
        # Feed-forward: expand to ff_dim, GELU, project back to d_model.
        self.ffn = nn.Sequential(
            nn.Linear(d_model, ff_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(ff_dim, d_model),
        )
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, encoder_output, selfattention_mask, cross_mask):
        # 1) Masked self-attention over the target sequence.
        normed = self.norm1(x)
        self_out, _ = self.selfattn(normed, normed, normed, selfattention_mask)
        x = x + self.dropout(self_out)

        # 2) Cross-attention: queries from the decoder, keys/values from the encoder.
        normed = self.norm2(x)
        cross_out, _ = self.cross_attn(normed, encoder_output, encoder_output, cross_mask)
        x = x + self.dropout(cross_out)

        # 3) Position-wise feed-forward with residual connection.
        normed = self.norm3(x)
        x = x + self.dropout(self.ffn(normed))
        return x


class Encoder_Layer(nn.Module):
    """Pre-norm Transformer encoder layer.

    Two sublayers — self-attention and a position-wise feed-forward block —
    each applied as LayerNorm -> sublayer -> dropout -> residual add.
    """

    def __init__(self, d_model, num_heads, ff_dim, dropout=0.1):
        super(Encoder_Layer, self).__init__()
        self.selfattn = Attention_module(d_model, num_heads)
        # Feed-forward: expand to ff_dim, GELU, project back to d_model.
        self.ffn = nn.Sequential(
            nn.Linear(d_model, ff_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(ff_dim, d_model),
        )
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        # Dropout regularizes by randomly zeroing sublayer outputs in training.
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask=None):
        # Self-attention: Q, K and V all come from the same input tensor.
        normed = self.norm1(x)
        attn_out, _ = self.selfattn(normed, normed, normed, mask)
        x = x + self.dropout(attn_out)

        # Feed-forward sublayer with residual connection.
        normed = self.norm2(x)
        x = x + self.dropout(self.ffn(normed))
        return x

class Attention_module(nn.Module):
    def __init__(self, d_model, num_heads):
        super(Attention_module, self).__init__()
        self.d_model = d_model#表示模型的隐藏层维度（或称为特征维度）
        self.num_heads = num_heads
        assert d_model % num_heads == 0, "d_model 必须要被 num_heads整除"
        self.head_dim = d_model // num_heads
        self.scale_factor = torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32))

        self.W_q = nn.Linear(self.d_model, self.d_model)
        self.W_k = nn.Linear(self.d_model, self.d_model)
        self.W_v = nn.Linear(self.d_model, self.d_model)
        self.W_out = nn.Linear(self.d_model, self.d_model)
    def split_heads(self, x):#输入x的形状是(batch_size, seq_len, d_model)时，d_model必须等于num_heads * head_dim，否则view会报错
        #将输入分割为多头
        batch_size, seq_len  = x.size()[:2]
        return x.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2) #将X的维度由原来的三维拆分为四维，1 ，2这两个维度调换位置变成(batch_size, self.num_heads, seq_len, self.head_dim)

    def forward(self, Q, K, V, mask = None):
        """"Q , K , V: 输入矩阵[batch_size, seq_len, d_model]
             mask: 掩码矩阵[batch_size, seq_len, seq_len]"""

        batch_size = Q.size(0)

        Q = self.split_heads(self.W_q(Q))
        K = self.split_heads(self.W_k(K))
        V = self.split_heads(self.W_v(V))

        scores = torch.matmul(Q, K.transpose(-2, -1)) * self.scale_factor.reciprocal()


        #应用掩码
        if mask is not None:




            scores = scores.masked_fill(mask, -1e9)#这里表示如果给出掩码矩阵，那么掩码矩阵中为True的那个位置在scores矩阵中相同的位置的值会被转化为 -1e9
                                                                                                    #scores.masked_fill(mask, value) 是 PyTorch 中的一个函数，用于对张量进行按位替换操作。
                                                                                                   #mask 是一个布尔类型的张量，用于指示要进行替换的位置。
                                                                                                   #value 是一个标量或与被操作的张量 scores 具有相同形状的张量，用于指定替换的值。


        attention_w = F.softmax(scores, dim=-1)#对每个矩阵相同位置的数值进行softmax

        context = torch.matmul(attention_w, V)#α权重与V相乘，得到C向量。

        #合并多头
        context = context.transpose(1, 2).contiguous()
        context = context.view(batch_size, -1, self.d_model)
        output = self.W_out(context)
        return output, attention_w

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Load the tokenized corpora and vocabulary lookup tables from disk.
# NOTE(review): pickle.load executes arbitrary code when unpickling — only
# load pickle files produced by this project, never untrusted input.
with open('zh_split.pickle', 'rb') as f:
    zh_split = pickle.load(f)

with open('en_split.pickle', 'rb') as f:
    en_split = pickle.load(f)

with open('en_word2index.pickle', 'rb') as f:
    en_word2index = pickle.load(f)

with open('en_index2word.pickle', 'rb') as f:
    en_index2word = pickle.load(f)

with open('zh_word2index.pickle', 'rb') as f:
    zh_word2index = pickle.load(f)


with open('zh_index2word.pickle', 'rb') as f:
    zh_index2word = pickle.load(f)



# Sizes before special tokens are appended; the comments below record the
# values observed for this corpus (they match the PAD ids used elsewhere).
zh_vocab_size = len(zh_word2index)#zh_vocab = 88577
en_vocab_size = len(en_word2index)#en_vocab = 67077

# Append special tokens at the end of each vocabulary:
# Chinese gets <PAD>/<BOS>/<EOS>; English gets <PAD>/<UNK>.
zh_word2index.update({"<PAD>" : zh_vocab_size, "<BOS>" : zh_vocab_size + 1, "<EOS>" : zh_vocab_size + 2})
en_word2index.update({"<PAD>" : en_vocab_size})
en_word2index.update({"<UNK>" : en_vocab_size + 1})

# Mirror the special tokens into the index->word tables.
zh_index2word_vocab_size = len(zh_index2word)
zh_index2word.update({zh_index2word_vocab_size : "<PAD>", zh_index2word_vocab_size + 1 : "<BOS>", zh_index2word_vocab_size + 2 : "<EOS>"})

en_index2word_vocab_size = len(en_index2word)
# NOTE(review): only <PAD> is mirrored here — en_index2word has no <UNK>
# entry even though en_word2index does; confirm this asymmetry is intended.
en_index2word.update({en_index2word_vocab_size : "<PAD>"})


# Load the trained model object and switch it to inference mode
# (disables dropout). Same pickle caveat as above applies.
with open("Transformer_model.pkl", "rb") as f:
    model = pickle.load(f)
model.eval()
def translate(sentence):
    """Greedily translate an English sentence into Chinese.

    Tokenizes with Stanford CoreNLP, encodes the source once, then decodes
    one token at a time (arg-max) until <EOS> or ``max_len`` tokens.
    Prints the translation and also returns it.

    Args:
        sentence: English input text.

    Returns:
        The translated Chinese string ("" if tokenization yields nothing).
    """
    model.eval()
    with torch.no_grad():
        tokens = nlp.word_tokenize(sentence)
        # Guard: an empty token list would produce a (1, 0) tensor downstream.
        if not tokens:
            print("Transformer译文：", "")
            return ""

        # Unknown words map to <UNK>; 67077 is the source-side <PAD> id.
        src_indices = [en_word2index.get(word, en_word2index['<UNK>']) for word in tokens]
        src_tensor = torch.tensor([src_indices], device=device)
        src_tensor_mask = src_tensor == 67077
        # generate_mask already returns a bool tensor; the original's extra
        # .bool() call was redundant and has been dropped.
        src_mask = model.generate_mask(src_tensor_mask, src_tensor_mask, False)

        # Encode once; the memory is reused for every decoding step.
        memory = model.encoder(src_tensor, scr_mask=src_mask)

        tgt_indices = [zh_word2index['<BOS>']]
        result = []
        max_len = 50  # hard cap on output length

        for _ in range(max_len):
            tgt_tensor = torch.tensor([tgt_indices], device=device)
            tgt_tensor_mask = tgt_tensor == 88577  # target-side <PAD> id

            # Causal mask for self-attention; padding-only mask for cross-attention.
            selfattention_mask = model.generate_mask(tgt_tensor_mask, tgt_tensor_mask, True)
            cross_mask = model.generate_mask(tgt_tensor_mask, src_tensor_mask, False)

            output = model.decoder(tgt_tensor, memory, selfattention_mask, cross_mask)
            # Greedy decoding: arg-max over the last position's vocabulary logits.
            logits = model.fc_out(output[:, -1, :])
            next_word_idx = logits.argmax(dim=-1).item()
            if next_word_idx == zh_word2index['<EOS>']:
                break
            result.append(zh_index2word[next_word_idx])
            tgt_indices.append(next_word_idx)

        translation = "".join(result)
        print("Transformer译文：", translation)
        return translation

if __name__ == "__main__":
    # Interactive loop: read an English sentence and print its translation.
    while True:
        translate(input("请输入一句英文："))