"""
序列到序列的模型实现
"""
from collections import Counter
import random
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset


# class Encoder(nn.Module):
#     """
#     编码器
#     """
#
#     def __init__(self, input_size, hidden_size, num_layers):
#         super(Encoder, self).__init__()
#         self.lstm = nn.LSTM(input_size, hidden_size, num_layers)
#         # LSTM模型
#
#     def forward(self, x, hidden):
#         x, hidden = self.lstm(x, hidden)
#         return hidden  # 只需要输出hidden


class Encoder(nn.Module):
    """GRU encoder.

    Embeds a batch of token-id sequences and runs them through a GRU,
    returning only the final hidden state (the decoder consumes it as
    its initial state).
    """

    def __init__(self, vocab_size, edb_size, hidden_size, num_layers):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, edb_size)
        self.gru = nn.GRU(edb_size, hidden_size, num_layers)

    def forward(self, encoder_input):
        # (batch, seq) ids -> (batch, seq, emb) -> (seq, batch, emb),
        # since the GRU here is time-major (batch_first=False).
        embedded = self.embedding(encoder_input).transpose(0, 1)
        _, hidden = self.gru(embedded)
        # Only the hidden state is needed; per-step outputs are discarded.
        return hidden


# class Decoder(nn.Module):
#     """
#     解码器
#     """
#
#     def __init__(self, output_size, hidden_size, num_layers):
#         super(Decoder, self).__init__()
#         self.lstm = nn.LSTM(output_size, hidden_size, num_layers)  # LSTM模型
#         self.linear = nn.Linear(hidden_size, output_size)
#
#     def forward(self, x, hidden):
#         x, state = self.lstm(x, hidden)
#         x = self.linear(x)
#         return x, state
#

class Decoder(nn.Module):
    """GRU decoder conditioned on the encoder's final hidden state.

    At every time step the embedded input token is concatenated with the
    encoder's top-layer hidden state (used as a fixed context vector),
    and the result is projected to vocabulary logits.
    """

    def __init__(self, vocab_size, edb_size, hidden_size, num_layers):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, edb_size)
        # Input per step = embedding + context, hence edb_size + hidden_size.
        self.gru = nn.GRU(edb_size + hidden_size, hidden_size, num_layers)
        self.out = nn.Linear(hidden_size, vocab_size)

    def forward(self, decoder_input, encoder_state):
        # (batch, seq) -> (seq, batch, emb) for the time-major GRU.
        embedded = self.embedding(decoder_input).transpose(0, 1)
        # Broadcast the top-layer encoder state along the time axis.
        context = encoder_state[-1].unsqueeze(0).expand(embedded.size(0), -1, -1)
        step_input = torch.cat((embedded, context), dim=-1)
        output, state = self.gru(step_input, encoder_state)
        logits = self.out(output)  # (seq, batch, vocab)
        return logits, state


class SeqToSeqModel(nn.Module):
    """Wires an encoder and a decoder into one seq2seq module.

    Forward runs the encoder on the source batch and feeds its final
    state to the decoder alongside the (teacher-forced) decoder inputs.
    """

    def __init__(self, encoder, decoder):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, encoder_inputs, decoder_inputs):
        encoder_state = self.encoder(encoder_inputs)
        return self.decoder(decoder_inputs, encoder_state)


flatten = lambda l: [item for sublist in l for item in sublist]


class Vocab:
    """Bidirectional token <-> index mapping built from token sequences.

    Indices 0 and 1 are reserved for ``<bos>`` and ``<eos>``; remaining
    tokens are numbered from 2 by descending frequency.
    """

    def __init__(self, tokens):
        self.tokens = tokens  # list of token lists (one per sequence)
        counts = Counter(token for seq in tokens for token in seq)
        self.token2index = {"<bos>": 0, "<eos>": 1}
        # most_common() orders by descending frequency (stable on ties),
        # matching sorted(items, key=freq, reverse=True).
        for rank, (token, _) in enumerate(counts.most_common()):
            self.token2index[token] = rank + 2
        # Reverse mapping: id -> token.
        self.index2token = {index: token for token, index in self.token2index.items()}

    def __getitem__(self, query):
        """Look up a token (-> id), an id (-> token), or a list of either."""
        if isinstance(query, str):
            # NOTE: unknown tokens fall back to index 0 (<bos>).
            return self.token2index.get(query, 0)
        if isinstance(query, int):
            return self.index2token.get(query, "<unk>")
        if isinstance(query, (list, tuple)):
            return [self[item] for item in query]

    def __len__(self):
        return len(self.index2token)


# Dataset generation: pair pseudo-phonetic symbols with alphabet letters.
soundmark = ["ei", "bi:", "si:", "di:", "i:", "ef", 'd3i:', "eit", "ai", "d3ei", "kai", "lao:", "enm:", "en:", "o:",
             "pu:", "qu:", "ar:", "es", "ti", "ju:", "vi:", "dblju:", "eks", "wai", "zi"]
alphabet = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u",
            "v", "w", "x", "y", "z"]

print(len(soundmark))
t = 10000
# total number of generated sequence pairs
r = 1
# perturbation term: probability of emitting the correct letter (r = 1 means no noise)
seq_len = 6
src_tokens, tgt_tokens = [], []  # source sequences, target sequences
for i in range(t):
    src, tgt = [], []
    for j in range(seq_len):
        ind = random.randint(0, 25)
        src.append(soundmark[ind])
        if random.random() < r:
            tgt.append(alphabet[ind])
        else:
            # with probability 1 - r, substitute a random (possibly wrong) letter
            tgt.append(alphabet[random.randint(0, 25)])

    src_tokens.append(src)
    tgt_tokens.append(tgt)

print(src_tokens[:2])
print(tgt_tokens[:2])

src_vocab, tgt_vocab = Vocab(src_tokens), Vocab(tgt_tokens)
# Append the end-of-sequence marker <eos> to every sequence before tensorizing.
src_data = torch.tensor([src_vocab[line + ["<eos>"]] for line in src_tokens])
tgt_data = torch.tensor([tgt_vocab[line + ["<eos>"]] for line in tgt_tokens])
# Train/test split 8:2; batch_size = 16 for training, 1 for evaluation.
train_size = int(len(src_data) * 0.8)
test_size = len(src_data) - train_size
batch_size = 16
train_loader = DataLoader(TensorDataset(src_data[:train_size], tgt_data[:train_size]), batch_size=batch_size)
test_loader = DataLoader(TensorDataset(src_data[-test_size:], tgt_data[-test_size:]), batch_size=1)

# FIX: replace the wildcard `from tqdm import *` (PEP 8 discourages star
# imports — they pollute the namespace) with an explicit import of tqdm.
from tqdm import tqdm
import matplotlib.pyplot as plt

# Hyperparameters
lr = 0.001
num_epochs = 20
hidden_size = 128

# Build the model; embedding size is tied to the vocabulary size here.
encoder = Encoder(len(src_vocab), len(src_vocab), hidden_size, num_layers=2)
decoder = Decoder(len(tgt_vocab), len(tgt_vocab), hidden_size, num_layers=2)
model = SeqToSeqModel(encoder, decoder)
# Per-token cross-entropy loss and the Adam optimizer.
criterion = nn.CrossEntropyLoss(reduction="none")
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
# Record the per-batch loss for plotting.
loss_history = []
model.train()
for epoch in tqdm(range(num_epochs)):
    for encoder_inputs, decoder_targets in train_loader:
        # Teacher forcing: the decoder input is <bos> followed by the
        # target sequence shifted right by one position.
        bos_column = torch.tensor([tgt_vocab["<bos>"]] * decoder_targets.shape[0]).reshape(-1, 1)
        decoder_inputs = torch.cat((bos_column, decoder_targets[:, :-1]), dim=1)
        # pred shape: (seq_len, batch_size, vocab_size)
        pred, _ = model(encoder_inputs, decoder_inputs)
        # decoder_targets is (batch_size, seq_len); permute pred to
        # (batch_size, vocab_size, seq_len) as CrossEntropyLoss expects.
        # With reduction="none" the loss is (batch_size, seq_len), one
        # value per token; .mean() averages over all tokens.
        loss = criterion(pred.permute(1, 2, 0), decoder_targets).mean()
        # Backpropagation step.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        loss_history.append(loss.item())

plt.plot(loss_history)
plt.ylabel('train loss')
plt.show()

# Evaluation: greedy decoding on the test set (batch_size == 1, so each
# iteration handles a single sentence).
model.eval()
translation_results = []
correct = 0
error = 0
for src_seq, tgt_seq in test_loader:
    encoder_inputs = src_seq
    hidden = model.encoder(encoder_inputs)
    pred_seq = [tgt_vocab["<bos>"]]
    for _ in range(6):
        # Decode one step at a time; input shape is (batch, seq) = (1, 1).
        decoder_inputs = torch.tensor(pred_seq[-1]).reshape(1, 1)
        # pred shape: (seq_len, batch, vocab) = (1, 1, vocab)
        pred, hidden = model.decoder(decoder_inputs, hidden)
        next_token_index = pred.squeeze().argmax().item()
        if next_token_index == tgt_vocab["<eos>"]:
            break
        pred_seq.append(next_token_index)

    # Drop the leading <bos> and map ids back to tokens.
    pred_seq = tgt_vocab[pred_seq[1:]]
    tgt_seq = tgt_seq.squeeze().tolist()

    # Trim the target at <eos> (if present) before mapping back to tokens.
    if tgt_vocab["<eos>"] in tgt_seq:
        eos_idx = tgt_seq.index(tgt_vocab["<eos>"])
        tgt_seq = tgt_vocab[tgt_seq[:eos_idx]]
    else:
        tgt_seq = tgt_vocab[tgt_seq]
    translation_results.append((" ".join(tgt_seq), " ".join(pred_seq)))

    # Token-level accuracy. BUGFIX: the original looped over
    # range(len(pred_seq)) with a dead `i >= len(pred_seq)` guard, so a
    # too-short prediction never counted its missing tokens as errors, and a
    # prediction longer than the target raised IndexError on tgt_seq[i].
    # Iterate over the longer of the two and count any mismatch or
    # length difference as an error.
    for i in range(max(len(pred_seq), len(tgt_seq))):
        if i < len(pred_seq) and i < len(tgt_seq) and pred_seq[i] == tgt_seq[i]:
            correct += 1
        else:
            error += 1

print(correct / (correct + error))
