import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import pandas as pd
import pickle


# Corpus dataset: parallel English/Chinese sentences.
class wzbDataset(Dataset):
    """Parallel English/Chinese corpus.

    Sentences are kept as raw token sequences (strings iterate character
    by character) and converted to vocabulary indices on access.
    """

    def __init__(self, en_data, ch_data, en_word_2_index, ch_word_2_index):
        self.en_data = en_data
        self.ch_data = ch_data
        self.en_word_2_index = en_word_2_index
        self.ch_word_2_index = ch_word_2_index

    def __getitem__(self, index):
        """Return one (english_indices, chinese_indices) pair of int lists."""
        en_sen = self.en_data[index]
        ch_sen = self.ch_data[index]
        en_index = [self.en_word_2_index[e] for e in en_sen]
        ch_index = [self.ch_word_2_index[c] for c in ch_sen]
        return en_index, ch_index

    # collate_fn for the DataLoader: pad only to the longest sentence in
    # this batch (not the whole corpus) to keep the padded tensors small,
    # and wrap the Chinese target in <BOS> ... <EOS> so the input/label
    # shift in Seq2seq.forward lines up.
    def batch_data_process(self, batch_datas):
        global device  # module-level device set in the script body
        en_index = [en for en, _ in batch_datas]
        ch_index = [ch for _, ch in batch_datas]
        # Longest sentence per language in this batch (0 for an empty batch).
        max_en_len = max(map(len, en_index), default=0)
        max_ch_len = max(map(len, ch_index), default=0)

        en_pad = self.en_word_2_index["<PAD>"]
        ch_pad = self.ch_word_2_index["<PAD>"]
        ch_bos = self.ch_word_2_index["<BOS>"]
        ch_eos = self.ch_word_2_index["<EOS>"]

        # English: right-pad to the batch maximum.
        en_index = [ei + [en_pad] * (max_en_len - len(ei)) for ei in en_index]
        # Chinese: <BOS> + sentence + <EOS>, then right-pad; every row ends
        # up exactly max_ch_len + 2 tokens long.
        ch_index = [[ch_bos] + ci + [ch_eos] + [ch_pad] * (max_ch_len - len(ci)) for ci in ch_index]

        en_index = torch.tensor(en_index, device=device)
        ch_index = torch.tensor(ch_index, device=device)
        return en_index, ch_index

    def __len__(self):
        assert len(self.en_data) == len(self.ch_data)
        return len(self.en_data)


class Encoder(nn.Module):
    """Encode an English index sequence into a final LSTM state."""

    def __init__(self, encoder_embedding_num, encoder_hidden_num, en_corpus_len):
        super().__init__()
        self.embedding = nn.Embedding(en_corpus_len, encoder_embedding_num)
        self.lstm = nn.LSTM(encoder_embedding_num, encoder_hidden_num, batch_first=True)

    def forward(self, en_index):
        """Return the (h, c) state after the LSTM reads the whole batch."""
        embedded = self.embedding(en_index)
        _, final_state = self.lstm(embedded)
        return final_state


class Decoder(nn.Module):
    """Produce Chinese-side hidden states, seeded by the encoder state."""

    def __init__(self, decoder_embedding_num, decoder_hidden_num, ch_corpus_len):
        super().__init__()
        self.embedding = nn.Embedding(ch_corpus_len, decoder_embedding_num)
        self.lstm = nn.LSTM(decoder_embedding_num, decoder_hidden_num, batch_first=True)

    def forward(self, decoder_input, hidden):
        """Run the LSTM over decoder_input from the given (h, c) state."""
        embedded = self.embedding(decoder_input)
        outputs, new_hidden = self.lstm(embedded, hidden)
        return outputs, new_hidden


class Seq2seq(nn.Module):
    """Encoder-decoder translation model trained with teacher forcing.

    forward() returns the cross-entropy loss directly rather than logits.
    """

    def __init__(self, encoder_embedding_num, encoder_hidden_num, en_corpus_len, decoder_embedding_num, decoder_hidden_num, ch_corpus_len):
        super().__init__()
        self.encoder = Encoder(encoder_embedding_num, encoder_hidden_num, en_corpus_len)
        self.decoder = Decoder(decoder_embedding_num, decoder_hidden_num, ch_corpus_len)
        # Projects each decoder hidden state onto the Chinese vocabulary.
        self.classifier = nn.Linear(decoder_hidden_num, ch_corpus_len)
        self.cross_loss = nn.CrossEntropyLoss()

    def forward(self, en_index, ch_index):
        """Compute the training loss for one padded batch.

        Each ch_index row looks like <BOS> w1 ... wn <EOS> <PAD>...; the
        decoder input drops the last token and the label drops the first,
        so the model always predicts the next token.
        """
        decoder_input = ch_index[:, :-1]
        label = ch_index[:, 1:]
        hidden_state = self.encoder(en_index)
        decoder_output, _ = self.decoder(decoder_input, hidden_state)
        logits = self.classifier(decoder_output)
        # Flatten (batch, seq) so CrossEntropyLoss sees one token per row.
        return self.cross_loss(logits.reshape(-1, logits.shape[-1]), label.reshape(-1))


def get_datas(num=-1, path='./datas/translate.csv'):
    """Load the parallel corpus from a CSV file.

    Args:
        num: number of sentence pairs to return; -1 (default) returns all.
        path: CSV file with "english" and "chinese" columns.

    Returns:
        (english_sentences, chinese_sentences), two lists of equal length.
    """
    received_datas = pd.read_csv(path)
    en_datas = list(received_datas["english"])
    ch_datas = list(received_datas["chinese"])
    if num == -1:
        return en_datas, ch_datas
    return en_datas[:num], ch_datas[:num]


def translate(sentence):
    """Greedily decode a translation of `sentence` (iterated per character).

    Stops at the first predicted <EOS> or after ~50 emitted tokens.
    Raises KeyError if a character is missing from the English vocabulary.
    """
    global ch_word_2_index, ch_index_2_word, en_word_2_index, model, device
    en_index = torch.tensor([[en_word_2_index[se] for se in sentence]], device=device)
    result = ""
    # Inference only: disable autograd so no graph is built per decode step.
    with torch.no_grad():
        decoder_hidden = model.encoder(en_index)
        decoder_input = torch.tensor([[ch_word_2_index["<BOS>"]]], device=device)
        step = 0

        # Decode one token at a time until the end marker is predicted.
        while True:
            decoder_output, decoder_hidden = model.decoder(decoder_input, decoder_hidden)
            pre_ans = model.classifier(decoder_output)
            # Greedy choice: a single time step, so argmax yields one index.
            word_index = int(torch.argmax(pre_ans, dim=-1))
            word = ch_index_2_word[word_index]

            if word == "<EOS>" or step > 50:
                break
            result += word
            step += 1

            # Feed the predicted token back in as the next decoder input.
            decoder_input = torch.tensor([[word_index]], device=device)

    return result


# Each .vec file holds two pickled objects back to back: a word->index
# dict followed by an index->word list.  (pickle.load is acceptable here
# only because the files are produced locally by our own preprocessing —
# never unpickle untrusted data.)
with open('./datas/en1.vec', 'rb') as f:
    en_word_2_index = pickle.load(f)
    en_index_2_word = pickle.load(f)

with open('./datas/ch1.vec', 'rb') as f:
    ch_word_2_index = pickle.load(f)
    ch_index_2_word = pickle.load(f)

en_datas, ch_datas = get_datas()
ch_corpus_len = len(ch_index_2_word)
en_corpus_len = len(en_index_2_word)
# "<PAD>" pads shorter sequences until every sequence in a batch has the
# same length; "<BOS>" and "<EOS>" mark the start and end of a sequence.
# The special tokens are appended at the end of each vocabulary, so their
# indices are the previous corpus sizes.
ch_word_2_index.update({"<PAD>": ch_corpus_len, "<BOS>": ch_corpus_len + 1, "<EOS>": ch_corpus_len + 2})
en_word_2_index.update({"<PAD>": en_corpus_len})
ch_index_2_word.append("<PAD>")
ch_index_2_word.append("<BOS>")
ch_index_2_word.append("<EOS>")
en_index_2_word.append("<PAD>")
# Keep the corpus sizes in sync with the special tokens just added.
ch_corpus_len += 3
en_corpus_len += 1

device = "cuda:0" if torch.cuda.is_available() else "cpu"
# The English vocabulary is comparatively small, so a smaller embedding suffices.
encoder_embedding_num = 50
encoder_hidden_num = 100

# Keep the two hidden sizes equal: the encoder's final LSTM state is fed
# directly into the decoder's LSTM.
decoder_embedding_num = 108
decoder_hidden_num = 100
batch_size = 3  # number of samples drawn per training step
epoch = 100
lr = 0.001

dataset = wzbDataset(en_datas, ch_datas, en_word_2_index, ch_word_2_index)
# batch_data_process pads each batch to its own longest sentence.
# NOTE(review): shuffle=False replays the corpus in the same order every
# epoch — consider shuffle=True for training; confirm before changing.
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, collate_fn=dataset.batch_data_process)
model = Seq2seq(encoder_embedding_num, encoder_hidden_num, en_corpus_len, decoder_embedding_num, decoder_hidden_num, ch_corpus_len)
model = model.to(device)
opt = torch.optim.Adam(model.parameters(), lr=lr)

for e in range(epoch):
    # The DataLoader calls dataset.__getitem__ per sample, then the
    # collate_fn to build each padded batch tensor pair.
    for en_index, ch_index in dataloader:
        loss = model(en_index, ch_index)
        loss.backward()
        opt.step()
        opt.zero_grad()
    # Reports only the loss of the last batch of the epoch.
    print(f"loss:{loss: .7f}")

# Interactive loop: read a sentence from stdin and print its translation.
while True:
    s = input()
    ans = translate(s)
    print(ans)
