import os

import jieba
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm

# Select GPU when available, otherwise fall back to CPU; used by train() below.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class MyCustomDataset(Dataset):
    """Dataset wrapper that reports a length trimmed to whole batches.

    ``__len__`` returns the largest multiple of ``config.batch_size`` that
    fits in the raw data, so the DataLoader never yields a ragged final
    batch (the training loss reshape relies on full batches).
    """

    def __init__(self, data, arg):
        # `data` is a sequence of (english_indices, chinese_indices) pairs;
        # `arg` is the configs object (only batch_size is read here).
        self.data = data
        self.config = arg

    def __getitem__(self, index):
        # Defensive wraparound: indices past the raw data map back into it.
        # With the truncated __len__ below this branch should never trigger.
        if index >= len(self.data):
            return self.data[index % len(self.data)]
        return self.data[index]

    def __len__(self):
        # Number of samples, rounded down to a whole number of batches.
        full_batches = len(self.data) // self.config.batch_size
        return full_batches * self.config.batch_size


def readfile(file_path='./data/e2c.xlsx'):
    """Read a parallel English/Chinese corpus from an Excel workbook.

    Args:
        file_path: Path to the Excel file. Defaults to the project corpus,
            so existing callers (``readfile()``) are unaffected.

    Returns:
        Tuple ``(english_sentences, chinese_sentences)`` taken positionally
        from the first and second columns of the sheet.
    """
    df = pd.read_excel(file_path)

    # Columns are addressed by position, not by header name, so the sheet
    # only needs English in column 0 and Chinese in column 1.
    english_column = df.iloc[:, 0].tolist()
    chinese_column = df.iloc[:, 1].tolist()

    return english_column, chinese_column


def tokenizer(sentence):
    """Segment every sentence with jieba, dropping spaces and appending <EOS>.

    Args:
        sentence: iterable of raw sentence strings.

    Returns:
        List of token lists, each terminated by the "<EOS>" marker.
    """
    token = []
    for text in tqdm(sentence, ncols=50):
        words = [w for w in jieba.cut(text) if w != " "]
        words.append("<EOS>")
        token.append(words)
    return token


def bulid_vocab(data, savename, vocab_dir="./data/vocab"):
    """Build (or reload) a vocabulary list for the given language.

    If ``vocab_dir/vocab_{savename}.txt`` exists it is loaded verbatim, so
    previously trained checkpoints keep their index mapping. Otherwise the
    vocabulary is built from the tokenized corpus, sorted, written to disk,
    and returned.

    BUG FIX: the fresh-build path previously iterated a raw ``set``, whose
    order changes between interpreter runs (string hash randomization),
    despite the original comment claiming the list was sorted. The words are
    now sorted so a regenerated vocab file is deterministic.

    Args:
        data: 2-D list of tokenized sentences.
        savename: language tag used in the vocab filename (e.g. "en", "ch").
        vocab_dir: directory for the vocab file (default keeps old behavior).

    Returns:
        List of words; "<PAD>", "<SOS>", "<EOS>" occupy indices 0-2.
    """
    vocab_path = os.path.join(vocab_dir, f"vocab_{savename}.txt")
    if os.path.isfile(vocab_path):
        with open(vocab_path, "r", encoding="utf8") as f:
            return [line.rstrip("\n") for line in f]

    # Collect every distinct token across the corpus.
    vocabulary = set()
    for paragraph in tqdm(data):
        vocabulary.update(paragraph)

    # Deterministic ordering across runs.
    vocabulary = sorted(vocabulary)

    # Insert special tags at the front; the loop reverses their order, so
    # the final layout is <PAD>=0, <SOS>=1, <EOS>=2 (same as before).
    for t in ["<EOS>", "<SOS>", "<PAD>"]:
        vocabulary.insert(0, t)

    os.makedirs(vocab_dir, exist_ok=True)
    with open(vocab_path, 'w', encoding="utf8") as f:
        for word in vocabulary:
            f.write(f"{word}\n")

    return vocabulary


def mapping(vocab, Data):
    """Convert tokenized sentences into lists of vocabulary indices.

    PERFORMANCE FIX: the original called ``vocab.index(d)`` per token, an
    O(V) linear scan that made the whole pass O(tokens * V). A word->index
    dict is built once instead; ``setdefault`` keeps first-occurrence
    semantics, matching ``list.index`` even if the vocab held duplicates.

    Args:
        vocab: vocabulary word list.
        Data: 2-D list of tokenized sentences.

    Returns:
        Tuple ``(mapped_sentences, max_len)`` where ``max_len`` is the
        longest sentence length in tokens.
    """
    index_of = {}
    for i, word in enumerate(vocab):
        index_of.setdefault(word, i)  # first occurrence wins, like list.index

    map_data = []
    max_len = 0
    for data in tqdm(Data):
        # NOTE: an out-of-vocabulary token now raises KeyError instead of
        # ValueError; neither occurs here since vocab is built from Data.
        sentence = [index_of[d] for d in data]
        map_data.append(sentence)
        if len(sentence) > max_len:
            max_len = len(sentence)
    return map_data, max_len


def dataset():
    """Run the full corpus-preparation pipeline.

    Reads the Excel corpus, tokenizes both languages, builds (or reloads)
    the vocabularies, and maps every sentence to vocabulary indices.

    Returns:
        Tuple of (paired index sequences, english vocab, chinese vocab,
        max english length, max chinese length).
    """
    print("读取文件")
    en_list, ch_list = readfile()

    print("英文分词")
    en_tokens = tokenizer(en_list)
    print("中文分词")
    ch_tokens = tokenizer(ch_list)

    print("英文词汇表建立")
    vocab_en = bulid_vocab(en_tokens, "en")
    print("中文词汇表建立")
    vocab_ch = bulid_vocab(ch_tokens, "ch")

    print("英文映射单词")
    vector_en, en_max_len = mapping(vocab_en, en_tokens)
    print("中文映射单词")
    vector_ch, ch_max_len = mapping(vocab_ch, ch_tokens)

    pairs = list(zip(vector_en, vector_ch))
    return pairs, vocab_en, vocab_ch, en_max_len, ch_max_len


def tensor_padding(tensor_list, seq_len):
    """Right-pad each 1-D tensor with zeros up to ``seq_len``.

    Zero is the correct filler because "<PAD>" ends up at index 0 of both
    vocabularies (it is the last tag inserted at the front in bulid_vocab).

    Args:
        tensor_list: iterable of 1-D integer tensors.
        seq_len: target length; assumed >= every tensor's length.

    Returns:
        List of tensors, each exactly ``seq_len`` long.
    """
    padded = []
    for t in tensor_list:
        tail = seq_len - len(t)  # how many zeros to append on the right
        padded.append(torch.nn.functional.pad(t, (0, tail), mode='constant', value=0))
    return padded


def dateset2loader(config, dataset, en_max_len, ch_max_len):
    """Wrap the dataset in a DataLoader that pads batches to fixed lengths.

    Every batch is padded to the corpus-wide maximum lengths (not the batch
    maximum), so all batches share the same shape.
    """

    def collate_batch(batch):
        # Convert each (english, chinese) index pair to int64 tensors.
        en_seqs = [torch.tensor(en_text, dtype=torch.int64) for en_text, _ in batch]
        ch_seqs = [torch.tensor(ch_text, dtype=torch.int64) for _, ch_text in batch]

        # Pad to the global max lengths and stack into [batch, seq_len].
        en_batch_seq = torch.stack(tensor_padding(en_seqs, en_max_len))
        ch_batch_seq = torch.stack(tensor_padding(ch_seqs, ch_max_len))
        return en_batch_seq, ch_batch_seq

    return DataLoader(dataset, batch_size=config.batch_size, shuffle=False,
                      collate_fn=collate_batch)


def BLEU(predict, ground):
    """Compute a BLEU score between predictions and references.

    Placeholder — not implemented yet; currently returns None.
    TODO: implement (e.g. n-gram precision with brevity penalty).
    """
    pass


def arc_mapping(predict_idx, vocab):
    """Map 2-D index sequences back to their vocabulary words.

    Args:
        predict_idx: iterable of sentences, each an iterable of vocab indices.
        vocab: vocabulary word list.

    Returns:
        2-D list of words mirroring the input structure.
    """
    return [[vocab[word] for word in sentence] for sentence in predict_idx]


def train(config, dataloader, vocab_en, vocab_ch, ch_max_len):
    """Train the Seq2Seq translation model and checkpoint on improvement.

    BUG FIX: ``min_loss`` was initialized to +inf but never reassigned, so
    the "save best model" condition was always true and the checkpoint was
    overwritten every epoch. ``min_loss`` is now updated when we save.

    Args:
        config: configs instance (hyperparameters).
        dataloader: yields (en_batch, ch_batch) padded index tensors.
        vocab_en / vocab_ch: vocabulary word lists.
        ch_max_len: Chinese-side sequence length used to reshape the loss.
    """
    import torch.optim as optim
    from model import Seq2Seq
    model = Seq2Seq(
        len(vocab_en),
        len(vocab_ch),
        config.embedding_size,
        config.hidden_size,
        config.num_layers,
        config.batch_size,
        ch_max_len,
        vocab_ch,
    )
    # model.load_state_dict(torch.load("./models/seq2seq_model.pth"))
    optimizer = optim.Adam(model.parameters(), config.optim_l_r)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[40], gamma=0.1)
    # Ignore <PAD> positions when averaging the loss.
    criterion = nn.NLLLoss(ignore_index=vocab_ch.index("<PAD>"), reduction="mean")

    def get_loss(decoder_outputs, target):
        # The decoder emits probabilities; NLLLoss expects log-probabilities
        # (CrossEntropyLoss == LogSoftmax + NLLLoss), hence the log here.
        decoder_outputs = torch.log(decoder_outputs)
        target = target.view(-1)  # [batch_size * max_len]
        decoder_outputs = decoder_outputs.view(config.batch_size * ch_max_len, -1)

        return criterion(decoder_outputs, target)

    model.train()
    model.to(device)
    min_loss = float("inf")
    for epoch in range(config.epochs):
        total_loss = 0
        correct = 0
        total = 0
        loss_temp = []
        progress_bar = tqdm(total=len(dataloader), desc='Train Epoch {}'.format(epoch), unit='batch')
        for idx, (en_text, ch_text) in enumerate(dataloader):
            optimizer.zero_grad()
            en_text = en_text.to(device)
            ch_text = ch_text.to(device)
            output, hidden = model(en_text)
            loss_t = get_loss(output, ch_text)

            total_loss += loss_t.item()
            loss_t.backward()
            optimizer.step()

            # Greedy decode: most probable vocab index at every position.
            _, predicted = torch.max(output.data, 2)

            # Token-level accuracy (counts <PAD> positions too).
            correct += (predicted == ch_text).sum().item()
            total += ch_text.size(0) * ch_text.size(1)
            acc = 100 * correct / total
            avg_loss = total_loss / (idx + 1)
            loss_temp.append(avg_loss)
            progress_bar.set_postfix({'loss': avg_loss, 'acc': '{:.2f}%'.format(acc)})
            progress_bar.update()
            if idx == 0:
                # Print one sample translation per epoch as a sanity check.
                predict = arc_mapping(predicted, vocab_ch)
                ground = arc_mapping(ch_text, vocab_ch)
                print("真实：", end="")
                for g in ground[-1]:
                    if g == "<EOS>": break
                    print(g, end="")
                print()
                print("预测：", end="")
                for p in predict[-1]:
                    if p == "<EOS>": break
                    print(p, end="")
                print()
                print(loss_t.item())

        epoch_loss = sum(loss_temp) / len(loss_temp)
        if epoch_loss < min_loss:
            min_loss = epoch_loss  # remember best so we only save improvements
            torch.save(model.state_dict(), "models/seq2seq_model.pth")
        progress_bar.close()
        scheduler.step()


class configs():
    """Hyper-parameter container shared across the training pipeline."""

    def __init__(self):
        # Optimization settings.
        self.batch_size = 256
        self.optim_l_r = 1e-3
        self.epochs = 50
        # Model architecture.
        self.num_layers = 1
        self.hidden_size = 128
        self.embedding_size = 32


if __name__ == "__main__":
    # Guarded entry point: previously this ran on import, and the local
    # `dataset` variable shadowed the dataset() function defined above.
    config = configs()

    train_set, vocab_en, vocab_ch, en_max_len, ch_max_len = dataset()
    train_dataset = MyCustomDataset(train_set, config)
    dataloader = dateset2loader(config, train_dataset, en_max_len, ch_max_len)

    train(config, dataloader, vocab_en, vocab_ch, ch_max_len)
