import json
import os.path
import random
import re
import time
from datetime import datetime

import torch
import wandb
import numpy as np
from datasets import Dataset, DatasetDict
from torch import nn
from transformers import DataCollatorForSeq2Seq, Seq2SeqTrainer, AutoTokenizer, \
    T5ForConditionalGeneration, T5Config, Seq2SeqTrainingArguments
from torchsummary import summary

from model.pre_trained import en_embedding, cn_embedding
from seq2seq_baseline.t5.my_trainer import MyTrainer

# Keys of the per-example translation dicts built by load_datasets().
source_lang = "src"
target_lang = "dst"
# T5-style task prefix. NOTE(review): defined but never prepended to the
# inputs in preprocess_function below — confirm whether it should be used.
prefix = "translate src to dst: "


def _read_split(path, start_id):
    """Read one tab-separated parallel file (``src<TAB>dst`` per line).

    Blank lines (e.g. a trailing newline) are skipped instead of raising
    IndexError, which the previous inline version did.

    Returns:
        tuple: (``{"id": [...], "translation": [...]}`` dict, next unused id).
    """
    split = {"id": [], "translation": []}
    next_id = start_id
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue  # tolerate empty/trailing lines
            arr = line.split("\t")
            split["id"].append(next_id)
            split["translation"].append({
                "src": arr[0],
                "dst": arr[1]
            })
            next_id += 1
    return split, next_id


def load_datasets(dir_path):
    """Load the train/test parallel corpora under *dir_path*.

    Expects ``train.txt`` and ``test.txt`` in *dir_path*, each with one
    tab-separated ``src<TAB>dst`` pair per line. Ids are numbered
    consecutively across both splits (train first).

    Returns:
        tuple: (DatasetDict with "train"/"test" splits, list of unique
        whitespace-delimited tokens in first-seen order over both splits
        and both language sides).
    """
    train_dict, next_id = _read_split(os.path.join(dir_path, "train.txt"), 0)
    test_dict, _ = _read_split(os.path.join(dir_path, "test.txt"), next_id)

    datasets = DatasetDict({
        "train": Dataset.from_dict(train_dict),
        "test": Dataset.from_dict(test_dict)
    })

    # Word-level vocabulary over source and target sides. dict preserves
    # insertion order, so setdefault reproduces the original first-seen
    # ordering without a separate counter.
    vocab_index = {}
    for dataset in datasets.values():
        for example in dataset["translation"]:
            for sentence in (example[source_lang], example[target_lang]):
                for word in re.split(r"\s+", sentence):
                    vocab_index.setdefault(word, len(vocab_index))

    return datasets, list(vocab_index)


def get_tokenizer(checkpoint, vocabs):
    """Build a tokenizer for this corpus from a pre-trained checkpoint.

    Starts from the checkpoint's tokenizer, re-trains it on an empty
    iterator (keeping only special tokens), then registers every word in
    *vocabs* as an explicit token.
    """
    base_tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    corpus_tokenizer = base_tokenizer.train_new_from_iterator([], len(vocabs))
    corpus_tokenizer.add_tokens(vocabs)
    return corpus_tokenizer


def save_log(dir_path, test_acc: list, perplexity_l: list, config: dict = None):
    """Persist per-evaluation metrics and the run configuration to *dir_path*.

    Writes three files: ``test_acc.txt`` and ``perplexity.txt`` as
    tab-separated ``index<TAB>value`` lines, and ``config.txt`` as JSON
    (left empty when *config* is None, matching the original behavior).
    """
    test_acc_file = os.path.join(dir_path, "test_acc.txt")
    config_file = os.path.join(dir_path, "config.txt")
    perplexity_file = os.path.join(dir_path, "perplexity.txt")

    with open(test_acc_file, 'w', encoding='utf-8') as f:
        # f"{x:f}" matches the previous "%f" formatting (6 decimals).
        f.writelines(f"{i}\t{acc:f}\n" for i, acc in enumerate(test_acc))

    with open(perplexity_file, 'w', encoding='utf-8') as f:
        f.writelines(f"{i}\t{perplexity:f}\n" for i, perplexity in enumerate(perplexity_l))

    with open(config_file, 'w', encoding='utf-8') as f:
        if config is not None:
            json.dump(config, f, indent=2)


def postprocess_text(preds, labels):
    """Strip surrounding whitespace from every prediction and reference."""
    stripped_preds = list(map(str.strip, preds))
    stripped_labels = list(map(str.strip, labels))
    return stripped_preds, stripped_labels


def train(datasets_path, seed=0, pretrained=False, lang="en", max_len=20, n_epochs=400, learning_rate=0.0002):
    """Train a small T5 model on the seq2seq translation datasets.

    Args:
        datasets_path: directory containing ``train.txt`` / ``test.txt``.
        seed: random seed passed to the trainer.
        pretrained: if True, use d_model=768 and load pre-trained encoder
            embeddings for *lang*; otherwise train from scratch at d_model=128.
        lang: "en" or "cn" — selects the pre-trained embedding table.
        max_len: max token length for inputs/targets and generation.
        n_epochs: number of training epochs.
        learning_rate: optimizer learning rate.

    Side effects: logs to wandb, writes checkpoints, and saves accuracy /
    perplexity / config logs to a timestamped directory.
    """
    test_acc = []        # exact-match accuracy per evaluation epoch
    perplexity_l = []    # perplexity per evaluation epoch
    checkpoint = "t5-small"
    datasets, vocabs = load_datasets(datasets_path)
    tokenizer = get_tokenizer(checkpoint, vocabs)

    # Build a fresh, heavily shrunken T5 configuration (trained from scratch).
    config = T5Config.from_pretrained("t5-small")
    config.vocab_size = tokenizer.vocab_size + len(vocabs)
    config.d_model = 128  # encoder/pooler hidden size (overridden below when pretrained)
    config.d_kv = 128  # per-head key/query/value projection size
    config.d_ff = 128  # feed-forward inner size in each T5Block
    config.num_layers = 1  # encoder layers
    config.num_decoder_layers = 1  # decoder layers
    config.num_heads = 2  # attention heads per layer
    config.dropout_rate = 0.1
    config.max_length = max_len

    def compute_metrics(eval_preds):
        """Compute perplexity and exact-match accuracy for one evaluation.

        *logits* are log-probabilities (see preprocess_logits_for_metrics),
        so NLLLoss applies directly.
        """
        logits, labels = eval_preds

        # ----- perplexity -----
        seq_criterion = nn.NLLLoss(reduction="sum")
        logits_tensor = torch.from_numpy(logits)
        labels_tensor = torch.from_numpy(labels)
        total_loss = seq_criterion(
            logits_tensor.view(-1, logits_tensor.size(-1)),
            labels_tensor.view(-1)
        )
        # BUGFIX: NLLLoss ignores targets equal to -100 (its default
        # ignore_index), so normalize by the number of contributing tokens
        # only. The previous .numel() divisor also counted ignored positions
        # and understated perplexity whenever -100 padding was present.
        total_words = int((labels_tensor != -100).sum())
        perplexity = float(np.exp(total_loss.item() / total_words))
        perplexity_l.append(perplexity)

        # ----- exact-match accuracy -----
        preds = np.argmax(logits, axis=-1).squeeze()
        decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
        # Replace -100 so the tokenizer can decode the references.
        labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
        decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)

        decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
        acc_total = sum(
            1 for pred, label in zip(decoded_preds, decoded_labels) if pred == label
        )
        acc_rate = acc_total / len(decoded_preds)
        test_acc.append(acc_rate)

        return {
            "acc_rate": acc_rate,
            "perplexity": perplexity
        }

    def preprocess_logits_for_metrics(logits, labels):
        """Reduce the model output tuple to log-probabilities for NLLLoss."""
        return nn.functional.log_softmax(logits[0], dim=-1)

    def preprocess_function(examples):
        """Tokenize source/target pairs to fixed length max_len."""
        # NOTE(review): the module-level `prefix` is intentionally not
        # prepended here, mirroring the original behavior — confirm.
        inputs = [example[source_lang] for example in examples["translation"]]
        targets = [example[target_lang] for example in examples["translation"]]
        model_inputs = tokenizer(inputs, text_target=targets, max_length=max_len, truncation=True, padding="max_length")
        return model_inputs

    tokenized_datasets = datasets.map(preprocess_function, batched=True)
    data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=checkpoint)

    if pretrained:
        # Pre-trained embeddings are 768-wide, so the model must match.
        config.d_model = 768
        model = T5ForConditionalGeneration(config)

        # BUGFIX: build the id-ordered token list explicitly. The previous
        # `list.insert(id, vocab)` while iterating the vocab dict only works
        # if ids happen to be visited in ascending order; inserting into a
        # shorter list otherwise scrambles positions.
        all_vocabs = [
            token for token, _ in sorted(tokenizer.vocab.items(), key=lambda kv: kv[1])
        ]

        if lang == "en":
            pre_trained_embedded = en_embedding.get_embeddings(all_vocabs)
        elif lang == "cn":
            pre_trained_embedded = cn_embedding.get_embeddings(all_vocabs)
        else:
            raise RuntimeError("The lang is invalid.")
        model.base_model.encoder.embed_tokens.weight.data = pre_trained_embedded
    else:
        config.d_model = 128
        model = T5ForConditionalGeneration(config)

    summary(model)

    training_args = Seq2SeqTrainingArguments(
        # TODO(review): hard-coded absolute path — consider parameterizing.
        output_dir=r"A:\projects\doing\KEA2T-final\seq2seq_baseline\t5\log",
        evaluation_strategy="epoch",
        save_strategy="epoch",
        # BUGFIX: honor the learning_rate argument (was hard-coded 0.00005,
        # silently ignoring the parameter and the saved config value).
        learning_rate=learning_rate,
        per_device_train_batch_size=16,
        per_device_eval_batch_size=16,
        weight_decay=0.001,
        save_total_limit=5,
        num_train_epochs=n_epochs,
        predict_with_generate=False,
        load_best_model_at_end=True,
        use_cpu=True,
        seed=seed
    )

    trainer = MyTrainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["test"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
        preprocess_logits_for_metrics=preprocess_logits_for_metrics
    )

    wandb.init(project="t5")
    trainer.train()

    # Save metrics and the effective configuration to a timestamped dir.
    now_str = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    full_log_dir = os.path.join(r"A:\projects\doing\KEA2T-final\seq2seq_baseline\log", 't5-log' + now_str)
    if not os.path.exists(full_log_dir):
        os.makedirs(full_log_dir)
    save_log(full_log_dir, test_acc, perplexity_l, {
        "datasets_path": datasets_path,
        "seed": seed,
        "pretrained": pretrained,
        "lang": lang,
        "max_len": max_len,
        "n_epochs": n_epochs,
        "learning_rate": learning_rate
    })


if __name__ == "__main__":
    # Single training run; edit these values to change the experiment.
    train(
        seed=0,
        datasets_path=r"A:\projects\doing\KEA2T-final\datasets\datasets4\seq2seq",
        pretrained=False,
        lang="en",
        max_len=20,
        n_epochs=400,
        learning_rate=0.00005,
    )
