import json
import os
import time
from datetime import datetime

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torchsummary import summary

from model.ket2a.ket2a_processor import KET2AProcessor


def test_epoch(dataloader, model):
    model.eval()
    with torch.no_grad():
        total_correct_n = 0
        total_n = 0
        seq_criterion = nn.NLLLoss(reduction="sum")
        total_words = 0
        total_loss = 0

        for data in dataloader:
            inputs, outputs, targets = data
            decoder_outputs = model(inputs)

            # compute perplexity
            # calculate loss
            seq_loss = seq_criterion(
                decoder_outputs.view(-1, decoder_outputs.size(-1)),
                outputs.view(-1)
            )
            total_loss += seq_loss
            total_words += outputs.numel()

            # calculate accuracy on the test dataset
            _, topi = decoder_outputs.topk(1)
            decoder_outputs = topi.squeeze()
            correct_num = torch.eq(decoder_outputs, outputs).all(dim=-1).sum()

            total_n += decoder_outputs.shape[0]
            total_correct_n += correct_num

            # print(decoder_outputs)
            # print(outputs)
            # print("===============")

    return total_correct_n / total_n, np.exp(total_loss / total_words)


# 定义一个简单的 LSTM 模型
# Simple LSTM-based sequence-to-sequence model: embed the input token ids,
# run them through a (possibly stacked) LSTM, and project every time step's
# hidden state onto vocabulary log-probabilities.
class Seq2SeqLSTM(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim, num_words, num_layers, dropout):
        super(Seq2SeqLSTM, self).__init__()
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers

        # Token-id -> dense-vector lookup table.
        self.embeddings = nn.Embedding(num_words, input_dim)

        # Recurrent encoder; batch_first=True means (batch, seq, feature).
        self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers=num_layers,
                            dropout=dropout, batch_first=True)

        # Per-time-step projection onto the output vocabulary.
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, src):
        """Map token ids `src` (batch, seq_len) to log-probs (batch, seq_len, output_dim)."""
        embedded = self.embeddings(src)
        lstm_out, _state = self.lstm(embedded)  # final (h, c) state is unused
        logits = self.fc(lstm_out)              # one prediction per time step
        return nn.functional.log_softmax(logits, dim=-1)


def _append_series(path, values):
    """Append `values` to `path`, one "index<TAB>value" line per entry."""
    with open(path, 'a', encoding='utf-8') as f:
        for i, value in enumerate(values):
            f.write("%d\t%f\n" % (i, value))


def save_log(dir_path, train_acc: list, test_acc: list, epochs_perplexity: list, config: dict = None):
    """Append per-epoch metrics (and optionally the run config) under `dir_path`.

    Writes train_acc.txt, test_acc.txt and perplexity.txt as tab-separated
    "epoch<TAB>value" lines; config.txt gets the config as pretty-printed JSON.

    Args:
        dir_path: existing directory to write the log files into.
        train_acc: per-epoch training accuracies.
        test_acc: per-epoch test accuracies.
        epochs_perplexity: per-epoch perplexities.
        config: optional run configuration. When None, config.txt is left
            untouched (previously an empty file was still created).
    """
    _append_series(os.path.join(dir_path, "train_acc.txt"), train_acc)
    _append_series(os.path.join(dir_path, "test_acc.txt"), test_acc)
    _append_series(os.path.join(dir_path, "perplexity.txt"), epochs_perplexity)

    if config is not None:
        with open(os.path.join(dir_path, "config.txt"), 'a', encoding='utf-8') as f:
            f.write(json.dumps(config, indent=2))


def train(config):
    """Train the LSTM seq2seq baseline end to end, then write metric logs.

    Builds the KET2A dataset, trains Seq2SeqLSTM for config["num_epochs"]
    epochs with Adam + NLLLoss, evaluates exact-match accuracy and perplexity
    on the test split after every epoch, and finally appends all metrics plus
    the config to a timestamped log directory.

    Args:
        config: dict with keys "datasets", "seq_max_len", "batch_size",
            "in_dim", "hid_dim", "num_layers", "dropout", "learning_rate",
            "num_epochs"; optional "seed" (RNG seed, now actually applied)
            and "log_dir" (log root, defaults to the historical hard-coded
            path for backward compatibility).
    """
    # Apply the seed when provided. The config always declared one, but it
    # was previously never consumed, so runs were not reproducible.
    if "seed" in config:
        torch.manual_seed(config["seed"])

    # ###################### dataset #########################
    dataset = KET2AProcessor(config["datasets"],
                             seq_max_len=config["seq_max_len"],
                             batch_size=config["batch_size"])
    train_dataloader = dataset.seq2seq_dataset.train
    test_dataloader = dataset.seq2seq_dataset.test

    # ###################### log #########################
    now_str = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    # "log_dir" overrides the historical hard-coded location.
    log_root = config.get("log_dir", r"A:\projects\doing\KEA2T-final\seq2seq_baseline\log")
    full_log_dir = os.path.join(log_root, 'lstm-log' + now_str)
    os.makedirs(full_log_dir, exist_ok=True)

    # Model dimensions: both vocabulary sizes come from the dataset.
    num_words = dataset.seq2seq_dataset.in_words.n_words
    output_dim = dataset.seq2seq_dataset.out_words.n_words
    input_dim = config["in_dim"]    # embedding size of the input tokens
    hidden_dim = config["hid_dim"]  # LSTM hidden-state size

    model = Seq2SeqLSTM(input_dim, hidden_dim, output_dim, num_words=num_words,
                        num_layers=config["num_layers"], dropout=config["dropout"])
    summary(model)

    # The model emits log-probabilities, so NLLLoss is the matching criterion.
    criterion = nn.NLLLoss()
    optimizer = optim.Adam(model.parameters(), lr=config["learning_rate"])

    num_epochs = config["num_epochs"]
    epochs_test_acc = []
    epochs_perplexity = []
    epochs_train_acc = []

    for epoch in range(num_epochs):
        model.train()
        total_loss = 0.0
        correct_num = 0
        total_n = 0

        for inputs, outputs, targets in train_dataloader:
            optimizer.zero_grad()

            decoder_outputs = model(inputs)
            loss = criterion(
                decoder_outputs.view(-1, decoder_outputs.size(-1)),
                outputs.view(-1)
            )

            # Training accuracy: a sequence counts only when every token is
            # predicted correctly. squeeze(-1) (not a bare squeeze()) keeps
            # the batch axis when a batch holds a single sample, and .item()
            # keeps the counters as plain Python numbers instead of tensors.
            _, topi = decoder_outputs.topk(1)
            predictions = topi.squeeze(-1)
            correct_num += torch.eq(predictions, outputs).all(dim=-1).sum().item()
            total_n += predictions.shape[0]

            loss.backward()
            optimizer.step()
            total_loss += loss.item()

        test_acc, perplexity = test_epoch(test_dataloader, model)
        train_acc = correct_num / total_n

        epochs_train_acc.append(train_acc)
        epochs_test_acc.append(test_acc)
        epochs_perplexity.append(perplexity)
        avg_loss = total_loss / len(train_dataloader)

        print(
            f"Epoch [{epoch + 1}/{num_epochs}] - Loss: {avg_loss:.4f} -"
            f" Train Acc: {train_acc} Test Acc: {test_acc:.4f} - Perplexity: {perplexity:.4f}")

    save_log(full_log_dir, epochs_train_acc, epochs_test_acc, epochs_perplexity, config)


if __name__ == "__main__":
    # Hyper-parameters / run settings for the LSTM seq2seq baseline.
    config = {
        "seed": 0,  # RNG seed -- NOTE(review): confirm train() actually applies it
        "datasets": r"A:\projects\doing\KEA2T-final\datasets\datasets4",  # KET2A dataset root
        "batch_size": 16,
        "seq_max_len": 10,  # max sequence length passed to KET2AProcessor
        "in_dim": 32,  # embedding dimension of input tokens
        "hid_dim": 32,  # LSTM hidden-state dimension
        "learning_rate": 0.001,  # Adam learning rate
        "dropout": 0.1,  # LSTM dropout (only effective with num_layers > 1)
        "num_layers": 1,
        "num_epochs": 500
    }
    train(config)
