import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import os
import numpy as np
import matplotlib.pyplot as plt
from structAIDetect.modeltrain.modaldataset import ModalDataset
from structAIDetect.modeltrain.disinfermodel import DisplaceInterpolator
import argparse


# ---- Training hyper-parameters ----
SEQ_LEN = 1      # input (seed) sequence length
PRED_LEN = 1      # prediction sequence length
HIDDEN_SIZE = 128  # LSTM hidden-state dimension
BATCH_SIZE = 16
EPOCHS = 2000  # default epoch count (overridable via --epochs in main)
LR = 0.001
NUMLAYERS = 1  # number of stacked LSTM layers
NUM_SAMPLES = 200  # NOTE(review): unused in this file — confirm intent
NUM_FM_RATE = 10   # NOTE(review): unused in this file — confirm intent
NUM_STEP = 5   # curriculum steps used to build seq_len_list in main (list currently unused)
TOTAL_MODAL_NUM = 4  # number of leading conditioning columns in each sample
INPUT_LEN = 44  # model input width: TOTAL_MODAL_NUM * 10 outputs + TOTAL_MODAL_NUM conditioning cols





def train_model(model, trainloader, criterion, optimizer, seq_len, pred_len):
    """Run one training epoch with an autoregressive rollout per batch.

    For every batch the model is seeded with the first ``seq_len`` steps
    (conditioning columns stripped), then fed its own previous output —
    re-prefixed with the first target step's ``TOTAL_MODAL_NUM``
    conditioning columns — for ``pred_len`` steps. The MSE over the whole
    rollout is backpropagated once per batch.

    Args:
        model: recurrent model called as ``model(x, (h, c))`` and returning
            ``(output, (h, c))`` where ``output`` is (batch, features).
        trainloader: yields ``(modalinfo, cellstate)``; ``modalinfo`` is
            assumed (batch, time, features) — TODO confirm with dataset.
        criterion: loss comparing the predicted rollout to the targets.
        optimizer: optimizer over ``model.parameters()``.
        seq_len (int): seed length. NOTE(review): the concat below only
            broadcasts correctly when ``seq_len == 1`` (as used in main).
        pred_len (int): number of autoregressive prediction steps.

    Returns:
        Scalar tensor: sum of detached batch losses for the epoch.
    """
    model.train()
    losses = 0
    for modalinfo, cellstate in trainloader:
        optimizer.zero_grad()
        batch = modalinfo.shape[0]
        # Ground-truth trajectory with the conditioning columns stripped.
        targets = modalinfo[:, seq_len:seq_len + pred_len, TOTAL_MODAL_NUM:]
        pred_result = torch.zeros(batch, pred_len, TOTAL_MODAL_NUM * 10)
        h_t = torch.zeros(NUMLAYERS, batch, HIDDEN_SIZE)
        c_t = torch.zeros(NUMLAYERS, batch, HIDDEN_SIZE)
        next_input = modalinfo[:, :seq_len, TOTAL_MODAL_NUM:]
        for seq_idx in range(pred_len):
            # NOTE(review): this slice is columns TOTAL_MODAL_NUM..2*TOTAL_MODAL_NUM
            # of the raw modalinfo (targets already dropped the first
            # TOTAL_MODAL_NUM columns) — confirm it is the intended
            # conditioning prefix.
            next_input = torch.cat(
                (targets[:, [0], :TOTAL_MODAL_NUM], next_input), dim=2)
            outputs, (h_t, c_t) = model(next_input, (h_t, c_t))
            next_input = outputs.unsqueeze(1)
            pred_result[:, [seq_idx], :] = next_input

        loss = criterion(pred_result, targets)
        loss.backward()

        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        # detach(): accumulating the live loss tensor would chain autograd
        # add-nodes across batches and keep every batch's loss alive.
        losses += loss.detach()
        optimizer.step()

    return losses





def predict_loss(model, testloader, criterion, seq_len, pred_len):
    """Evaluate the autoregressive rollout loss on ``testloader``.

    Mirrors ``train_model``: seeds with the first ``seq_len`` steps, then
    feeds the model its own previous output (re-prefixed with the first
    target step's conditioning columns) for ``pred_len`` steps — but runs
    under ``torch.no_grad()`` and performs no parameter updates.

    Returns the summed batch losses as a scalar tensor.
    """
    model.eval()
    total_loss = 0
    with torch.no_grad():
        for modalinfo, cellstate in testloader:
            first_step = modalinfo[:, [0], :]
            batch = first_step.shape[0]
            feat = first_step.shape[2]
            targets = modalinfo[:, seq_len:seq_len + pred_len, TOTAL_MODAL_NUM:]
            rollout = torch.zeros([batch, pred_len, feat - TOTAL_MODAL_NUM])
            hidden = torch.zeros(NUMLAYERS, batch, HIDDEN_SIZE)
            cell = torch.zeros(NUMLAYERS, batch, HIDDEN_SIZE)
            step_input = modalinfo[:, :seq_len, TOTAL_MODAL_NUM:]
            for step in range(pred_len):
                # Prepend the fixed conditioning columns before each step.
                step_input = torch.concat(
                    (targets[:, [0], :TOTAL_MODAL_NUM], step_input), axis=2)
                out, (hidden, cell) = model(step_input, (hidden, cell))
                step_input = out.unsqueeze(1)
                rollout[:, [step], :] = step_input

            total_loss += criterion(rollout, targets)

    return total_loss


def replace_dataloader(old_loader, NewDataset, new_config):
    """Tear down ``old_loader`` and build a fresh dataset/loader pair.

    ``NewDataset`` is instantiated as ``NewDataset(*new_config)`` and
    wrapped in a shuffling ``DataLoader`` of size ``BATCH_SIZE``.

    Returns:
        ``(new_loader, new_dataset)``.
    """
    if old_loader is not None:
        # Stop any live worker processes before dropping the reference
        # (private DataLoader API, hence the hasattr guard).
        if hasattr(old_loader, '_workers'):
            old_loader._shutdown_workers()
        del old_loader

    dataset = NewDataset(*new_config)
    loader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)
    return loader, dataset


def data_random_check(train_data):
    """Visual sanity check: plot 10 randomly chosen samples from *train_data*.

    Each sample's input and target segments are concatenated and shown in
    a separate matplotlib figure (blocking ``plt.show()`` per sample).

    NOTE(review): unpacks each sample as a 3-tuple ``(input, target, _)``,
    but ``ModalDataset`` samples are unpacked as 2-tuples in ``main`` —
    confirm which dataset this helper is meant for.
    """
    for i in range(10):
        input, target, _ = train_data[np.random.randint(0, len(train_data))]
        plt.figure()
        plt.plot(np.concatenate((input, target)))
        plt.show()


def main():
    """Train DisplaceInterpolator on the normalized modal dataset.

    Parses ``--epochs``, trains with a 1-step seed and a full-horizon
    autoregressive rollout every epoch, plots train/test loss curves, and
    saves the trained weights to ``model/displace_predict_para.pth``.
    """
    # BUG FIX: was `argparse.ArgumentParser` (class, not instance) — the
    # subsequent add_argument() call would raise.
    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', type=int, default=2000, help='输入训练轮次')
    args = parser.parse_args()
    # Lower-case local so the module constant EPOCHS is not shadowed.
    epochs = args.epochs

    datapath = os.path.join(os.getcwd(), "data", "norm")

    # Model, loss and optimizer.
    model = DisplaceInterpolator(input_size=INPUT_LEN, hidden_size=HIDDEN_SIZE, num_layers=NUMLAYERS)
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=LR)

    # Datasets; the first training sample fixes the total trajectory length.
    train_data = ModalDataset(datapath, train_test='train')
    test_data = ModalDataset(datapath, train_test='test')
    modalinfo, cellstate = train_data[0]
    total_len = modalinfo.shape[0]
    # NOTE(review): computed but never used — looks like a planned
    # curriculum schedule over seq_len; confirm before deleting.
    seq_len_list = np.linspace(1, total_len - 10, NUM_STEP, dtype=int)[::-1]

    train_loader = DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True)
    test_loader = DataLoader(test_data, batch_size=BATCH_SIZE, shuffle=True)

    seq_len = 1
    train_losses = []
    test_losses = []
    for epoch in range(epochs):
        train_loss = train_model(model, train_loader, criterion, optimizer,
                                 seq_len, total_len - seq_len)
        train_losses.append(train_loss.item())
        test_loss = predict_loss(model, test_loader, criterion, seq_len, total_len - seq_len)
        test_losses.append(test_loss.item())
        print(f"Epoch {epoch+1}, Train Loss: {train_loss:.4f}")
        print(f"Epoch {epoch+1}, Test Loss: {test_loss:.4f}")

    plt.figure()
    plt.plot(train_losses)
    plt.title("Training Loss")
    plt.show()
    plt.figure()
    plt.plot(test_losses)
    plt.title("Test Loss")
    plt.show()

    savepath = os.path.join(os.getcwd(), 'model')
    # Robustness: create the output directory if missing so torch.save
    # does not fail on a fresh checkout.
    os.makedirs(savepath, exist_ok=True)
    torch.save(model.state_dict(), os.path.join(savepath, 'displace_predict_para.pth'))


# Script entry point: run training only when executed directly.
if __name__ == '__main__':
    main()