
import argparse
import os
import sys

from timm.utils import ModelEmaV2

from orbitP.model.PatchTST import PatchTST
from orbitP.model.TimeMixer import TimeMixer
from orbitP.model.crossformer import crossformer

os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'  # 临时解决方案
import pandas as pd
import joblib

from orbitP.model.PathFormer import PathFormer
from orbitP.script.DataLoader import orbitPSULTDataset
from orbitP.script.loss import WeightedMSELoss, HuberLoss, MultiStepWeightedLoss, PMLLoss, nllLoss
from orbitP.model.Transformer import Transformer
from orbitP.model.LSTM import Lstm
from orbitP.model.ITransformer import ITransformer
from orbitP.model.MLP import MLP
from torch.utils.data import DataLoader
import torch
import torch.nn as nn
import logging
import numpy as np

from tqdm import tqdm
from orbitP.script import config
from orbitP.script.util import *
from orbitP.script.plot import plot_error
import random

# Fix all RNG sources so runs are reproducible across numpy, python and torch.
_SEED = 42

np.random.seed(_SEED)
random.seed(_SEED)
torch.manual_seed(_SEED)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(_SEED)

def getL2(model):
    """Return the L2 regularization term: the sum of squared L2 norms of all parameters."""
    return sum(torch.norm(p, p=2) ** 2 for p in model.parameters())

def inference_SULT_Seq2One(test_dataloader, args, orbitData=None):
    """Autoregressive (one-step-at-a-time) inference over the test set.

    At each of the `config.predicting_length` steps the model predicts one
    frame; the input window is then slid forward using the *ground-truth*
    next frame (teacher forcing — feeding the prediction back is left as a
    commented-out alternative below). Per-step MSE is averaged per batch,
    and the stacked predictions/targets are saved via `save_pred`.

    Args:
        test_dataloader: yields (orbitData_pre, orbitData_suf,
            stampData_pre, stampData_suf) batches.
        args: initialised arguments; `args.model` selects the network.
        orbitData: unused here; kept for call-site compatibility.
    """
    deleteFile(config.lossPath + f'{args.model}_pml.txt')
    device = torch.device(config.device)
    # Registry of supported model constructors; unknown names abort.
    model_classes = {
        'itransformer': ITransformer,
        'lstm': Lstm,
        'PathFormer': PathFormer,
    }
    model_cls = model_classes.get(args.model)
    if model_cls is None:
        print("Model Not Found")
        sys.exit(0)
    model = model_cls(args).to(device)
    predVec = np.empty((0, config.predicting_length, config.outputSize))
    targetVec = np.empty((0, config.predicting_length, config.outputSize))
    # map_location keeps a GPU-saved checkpoint loadable on a CPU-only host
    # (and vice versa) instead of crashing on device deserialization.
    model.load_state_dict(torch.load(config.loadDir + config.loadModel, map_location=device))
    criterion = nn.MSELoss()
    test_loss = 0
    model.eval()
    with torch.no_grad():
        test_bar = tqdm(test_dataloader, total=len(test_dataloader))
        for orbitData_pre, orbitData_suf, stampData_pre, stampData_suf in test_bar:
            test_bar.set_description("inference")
            # assumes shape (batch, window, features) — TODO confirm against DataLoader
            src = orbitData_pre.clone().to(device)
            src_mark = stampData_pre.clone().to(device)
            target = orbitData_suf.clone().to(device)
            target_mark = stampData_suf.clone().to(device)
            predList = []
            total_loss = 0
            for idx in range(target.size(1)):
                # Model signatures differ: iTransformer also takes time marks,
                # PathFormer additionally returns a load-balancing loss.
                if args.model == 'itransformer':
                    pred = model(src, src_mark)
                elif args.model == 'PathFormer':
                    pred, balance_loss = model(src)
                else:
                    pred = model(src)
                predList.append(pred)
                step_loss = criterion(pred, target[:, idx:idx + 1, :config.outputSize])
                if args.model == "PathFormer":
                    step_loss = step_loss + balance_loss
                total_loss += step_loss
                if idx < config.predicting_length - 1:
                    # Slide the input window forward by one ground-truth frame.
                    nxt = target[:, idx:idx + 1, :].clone()
                    # nxt[:, :, :config.outputSize] = pred  # enable to feed predictions back instead
                    nxt_mark = target_mark[:, idx:idx + 1, :].clone()
                    src = torch.cat((src[:, 1:, :], nxt), dim=1)
                    src_mark = torch.cat((src_mark[:, 1:, :], nxt_mark), dim=1)

            loss = total_loss / config.predicting_length
            test_bar.set_postfix({"loss": loss.detach().item()})
            test_loss += loss.detach().item()
            predList = torch.cat(predList, dim=1)
            predVec = np.concatenate((predVec, predList.detach().cpu().numpy()), axis=0)
            targetVec = np.concatenate((targetVec, target[:, :, :config.outputSize].detach().cpu().numpy()), axis=0)

        test_loss /= len(test_dataloader)
        print(f"loss_avg: {test_loss}")
        save_pred(predVec, targetVec, f"{args.model}_error")

def inference_SULT_Seq2Seq(test_dataloader, args, infType="test"):
    """Whole-horizon (sequence-to-sequence) inference over the test set.

    The model predicts all `config.predicting_length` steps in one forward
    pass. Loss is NLL or MSE depending on `config.nll`; when
    `config.outputSize == 1` only the `config.outputIdx` channel of the
    target is compared.

    Args:
        test_dataloader: yields (orbitData_pre, orbitData_suf,
            stampData_pre, stampData_suf) batches.
        args: initialised arguments; `args.model` selects the network.
        infType: "test" saves error/PML-bucket files and returns None;
            "eval" returns (predVec, targetVec); any other value returns None.

    Returns:
        (predVec, targetVec) numpy arrays when infType == "eval", else None.
    """
    deleteFile(config.lossPath + f'{args.model}_pml.txt')
    device = torch.device(config.device)
    # Registry of supported model constructors; unknown names abort.
    model_classes = {
        'itransformer': ITransformer,
        'lstm': Lstm,
        'PathFormer': PathFormer,
        "PatchTST": PatchTST,
        "crossformer": crossformer,
        "TimeMixer": TimeMixer,
        "MLP": MLP,
        "Transformer": Transformer,
    }
    model_cls = model_classes.get(args.model)
    if model_cls is None:
        print("Model Not Found")
        sys.exit(0)
    model = model_cls(args).to(device)
    predVec = np.empty((0, config.predicting_length, config.outputSize))
    targetVec = np.empty((0, config.predicting_length, config.outputSize))
    # map_location keeps a GPU-saved checkpoint loadable on a CPU-only host;
    # strict=False tolerates checkpoints with extra/missing keys (e.g. EMA runs).
    model.load_state_dict(torch.load(config.loadDir + config.loadModel, map_location=device), strict=False)

    if config.nll == 1:
        criterion = nllLoss()
    else:
        criterion = nn.MSELoss()
    test_loss = 0
    model.eval()
    with torch.no_grad():
        test_bar = tqdm(test_dataloader, total=len(test_dataloader))
        for orbitData_pre, orbitData_suf, stampData_pre, stampData_suf in test_bar:
            test_bar.set_description("inference")
            # assumes shape (batch, window, features) — TODO confirm against DataLoader
            src = orbitData_pre.clone().to(device)
            src_mark = stampData_pre.clone().to(device)
            target = orbitData_suf.clone().to(device)
            target_mark = stampData_suf.clone().to(device)
            # PathFormer additionally returns a load-balancing loss (unused here).
            if args.model == 'PathFormer':
                pred, balance_loss = model(src, src_mark, target, target_mark)
            else:
                pred = model(src, src_mark, target, target_mark)

            if config.outputSize == 1:
                loss = criterion(pred, target[:, :config.predicting_length, config.outputIdx:config.outputIdx + 1])
            else:
                loss = criterion(pred, target[:, :config.predicting_length, :config.outputSize])
            test_bar.set_postfix({"loss": loss.detach().item()})
            test_loss += loss.detach().item()
            predVec = np.concatenate((predVec, pred[:, :, :config.outputSize].detach().cpu().numpy()), axis=0)

            if config.outputSize == 1:
                targetVec = np.concatenate((targetVec, target[:, :config.predicting_length, config.outputIdx:config.outputIdx + 1].detach().cpu().numpy()), axis=0)
            else:
                targetVec = np.concatenate((targetVec, target[:, :config.predicting_length, :config.outputSize].detach().cpu().numpy()), axis=0)
        test_loss /= len(test_dataloader)
        print(f"loss_avg: {test_loss}")
        if infType == "test":
            save_pred(predVec, targetVec, f"{args.model}_error")
            pmlBucket(predVec, targetVec, f"{args.model}_pmlBucket")
        elif infType == "eval":
            return predVec, targetVec

if __name__ == "__main__":
    # CLI: only the model name is selected here; everything else comes from init_args/config.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=str, default="itransformer")
    args = init_args(parser.parse_args())
    save_config(args, config.configPath)

    # Load the observation/prediction tables and their timestamp tables as float32.
    obsData = pd.read_csv(config.dataSetDir + 'df_obsData.csv').to_numpy(dtype=np.float32)
    prdData = pd.read_csv(config.dataSetDir + 'df_prdData.csv').to_numpy(dtype=np.float32)
    stampObs = pd.read_csv(config.dataSetDir + 'df_stampObs.csv').to_numpy(dtype=np.float32)
    stampPrd = pd.read_csv(config.dataSetDir + 'df_stampPrd.csv').to_numpy(dtype=np.float32)

    # Split (and scale) each table into train / val / test partitions.
    obsData, prdData, stampObs, stampPrd = splitDataset(obsData, prdData, stampObs, stampPrd, runType='train', sca=True)
    obsData_train, obsData_val, obsData_test = obsData[0], obsData[1], obsData[2]
    prdData_train, prdData_val, prdData_test = prdData[0], prdData[1], prdData[2]
    stampObs_train, stampObs_val, stampObs_test = stampObs[0], stampObs[1], stampObs[2]
    stampPrd_train, stampPrd_val, stampPrd_test = stampPrd[0], stampPrd[1], stampPrd[2]

    # Window parameters shared by all three dataset splits.
    window_kwargs = dict(
        training_length=config.training_length,
        predicting_length=config.predicting_length,
        forecast_window=config.forecast_window,
    )
    train_dataset = orbitPSULTDataset(
        obsData=obsData_train, prdData=prdData_train,
        stampObs=stampObs_train, stampPrd=stampPrd_train,
        days=obsData_train.shape[0] / config.training_length, **window_kwargs)
    val_dataset = orbitPSULTDataset(
        obsData=obsData_val, prdData=prdData_val,
        stampObs=stampObs_val, stampPrd=stampPrd_val,
        days=prdData_val.shape[0] / config.predicting_length, **window_kwargs)
    test_dataset = orbitPSULTDataset(
        obsData=obsData_test, prdData=prdData_test,
        stampObs=stampObs_test, stampPrd=stampPrd_test,
        days=prdData_test.shape[0] / config.predicting_length, **window_kwargs)

    train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=0, shuffle=True, pin_memory=True)
    val_dataloader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=0, shuffle=False, pin_memory=True)
    test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=0, shuffle=False, pin_memory=True)

    inference_SULT_Seq2Seq(test_dataloader, args, infType="test")