import os
import sys

import torch
import torch.nn as nn
import random
import argparse
import pandas as pd
import numpy as np
from tqdm import tqdm

from orbitP.model.ITransformer import ITransformer
from orbitP.model.LSTM import Lstm
from orbitP.model.PathFormer import PathFormer
from orbitP.script import config
from orbitP.script.DataLoader import orbitPSULTDataset
from orbitP.script.test import inference_SULT_Seq2Seq
from orbitP.script.util import init_args, save_config, splitDataset, save_multiEval, save_pred_allRmse, RMSE, \
    scalerInver, get_3DError
from torch.utils.data import DataLoader

# Fix all RNG seeds (NumPy, stdlib random, torch CPU and every CUDA device)
# so evaluation runs are reproducible.
np.random.seed(42)
random.seed(42)
torch.manual_seed(42)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(42)

def get_file_paths(directory):
    """Recursively collect every file path under *directory*.

    Returns the paths normalized with ``os.path.normpath`` and sorted
    lexicographically. A missing directory yields an empty list.
    """
    collected = [
        os.path.normpath(os.path.join(root, name))
        for root, _dirs, names in os.walk(directory)
        for name in names
    ]
    collected.sort()
    return collected

def checkAxisID(axisID):
    """Set ``config.outputIdx`` from an axis label: ``"R"`` -> 0, ``"S"`` -> 1,
    anything else -> 2."""
    axis_to_idx = {"R": 0, "S": 1}
    config.outputIdx = axis_to_idx.get(axisID, 2)

def show_3DError():
    """Evaluate every checkpoint under ``config.loadDir`` on the test split and
    hand the combined per-axis errors to ``get_3DError``.

    Checkpoint files are expected to be named ``<model>#<axis>#<tag>.pth``; the
    axis letter selects ``config.outputIdx`` via ``checkAxisID``.  Each
    checkpoint's inverse-scaled predictions/targets are reshaped to
    ``(samples, predicting_length, outputSize)`` and concatenated along the
    last axis before plotting.
    """
    predList = []
    targetList = []
    for filePath in get_file_paths(config.loadDir):
        if not filePath.endswith('.pth'):
            continue
        fileName = os.path.basename(filePath)  # e.g. iTransformer#R#train_50.pth
        axisID = fileName.split("#")[1]
        checkAxisID(axisID)  # sets config.outputIdx for this checkpoint's axis

        parser = argparse.ArgumentParser()
        parser.add_argument("--model", type=str, default=f"{fileName.split('.')[0]}")
        args = parser.parse_args()
        config.loadModel = fileName
        config.configModelDir = config.loadDir + "YAML/"
        args = init_args(args)

        # Re-read the raw CSVs every iteration: init_args presumably refreshes
        # config per checkpoint (TODO confirm), so paths/scaling are re-resolved.
        obsData = pd.read_csv(config.dataSetDir + 'df_obsData.csv').to_numpy(dtype=np.float32)
        prdData = pd.read_csv(config.dataSetDir + 'df_prdData.csv').to_numpy(dtype=np.float32)
        stampObs = pd.read_csv(config.dataSetDir + 'df_stampObs.csv').to_numpy(dtype=np.float32)
        stampPrd = pd.read_csv(config.dataSetDir + 'df_stampPrd.csv').to_numpy(dtype=np.float32)

        obsData, prdData, stampObs, stampPrd = splitDataset(obsData, prdData, stampObs, stampPrd,
                                                            runType='train', sca=True)

        # Only the test split (index 2) is evaluated here; the train/val
        # datasets and loaders the original built were never consumed.
        test_dataset = orbitPSULTDataset(obsData=obsData[2], prdData=prdData[2],
                                         stampObs=stampObs[2], stampPrd=stampPrd[2],
                                         training_length=config.training_length,
                                         predicting_length=config.predicting_length,
                                         forecast_window=config.forecast_window,
                                         days=prdData[2].shape[0] / config.predicting_length)
        test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=0,
                                     shuffle=False, pin_memory=True)

        predVec, targetVec = inference_SULT_Seq2Seq(test_dataloader, args, infType="eval")
        predVec = predVec.reshape(-1, config.outputSize)
        targetVec = targetVec.reshape(-1, config.outputSize)
        predError, srcError = scalerInver(predVec, targetVec)
        predList.append(predError.reshape(-1, config.predicting_length, config.outputSize))
        targetList.append(srcError.reshape(-1, config.predicting_length, config.outputSize))

    if not predList:
        # np.concatenate([]) raises an opaque ValueError; fail with a clear message.
        raise FileNotFoundError(f"no .pth checkpoints found under {config.loadDir}")
    get_3DError(np.concatenate(predList, axis=-1), np.concatenate(targetList, axis=-1))


def show_MultiRMSE():
    """Evaluate every per-axis checkpoint under ``config.loadDir`` and save the
    combined all-axis RMSE via ``save_pred_allRmse``.

    Checkpoints named ``<model>#<axis>#<tag>.pth`` are run one at a time on the
    test split; each result fills column ``config.outputIdx`` of a
    ``(S, L, config.axis)`` buffer keyed by model name, so the separate axis
    checkpoints of one model end up in a single array.
    """
    predList = dict()
    targetList = dict()
    for filePath in get_file_paths(config.loadDir):
        if not filePath.endswith('.pth'):
            continue
        fileName = os.path.basename(filePath)  # e.g. iTransformer#R#train_50.pth
        modelName = fileName.split("#")[0]     # e.g. iTransformer
        axisID = fileName.split("#")[1]
        checkAxisID(axisID)  # sets config.outputIdx for this checkpoint's axis

        parser = argparse.ArgumentParser()
        parser.add_argument("--model", type=str, default=f"{fileName.split('.')[0]}")
        args = parser.parse_args()
        config.loadModel = fileName
        config.configModelDir = config.loadDir + "YAML/"
        args = init_args(args)

        # Re-read the raw CSVs every iteration: init_args presumably refreshes
        # config per checkpoint (TODO confirm), so paths/scaling are re-resolved.
        obsData = pd.read_csv(config.dataSetDir + 'df_obsData.csv').to_numpy(dtype=np.float32)
        prdData = pd.read_csv(config.dataSetDir + 'df_prdData.csv').to_numpy(dtype=np.float32)
        stampObs = pd.read_csv(config.dataSetDir + 'df_stampObs.csv').to_numpy(dtype=np.float32)
        stampPrd = pd.read_csv(config.dataSetDir + 'df_stampPrd.csv').to_numpy(dtype=np.float32)

        obsData, prdData, stampObs, stampPrd = splitDataset(obsData, prdData, stampObs, stampPrd,
                                                            runType='train', sca=True)

        # Only the test split (index 2) is evaluated here; the train/val
        # datasets and loaders the original built were never consumed.
        test_dataset = orbitPSULTDataset(obsData=obsData[2], prdData=prdData[2],
                                         stampObs=stampObs[2], stampPrd=stampPrd[2],
                                         training_length=config.training_length,
                                         predicting_length=config.predicting_length,
                                         forecast_window=config.forecast_window,
                                         days=prdData[2].shape[0] / config.predicting_length)
        test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=0,
                                     shuffle=False, pin_memory=True)

        predVec, targetVec = inference_SULT_Seq2Seq(test_dataloader, args, infType="eval")
        if modelName not in predList:
            S, L, _ = predVec.shape
            predList[modelName] = np.zeros((S, L, config.axis))
        if modelName not in targetList:
            S, L, _ = targetVec.shape
            targetList[modelName] = np.zeros((S, L, config.axis))
        # Each checkpoint predicts a single axis (last dim 1); slot it into its column.
        predList[modelName][:, :, config.outputIdx:config.outputIdx + 1] = predVec
        targetList[modelName][:, :, config.outputIdx:config.outputIdx + 1] = targetVec

    save_pred_allRmse(predList, targetList)

def show_MultiPerf():
    """Run a single model (default ``"itransformer"``, overridable via
    ``--model``) on the test split and save its evaluation metrics through
    ``save_multiEval``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=str, default="itransformer")
    args = init_args(parser.parse_args())

    obsData = pd.read_csv(config.dataSetDir + 'df_obsData.csv').to_numpy(dtype=np.float32)
    prdData = pd.read_csv(config.dataSetDir + 'df_prdData.csv').to_numpy(dtype=np.float32)
    stampObs = pd.read_csv(config.dataSetDir + 'df_stampObs.csv').to_numpy(dtype=np.float32)
    stampPrd = pd.read_csv(config.dataSetDir + 'df_stampPrd.csv').to_numpy(dtype=np.float32)

    obsData, prdData, stampObs, stampPrd = splitDataset(obsData, prdData, stampObs, stampPrd,
                                                        runType='train', sca=True)

    # Only the test split (index 2) is evaluated here; the train/val datasets
    # and loaders the original built were never consumed.
    test_dataset = orbitPSULTDataset(obsData=obsData[2], prdData=prdData[2],
                                     stampObs=stampObs[2], stampPrd=stampPrd[2],
                                     training_length=config.training_length,
                                     predicting_length=config.predicting_length,
                                     forecast_window=config.forecast_window,
                                     days=prdData[2].shape[0] / config.predicting_length)
    test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=0,
                                 shuffle=False, pin_memory=True)

    predVec, targetVec = inference_SULT_Seq2Seq(test_dataloader, args, infType="eval")
    save_multiEval(predVec, targetVec, args)




if __name__ == '__main__':
    # Pick which evaluation to run by (un)commenting one of the calls below.
    # show_MultiPerf()
    show_MultiRMSE()
    # show_3DError()