import argparse
import os
import random
import sys

import joblib
import torch
import pandas as pd
import numpy as np
import torch.nn as nn
from optuna._callbacks import MaxTrialsCallback
from timm.utils import ModelEmaV2
from torch.nn.utils import clip_grad_norm_
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from tqdm import tqdm
from orbitP.model.ITransformer import ITransformer
from orbitP.model.LSTM import Lstm
from orbitP.model.PatchTST import PatchTST
from orbitP.model.PathFormer import PathFormer
from orbitP.model.TimeMixer import TimeMixer
from orbitP.model.crossformer import crossformer
from orbitP.model.MLP import MLP
from orbitP.model.Transformer import Transformer
from orbitP.script.DataLoader import orbitPSULTDataset
from orbitP.script.loss import nllLoss, PMLLoss
from orbitP.script.util import *
from orbitP.script.train import train_SULT_Seq2Seq
from orbitP.script import config
import optuna
from optuna.importance import get_param_importances

# Seed every RNG source we rely on (NumPy, Python, Torch CPU/CUDA) so that
# hyperparameter-search runs are reproducible.
_GLOBAL_SEED = 42
np.random.seed(_GLOBAL_SEED)
random.seed(_GLOBAL_SEED)
torch.manual_seed(_GLOBAL_SEED)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(_GLOBAL_SEED)

def objective(trial):
    """Optuna objective: sample a hyperparameter configuration, train the
    selected model with pruning/early stopping, and return its best
    validation loss (Optuna minimizes this value).

    Relies on module-level globals created in ``__main__``: ``args`` (mutated
    in place with the sampled values) and ``train_dataset`` / ``val_dataset``.

    Raises:
        ValueError: if ``args.model`` is not a known architecture.
    """
    args.EPOCH = 50
    # --- model-specific architecture search space -------------------------
    if args.model in ['itransformer', 'Transformer', 'PatchTST']:
        args.d_model = trial.suggest_int('d_model', 64, 256, step=16)
        args.d_ff = trial.suggest_int('d_ff', args.d_model*2, args.d_model*4, step=16)
    elif args.model in ['lstm', 'MLP']:
        args.d_model = trial.suggest_int('d_model', 64, 256, step=16)
    elif args.model == "PathFormer":
        args.d_model = trial.suggest_int('d_model', 8, 24, step=4)
        args.d_ff = trial.suggest_int('d_ff', args.d_model*2, args.d_model*4, step=args.d_model)
    else:
        raise ValueError("Model Not Found")
    if args.model == "itransformer":
        args.factor = trial.suggest_int('factor', 2, 10)
    if args.model == "PatchTST":
        args.patch_len = trial.suggest_int('patch_len', 16, 32, step=8)
        # NOTE(review): the 'stride' (and 'd_ff') ranges depend on other sampled
        # values, so their distributions differ between trials — Optuna logs a
        # warning for dynamic search spaces; confirm this is intentional.
        args.stride = trial.suggest_int('stride', args.patch_len//4, args.patch_len//2, step=args.patch_len//8)
    if config.useHead:
        args.head_dim = trial.suggest_int('head_dim', 16, 32, step=2)
        args.embed_dim = trial.suggest_int('embed_dim', 4, 12)
        args.head_kernel_size = trial.suggest_categorical('head_kernel_size', [3, 5, 7])
    # --- shared optimization hyperparameters ------------------------------
    args.num_layers = trial.suggest_int('num_layers', 2, 4)
    args.dropout = trial.suggest_float('dropout', 0.0, 0.25)
    args.lr = trial.suggest_float('lr', 1e-4, 5e-3, log=True)
    args.lambda_l2 = trial.suggest_float('lambda_l2', 5e-6, 5e-4, log=True)
    args.grad_clip = trial.suggest_float('grad_clip', 2.5, 7.5)

    # Fresh loaders per trial (workers are only spawned on iteration).
    # Fix: the test-set DataLoader built here previously was never used during
    # tuning, so it is no longer constructed.
    train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=4, shuffle=True, pin_memory=True)
    val_dataloader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=4, shuffle=False, pin_memory=True)

    args.early_stopping_patience = 10
    val_loss = train_SULT_Seq2Seq_with_earlystop(train_dataloader, val_dataloader, args, trial)

    return val_loss

def train_SULT_Seq2Seq_with_earlystop(train_dataloader, val_dataloader, args, trial):
    """Train one sampled configuration and return its best validation loss.

    After every epoch the validation loss is reported to Optuna so the pruner
    can abort unpromising trials; independently, early stopping triggers once
    the validation loss has not improved for ``args.early_stopping_patience``
    consecutive epochs.

    Args:
        train_dataloader: yields (orbitData_pre, orbitData_suf, stampData_pre, stampData_suf) batches.
        val_dataloader: same batch structure, used for validation only.
        args: namespace with model/optimizer hyperparameters (EPOCH, lr, grad_clip, ...).
        trial: the Optuna trial used for reporting/pruning.

    Returns:
        float: the lowest validation loss observed across epochs.

    Raises:
        ValueError: if ``args.model`` names an unknown architecture.
        optuna.exceptions.TrialPruned: when the pruner stops this trial.
    """
    device = torch.device(config.device)

    # Dispatch table keeps the model zoo in one place; every class takes
    # ``args`` as its sole constructor argument.
    model_classes = {
        'itransformer': ITransformer,
        'lstm': Lstm,
        'PathFormer': PathFormer,
        'PatchTST': PatchTST,
        'crossformer': crossformer,
        'TimeMixer': TimeMixer,
        'MLP': MLP,
        'Transformer': Transformer,
    }
    if args.model not in model_classes:
        # Fix: previously printed a message and called sys.exit(0) — a
        # *success* exit code; raise instead, consistent with objective().
        raise ValueError("Model Not Found")
    model = model_classes[args.model](args).to(device)

    total_steps = len(train_dataloader) * args.EPOCH
    optimizer = torch.optim.Adam(model.parameters(), lr=float(args.lr))
    scheduler = OneCycleLR(
        optimizer,
        max_lr=float(args.lr),
        total_steps=total_steps,
        pct_start=0.1,              # 10% of the schedule is warm-up
        anneal_strategy='cos',
        div_factor=10,
        final_div_factor=20,
        three_phase=False,
        cycle_momentum=False,       # Adam has no momentum to cycle
    )

    if config.usePTModel:
        # Fine-tune from a pre-trained checkpoint (weights only).
        model.load_state_dict(torch.load(config.loadPTDir + config.loadModel))
    elif config.resumeModel != 0:
        # Resume an interrupted run: weights, optimizer and scheduler state.
        model.load_state_dict(torch.load(config.loadDir + config.loadModel))
        optimizer.load_state_dict(torch.load(config.loadDir + config.loadOptim))
        scheduler.load_state_dict(torch.load(config.loadDir + config.loadSche))
        print("load model success!")

    criterion = nllLoss() if config.nll else nn.MSELoss()

    best_val_loss = float('inf')
    patience_counter = 0
    for epoch in range(config.resumeModel, args.EPOCH):
        train_loss = 0
        val_loss = 0
        model.train()
        for orbitData_pre, orbitData_suf, stampData_pre, stampData_suf in train_dataloader:
            optimizer.zero_grad()
            src = orbitData_pre.to(device)
            src_mark = stampData_pre.to(device)
            target = orbitData_suf.to(device)
            target_mark = stampData_suf.to(device)
            # PathFormer additionally returns a load-balancing loss for its router.
            if args.model == "PathFormer":
                pred, balance_loss = model(src, src_mark, target, target_mark)
            else:
                pred = model(src, src_mark, target, target_mark)

            # Score only the predicted horizon and the configured output channels.
            if config.outputSize == 1:
                loss = criterion(pred, target[:, :config.predicting_length, config.outputIdx:config.outputIdx + 1])
            else:
                loss = criterion(pred, target[:, :config.predicting_length, :config.outputSize])
            train_loss += loss.detach().item()  # accumulate the un-regularized loss
            # Regularization: PathFormer uses its balance loss, everything else L2.
            if args.model == "PathFormer":
                loss = loss + balance_loss
            else:
                l2_reg = getL2(model)
                loss = loss + args.lambda_l2 * l2_reg
            loss.backward()
            clip_grad_norm_(model.parameters(), max_norm=args.grad_clip)
            optimizer.step()
            scheduler.step()  # OneCycleLR steps per batch

        # NOTE(review): train_loss is computed but never consumed — kept for
        # parity with the non-tuning training script / debugging.
        train_loss /= len(train_dataloader)

        model.eval()
        with torch.no_grad():
            for orbitData_pre, orbitData_suf, stampData_pre, stampData_suf in val_dataloader:
                src = orbitData_pre.to(device)
                src_mark = stampData_pre.to(device)
                target = orbitData_suf.to(device)
                target_mark = stampData_suf.to(device)
                if args.model == "PathFormer":
                    pred, balance_loss = model(src, src_mark, target, target_mark)
                else:
                    pred = model(src, src_mark, target, target_mark)
                if config.outputSize == 1:
                    loss = criterion(pred, target[:, :config.predicting_length, config.outputIdx:config.outputIdx + 1])
                else:
                    loss = criterion(pred, target[:, :config.predicting_length, :config.outputSize])
                val_loss += loss.detach().item()

            val_loss /= len(val_dataloader)
            trial.report(val_loss, epoch)

        # Abort this trial if the pruner deems it unpromising.
        if trial.should_prune():
            raise optuna.exceptions.TrialPruned()

        # Early stopping on the validation loss.
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            patience_counter = 0
        else:
            patience_counter += 1

        if patience_counter >= args.early_stopping_patience:
            print(f"Early stopping at epoch {epoch}, best val_loss={best_val_loss:.6f}")
            break

    # Optuna judges the trial by this value.
    return best_val_loss

if __name__ == "__main__":
    # Optionally wipe artifacts of previous runs before starting a new search.
    clean = True
    if clean:
        clean_directory()

    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=str, default="itransformer")
    args = parser.parse_args()
    # init_args presumably fills model defaults into args (comes from the
    # orbitP.script.util star import) — TODO confirm.
    args = init_args(args)

    # Observed orbits, externally predicted orbits, and their timestamp features.
    df_obserData = pd.read_csv(config.dataSetDir + 'df_obsData.csv')
    df_prdData = pd.read_csv(config.dataSetDir + 'df_prdData.csv')
    df_stampObs = pd.read_csv(config.dataSetDir + 'df_stampObs.csv')
    df_stampPrd = pd.read_csv(config.dataSetDir + 'df_stampPrd.csv')
    obsData = df_obserData.to_numpy(dtype=np.float32)
    prdData = df_prdData.to_numpy(dtype=np.float32)
    stampObs = df_stampObs.to_numpy(dtype=np.float32)
    stampPrd = df_stampPrd.to_numpy(dtype=np.float32)

    # splitDataset returns a (train, val, test) triple per array; sca=True
    # presumably enables scaling — verify in orbitP.script.util.
    obsData, prdData, stampObs, stampPrd = splitDataset(obsData, prdData, stampObs, stampPrd, runType='train', sca=True)
    obsData_train = obsData[0];obsData_val = obsData[1];obsData_test = obsData[2]
    prdData_train = prdData[0];prdData_val = prdData[1];prdData_test = prdData[2]
    stampObs_train = stampObs[0];stampObs_val = stampObs[1];stampObs_test = stampObs[2]
    stampPrd_train = stampPrd[0];stampPrd_val = stampPrd[1];stampPrd_test = stampPrd[2]

    # `days` is passed as a float (true division); the dataset presumably
    # derives its length from it — TODO confirm intended rounding behavior.
    train_dataset = orbitPSULTDataset(obsData=obsData_train, prdData=prdData_train, stampObs=stampObs_train,
                                      stampPrd=stampPrd_train, training_length=config.training_length,
                                      predicting_length=config.predicting_length,
                                      forecast_window=config.forecast_window,
                                      days=obsData_train.shape[0] / config.training_length)
    val_dataset = orbitPSULTDataset(obsData=obsData_val, prdData=prdData_val, stampObs=stampObs_val,
                                    stampPrd=stampPrd_val, training_length=config.training_length,
                                    predicting_length=config.predicting_length, forecast_window=config.forecast_window,
                                    days=prdData_val.shape[0] / config.predicting_length)
    test_dataset = orbitPSULTDataset(obsData=obsData_test, prdData=prdData_test, stampObs=stampObs_test,
                                     stampPrd=stampPrd_test, training_length=config.training_length,
                                     predicting_length=config.predicting_length, forecast_window=config.forecast_window,
                                     days=prdData_test.shape[0] / config.predicting_length)

    MAX_TRIALS = 30  # adjustable; with pruning, few trials run the full 50-epoch training

    # MedianPruner decides based on the median of earlier trials; PatientPruner
    # wraps it so a trial is only pruned after a stretch without improvement.
    pruner = optuna.pruners.PatientPruner(
        optuna.pruners.MedianPruner(
            n_startup_trials=5,     # never prune the first 5 trials
            n_warmup_steps=10,      # never prune within a trial's first 10 epochs
            interval_steps=5        # re-evaluate pruning every 5 epochs
        ),
        patience=10                 # tolerate 10 reported steps without improvement first
    )

    # SQLite storage + load_if_exists lets the study resume across process runs.
    study = optuna.create_study(
        study_name="orbitPrediction",
        direction="minimize",
        storage=f"sqlite:///{config.saveDir}/optuna_orbit.db",
        pruner=pruner,
        load_if_exists=True
    )

    # Budget accounting: only COMPLETE trials count; pruned/failed ones don't.
    already_done = len([t for t in study.trials if t.state == optuna.trial.TrialState.COMPLETE])
    remaining = max(0, MAX_TRIALS - already_done)

    if remaining > 0:
        # NOTE(review): MaxTrialsCallback is imported from the private
        # optuna._callbacks module; the public path is optuna.study.MaxTrialsCallback.
        study.optimize(
            objective,
            n_trials=remaining,
            callbacks=[MaxTrialsCallback(MAX_TRIALS)],
            show_progress_bar=True
        )
    else:
        print(f"已有 {already_done} 个 trial，不再继续。")

    # NOTE(review): study.best_params / best_value raise if no trial completed.
    best_params = study.best_params
    save_path = os.path.join(config.saveDir, "best_params.txt")
    with open(save_path, "w", encoding="utf-8") as f:
        f.write(f"最佳验证损失: {study.best_value}\n")
        f.write("最佳超参数:\n")
        for key, value in best_params.items():
            f.write(f"{key}: {value}\n")

    print(f'最佳超参数: {best_params}')
    print(f'最佳验证损失: {study.best_value}')
    # Hyperparameter importances (fANOVA by default) for the finished study.
    imps = get_param_importances(study)
    print(f"搜索比重: {imps}")