import sys
import math

from timm.utils import ModelEmaV2
from torch.nn.utils import clip_grad_norm_
from torch.optim.lr_scheduler import LambdaLR, LinearLR, CosineAnnealingLR, SequentialLR, OneCycleLR, ReduceLROnPlateau
from torch.optim import AdamW
from orbitP.model.TimeMixer import TimeMixer
# from transformers import get_cosine_schedule_with_warmup
from orbitP.script.loss import WeightedMSELoss, HuberLoss, PMLLoss, MultiStepWeightedLoss, nllLoss
from orbitP.model.Transformer import Transformer
from orbitP.model.ITransformer import ITransformer
from orbitP.model.LSTM import Lstm
from orbitP.model.PathFormer import PathFormer
from orbitP.model.PatchTST import PatchTST
from orbitP.model.crossformer import crossformer
from orbitP.model.MLP import MLP
import torch
import logging
from tqdm import tqdm
from orbitP.script.util import *
import torch
import torch.nn as nn
import pandas as pd
import random
from orbitP.script import config

def _prediction_loss(criterion, pred, target):
    """Compute the loss between *pred* and the supervised slice of *target*.

    The target is truncated to the first ``config.predicting_length`` time steps
    and to the configured output channels: a single channel at
    ``config.outputIdx`` when ``config.outputSize == 1``, otherwise the leading
    ``config.outputSize`` channels.
    """
    if config.outputSize == 1:
        sliced = target[:, :config.predicting_length, config.outputIdx:config.outputIdx + 1]
    else:
        sliced = target[:, :config.predicting_length, :config.outputSize]
    return criterion(pred, sliced)


def train_SULT_Seq2Seq(train_dataloader, val_dataloader, args):
    """Train a sequence-to-sequence orbit-prediction model.

    Builds the model named by ``args.model``, trains it for ``args.EPOCH``
    epochs with Adam + OneCycleLR (stepped per batch) and an EMA shadow copy,
    validates after every epoch, checkpoints every 5 epochs plus on each new
    best validation loss, and saves a final checkpoint at the end.

    Args:
        train_dataloader: yields ``(orbit_pre, orbit_suf, stamp_pre, stamp_suf)``
            batches for training.
        val_dataloader: same batch structure, used for per-epoch validation.
        args: namespace providing at least ``model``, ``EPOCH``, ``lr``,
            ``lambda_l2`` and ``grad_clip``.

    Returns:
        float: mean validation loss over the final ~20% of epochs
        (``epoch >= 0.8 * args.EPOCH``), or ``0.0`` if no epoch fell in that
        tail (e.g. very small ``args.EPOCH``).
    """
    device = torch.device(config.device)

    # Dispatch table instead of a long if/elif chain; keys mirror the accepted
    # values of args.model exactly (case-sensitive, as before).
    model_registry = {
        'itransformer': ITransformer,
        'lstm': Lstm,
        'PathFormer': PathFormer,
        'PatchTST': PatchTST,
        'crossformer': crossformer,
        'TimeMixer': TimeMixer,
        'MLP': MLP,
        'Transformer': Transformer,
    }
    try:
        model = model_registry[args.model](args).to(device)
    except KeyError:
        print("Model Not Found")
        sys.exit(1)  # BUGFIX: exit non-zero on misconfiguration (was exit(0))

    total_steps = len(train_dataloader) * args.EPOCH
    ema = ModelEmaV2(model, decay=0.9999, device=device)

    optimizer = torch.optim.Adam(model.parameters(), lr=float(args.lr))

    scheduler = OneCycleLR(
        optimizer,
        max_lr=float(args.lr),
        total_steps=total_steps,
        pct_start=0.1,           # 10% of steps spent warming up
        anneal_strategy='cos',
        div_factor=10,           # initial lr = max_lr / 10
        final_div_factor=20,     # min lr = initial lr / 20
        three_phase=False,
        cycle_momentum=False,    # Adam has no momentum buffers to cycle
    )

    # Warm-start from a pretrained model, or fully resume (model + optimizer +
    # scheduler + EMA) from a prior checkpoint. map_location keeps checkpoints
    # loadable even when they were saved on a different device.
    if config.usePTModel:
        model.load_state_dict(
            torch.load(config.loadPTDir + config.loadModel, map_location=device))
    elif config.resumeModel != 0:
        model.load_state_dict(
            torch.load(config.loadDir + config.loadModel, map_location=device))
        optimizer.load_state_dict(
            torch.load(config.loadDir + config.loadOptim, map_location=device))
        scheduler.load_state_dict(
            torch.load(config.loadDir + config.loadSche, map_location=device))
        ema.load_state_dict(
            torch.load(config.loadDir + config.loadEma, map_location=device))
        print("load model success!")

    criterion = nllLoss() if config.nll == 1 else nn.MSELoss()

    best_val_loss = float('inf')
    last_loss = 0.0
    tail_epochs = 0  # epochs that actually contributed to last_loss

    for epoch in range(config.resumeModel, args.EPOCH):
        # ---- training ----
        train_loss = 0.0
        model.train()
        train_bar = tqdm(train_dataloader, total=len(train_dataloader))
        for orbit_pre, orbit_suf, stamp_pre, stamp_suf in train_bar:
            optimizer.zero_grad()
            train_bar.set_description(f"train{epoch + 1}")
            src = orbit_pre.to(device)
            src_mark = stamp_pre.to(device)
            target = orbit_suf.to(device)
            target_mark = stamp_suf.to(device)

            if args.model == "PathFormer":
                pred, balance_loss = model(src, src_mark, target, target_mark)
            else:
                pred = model(src, src_mark, target, target_mark)

            loss = _prediction_loss(criterion, pred, target)
            train_loss += loss.detach().item()

            # PathFormer supplies its own routing (load-balance) penalty;
            # every other model gets explicit L2 regularization instead.
            if args.model == "PathFormer":
                loss = loss + balance_loss
            else:
                loss = loss + args.lambda_l2 * getL2(model)

            loss.backward()
            clip_grad_norm_(model.parameters(), max_norm=args.grad_clip)
            optimizer.step()
            scheduler.step()  # OneCycleLR steps per batch, not per epoch
            ema.update(model)
            train_bar.set_postfix({"loss": loss.detach().item(),
                                   "lr": optimizer.param_groups[0]['lr']})

        train_loss /= len(train_dataloader)

        # ---- validation ----
        model.eval()
        val_loss = 0.0
        with torch.no_grad():
            val_bar = tqdm(val_dataloader, total=len(val_dataloader))
            for orbit_pre, orbit_suf, stamp_pre, stamp_suf in val_bar:
                val_bar.set_description(f"val{epoch + 1}")
                src = orbit_pre.to(device)
                src_mark = stamp_pre.to(device)
                target = orbit_suf.to(device)
                target_mark = stamp_suf.to(device)

                if args.model == "PathFormer":
                    pred, balance_loss = model(src, src_mark, target, target_mark)
                else:
                    pred = model(src, src_mark, target, target_mark)

                loss = _prediction_loss(criterion, pred, target)
                val_bar.set_postfix({"loss": loss.item()})
                val_loss += loss.item()

            val_loss /= len(val_dataloader)

        # Accumulate the final ~20% of epochs; their mean is the return value.
        if epoch >= 0.8 * args.EPOCH:
            last_loss += val_loss
            tail_epochs += 1

        save_loss(epoch, train_loss, val_loss, trainFlag=True, valFlag=True)
        if (epoch + 1) % 5 == 0:
            save_model(epoch, model, optimizer, scheduler, ema)
        if val_loss < best_val_loss:
            best_val_loss = val_loss  # min() was redundant inside this branch
            save_best_model(args.EPOCH - 1, model)

    save_last_model(args.EPOCH - 1, model, optimizer, scheduler, ema)
    # BUGFIX: the old `last_loss / int(0.2 * args.EPOCH)` raised
    # ZeroDivisionError for EPOCH <= 4 and overcounted the denominator when
    # resuming past the 80% mark; divide by the epochs actually accumulated.
    return last_loss / tail_epochs if tail_epochs else 0.0