import os
import time
from typing import IO

import numpy as np
import torch
import torch.optim as optim
from tqdm import tqdm

import models
from models import WorkerArguments, create_model, get_model_save_dir
from utils.draw import *
from utils.data_loader import *
from utils.tools import TopkMSELoss, metric
from feasytools import ArgChecker


def prepare_dataloader(args:WorkerArguments, shuffle:bool, drop_last:bool, flag:str):
    """Build the traffic dataset for the given split and wrap it in a DataLoader.

    Args:
        args: worker configuration (provides data path and batch size).
        shuffle: whether the loader shuffles samples each epoch.
        drop_last: whether an incomplete final batch is dropped.
        flag: dataset split name (e.g. "train", "test", "val").

    Returns:
        (dataloader, dataset) for the requested split.
    """
    dataset = Dataset_Traffic(args.path, args, flag)
    print(flag, len(dataset))
    loader = DataLoader(
        dataset,
        batch_size=args.batch_size,
        shuffle=shuffle,
        num_workers=0,  # single-process loading
        drop_last=drop_last,
    )
    return loader, dataset


def sample_mining_scheduler(epoch:int, batch_size:int):
    """Anneal how many hardest samples are kept for the top-k loss.

    Epochs 0-1 keep the whole batch; epochs 2-3 keep a shrinking fraction
    (3/4, then 2/3); from epoch 4 onward only half the batch is kept.
    """
    if epoch >= 4:
        return int(0.5 * batch_size)
    if epoch >= 2:
        return int(batch_size * (5 - epoch) / (6 - epoch))
    return batch_size


def train_epoch(model:torch.nn.Module, train_dataset:Dataset_Traffic, training_loader:DataLoader, 
    optimizer:optim.Optimizer, opt:WorkerArguments, epoch:int):
    """ Epoch operation in training phase.

    Runs one optimization pass over `training_loader` (per-batch backprop)
    and returns the mean squared error per predicted element over the epoch.
    """

    model.train()
    total_loss = 0  # running sum of per-element losses over the epoch
    total_pred_number = 0  # running count of loss elements (for averaging)
    for batch in tqdm(training_loader, mininterval=2,
                      desc='  - (Training)   ', leave=False):
        batch:list[torch.Tensor]
        # prepare data: (inputs, targets, input time marks, target time marks)
        batch_x, batch_y, batch_x_mark, batch_y_mark = map(lambda x: x.float().to(opt.device), batch)
        # prepare predict token: all-zero decoder input shaped like the target
        dec_inp = torch.zeros_like(batch_y).float()
        optimizer.zero_grad()

        if isinstance(model, models.Pyraformer):
            # forward
            if opt.decoder == 'attention':
                if opt.pretrain and epoch < 1:
                    # Pretraining pass (last flag True): the target is extended
                    # with the input window, so the model is scored on
                    # reconstructing history + future together.
                    outputs = model(batch_x, batch_x_mark, dec_inp, batch_y_mark, True)
                    batch_y = torch.cat([batch_x, batch_y], dim=1)
                else:
                    outputs = model(batch_x, batch_x_mark, dec_inp, batch_y_mark, False)
            elif opt.decoder == 'FC':
                # Add a predict token into the history sequence
                predict_token = torch.zeros(batch_x.size(0), 1, batch_x.size(-1), device=batch_x.device)
                batch_x = torch.cat([batch_x, predict_token], dim=1)
                batch_x_mark = torch.cat([batch_x_mark, batch_y_mark[:, 0:1, :]], dim=1)
                outputs = model(batch_x, batch_x_mark, dec_inp, batch_y_mark, False)
        else:
            # NOTE(review): non-Pyraformer models receive the ground truth
            # batch_y as the third argument instead of the zeroed dec_inp —
            # confirm the model's forward() does not read it as decoder input,
            # otherwise this leaks the target during training.
            outputs = model(batch_x, batch_x_mark, batch_y, batch_y_mark, False)
        
        # ZeroMax-scaled data presumably lives in [-1, 1]; clamp predictions
        # to that range so the loss is not dominated by out-of-range outputs.
        if isinstance(train_dataset.scaler,ZeroMaxScaler): outputs = outputs.clamp(-1,1)
        # determine the loss function
        if opt.hard_sample_mining and not (opt.pretrain and epoch < 1):
            # Hard-sample mining: keep only the topk largest losses per batch;
            # topk shrinks with the epoch (see sample_mining_scheduler).
            topk = sample_mining_scheduler(epoch, batch_x.size(0))
            criterion = TopkMSELoss(topk)
        else:
            criterion = torch.nn.MSELoss(reduction='none')

        # if inverse, both the output and the ground truth are denormalized.
        if opt.inverse:
            outputs, batch_y = train_dataset.inverse_transform(outputs, batch_y)
        # compute loss
        losses:torch.Tensor = criterion(outputs, batch_y)
        loss = losses.mean()
        loss.backward()

        """ update parameters """
        optimizer.step()
        total_loss += losses.sum().item()
        total_pred_number += losses.numel()

    return total_loss / total_pred_number


def eval_epoch(model:torch.nn.Module, test_dataset:Dataset_Traffic, test_loader:DataLoader, 
    opt: WorkerArguments, epoch:int, fp:IO, draw:bool = False, save_root:str="results"):
    """ Epoch operation in evaluation phase.

    Runs the model over `test_loader` without gradients, optionally plots the
    predictions (draw=True), logs metrics to `fp`, and returns
    (mse, mae, rmse, mape, mspe).
    """
    model.eval()
    preds = []  # per-batch predictions as numpy arrays
    trues = []  # per-batch ground truths as numpy arrays
    desc = "Draw" if draw else "Validation"
    with torch.no_grad():
        for batch in tqdm(test_loader, mininterval=2, desc=f'  - ({desc}) ', leave=False):
            """ prepare data """
            batch:tuple[torch.Tensor,torch.Tensor,torch.Tensor,torch.Tensor]

            # (inputs, targets, input time marks, target time marks)
            batch_x, batch_y, batch_x_mark, batch_y_mark = map(lambda x: x.float().to(opt.device), batch)
            # zero decoder input shaped like the target
            dec_inp = torch.zeros_like(batch_y).float()

            if isinstance(model, models.Pyraformer) or (isinstance(model, models.Transformer) and opt.decoder == 'attention'):
                # forward
                if opt.decoder == 'FC':
                    # Add a predict token into the history sequence
                    predict_token = torch.zeros(batch_x.size(0), 1, batch_x.size(-1), device=batch_x.device)
                    batch_x = torch.cat([batch_x, predict_token], dim=1)
                    batch_x_mark = torch.cat([batch_x_mark, batch_y_mark[:, 0:1, :]], dim=1)
                outputs = model(batch_x, batch_x_mark, dec_inp, batch_y_mark, False)
            else:
                # Other model types ignore decoder-side inputs at eval time.
                outputs = model(batch_x, batch_x_mark, None, None, False)

            # ZeroMax-scaled data presumably lives in [-1, 1]; clamp to match.
            if isinstance(test_dataset.scaler,ZeroMaxScaler): outputs = outputs.clamp(-1,1)
            # if inverse, both the output and the ground truth are denormalized.
            if opt.inverse:
                outputs, batch_y = test_dataset.inverse_transform(outputs, batch_y)

            pred = outputs.detach().cpu().numpy()
            true = batch_y.detach().cpu().numpy()

            preds.append(pred)
            trues.append(true)
    
    preds = np.concatenate(preds, axis=0)
    trues = np.concatenate(trues, axis=0)
    
    if draw: # plotting mode: 7 days per week
        # Reshape to (sequence length, num windows, output features) for
        # plotting; assumes the dataset size divides evenly — TODO confirm.
        preds = preds.reshape(test_dataset._slen,-1,test_dataset.out_fea)
        trues = trues.reshape(test_dataset._slen,-1,test_dataset.out_fea)
        print('test shape:{}'.format(preds.shape))
        plot_results(preds,trues,opt.data,save_root)
    else:
        print('test shape:{}'.format(preds.shape))
    
    mae, mse, rmse, mape, mspe = metric(preds, trues)
    print('Epoch {}, mse:{}, mae:{}, rmse:{}, mape:{}, mspe:{}'.format(epoch, mse, mae, rmse, mape, mspe))
    fp.write(f"    [Epoch {epoch}] Test: mse:{mse}, mae:{mae}, rmse:{rmse}, mape:{mape}, mspe:{mspe}\n")

    return mse, mae, rmse, mape, mspe

def train(model:torch.nn.Module, optimizer:optim.Optimizer, 
    scheduler:optim.lr_scheduler.StepLR, opt:WorkerArguments, model_save_dir:str, fp:IO):
    """Train `model` for `opt.epoch` epochs, checkpointing on test-MSE improvement.

    Each epoch trains on the train split, evaluates on the test split, steps
    the LR scheduler, and saves the weights whenever test MSE hits a new low.

    Returns:
        The metric list [mse, mae, rmse, mape, mspe] of the best epoch.
    """
    # prepare dataloaders for both splits
    train_loader, train_set = prepare_dataloader(opt, True, True, "train")
    test_loader, test_set = prepare_dataloader(opt, False, False, "test")

    best_mse = 100000000
    best_metrics = []
    for epoch_idx in range(opt.epoch):
        print('[ Epoch', epoch_idx, ']')

        t0 = time.time()
        train_mse = train_epoch(model, train_set, train_loader, optimizer, opt, epoch_idx)
        minutes = (time.time() - t0) / 60
        print('  - (Training) '
              'MSE: {mse: 8.5f}  '
              'elapse: {elapse:3.3f} min'
              .format(mse=train_mse, elapse=minutes))
        fp.write(f"    [Epoch {epoch_idx}] Train: mse:{train_mse:8.5f}, elapse:{minutes:3.3f} min\n")

        mse, mae, rmse, mape, mspe = eval_epoch(model, test_set, test_loader, opt, epoch_idx, fp)

        scheduler.step()

        if mse < best_mse:
            best_mse = mse
            best_metrics = [float(mse), float(mae), float(rmse), float(mape), float(mspe)]
            # keep only the best-performing weights on disk
            torch.save(
                {
                    "state_dict": model.state_dict(),
                    "metrics": best_metrics
                },
                model_save_dir
            )

    return best_metrics


def evaluate(model:torch.nn.Module, opt:WorkerArguments, model_save_dir:str, fp:IO, draw:bool = False):
    """Load the best checkpoint from `model_save_dir` and run one eval pass.

    Args:
        model: model instance whose weights will be overwritten from the checkpoint.
        opt: worker configuration (device, batch size, ...).
        model_save_dir: path of the saved checkpoint file.
        fp: open log file the metrics are appended to.
        draw: when True, eval_epoch also plots predictions into a results folder.

    Returns:
        [mse, mae, rmse, mape, mspe] as floats.
    """
    """ prepare dataloader """
    eval_dataloader, eval_dataset = prepare_dataloader(opt,False,False,"val")

    """ load pretrained model """
    # map_location lets a checkpoint saved on GPU load on whatever device
    # this run uses (e.g. a CPU-only host); without it torch.load fails.
    checkpoint = torch.load(model_save_dir, map_location=opt.device)["state_dict"]
    model.load_state_dict(checkpoint)

    st = time.time()
    mse, mae, rmse, mape, mspe = eval_epoch(model, eval_dataset, eval_dataloader, opt, 0, 
        fp, draw, model_save_dir[:model_save_dir.rfind('/')]+"/results")
    dur = time.time()-st
    # len(dataset)/batch_size = number of batches, so this is batches/second
    fp.write(f"  Duration: {dur:.2f}s, Speed: {len(eval_dataset)/(opt.batch_size*dur)} it/s\n")

    return [float(mse), float(mae), float(rmse), float(mape), float(mspe)]


def main(opt:WorkerArguments, iter_index:int, model_save_dir:str, fp:IO):
    """Build a model, then train or evaluate it once, logging to `fp`.

    Args:
        opt: worker configuration; `opt.mode` selects train/eval/draw.
        iter_index: index of this repetition (used in the checkpoint name).
        model_save_dir: directory prefix the checkpoint filename is appended to.
        fp: open log file.

    Returns:
        The best metric list for this iteration.
    """
    print('[Info] parameters: {}'.format(opt))

    model = create_model(opt)

    # count trainable parameters only
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('[Info] Number of parameters: {}'.format(trainable_params))
    fp.write(f"Iter {iter_index}\n")
    fp.write(f"  #Params: {trainable_params}\n")

    model_save_dir += 'best_iter{}.pth'.format(iter_index)
    if opt.mode == 'train':
        # optimizer over trainable parameters, stepwise LR decay every epoch
        optimizer = optim.Adam(filter(lambda x: x.requires_grad, model.parameters()), opt.lr)
        scheduler = optim.lr_scheduler.StepLR(optimizer, 1, gamma=opt.lr_step)
        best_metrics = train(model, optimizer, scheduler, opt, model_save_dir, fp)
    else:
        best_metrics = evaluate(model, opt, model_save_dir, fp, opt.mode == 'draw')

    print('Iteration best metrics: {}'.format(best_metrics))
    fp.write(f"  Iter best metrics: {best_metrics}\n")
    return best_metrics

if __name__ == '__main__':
    opt = WorkerArguments(ArgChecker())
    iter_num = opt.iter_num
    model_save_dir = get_model_save_dir(opt)
    os.makedirs(model_save_dir, exist_ok=True)
    # Context manager guarantees the log file is flushed and closed even if
    # an iteration raises (the original left it open on error).
    with open(model_save_dir + f"log_{opt.mode}.txt", "w") as fp:
        # run iter_num independent train/eval repetitions
        all_perf = [main(opt, i, model_save_dir, fp) for i in range(iter_num)]
        # average metrics across repetitions
        all_perf = list(np.array(all_perf).mean(0))
        print('Average Metrics: {}'.format(all_perf))
        fp.write(f"Average Metrics: {all_perf}\n")
