import torch.nn as nn
import torch
import os
import numpy as np
from data.datautils import batch_x_ffts
from loss import NTXentLoss_poly_2D,NTXentLoss_poly_4D
from metrics import metric


def Trainer(model,optimizer,train_dl,test_dl,valid_dl,configs,training_mode,logger,decoder,experiment_log_dir):
    """Top-level training driver.

    In 'pre_train' mode, runs `model_pretrain` for `configs.EPOCHES` epochs and
    saves a final checkpoint under `<experiment_log_dir>/saved_models/ckp_last.pt`.
    Other modes currently only log start/end messages.

    Args:
        model: the model to train (must expose state_dict()).
        optimizer: torch optimizer driving the pretraining updates.
        train_dl / test_dl / valid_dl: dataloaders (only train_dl is used here).
        configs: config object (EPOCHES, DEVICE, ... read by model_pretrain).
        training_mode: 'pre_train' triggers the pretraining loop.
        logger: logger for start/end debug messages.
        decoder: unused here; kept for interface compatibility.
        experiment_log_dir: directory where checkpoints are written.
    """
    logger.debug("Training started ....")

    criterion = nn.MSELoss()
    # Reduce the LR when the monitored loss plateaus.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min')
    if training_mode == 'pre_train':
        print('——————————Pretraining————————————')
        for epoch in range(1, configs.EPOCHES + 1):
            train_loss, train_series = model_pretrain(model, optimizer, criterion, train_dl, configs, configs.DEVICE, training_mode)
            # BUG FIX: the scheduler was constructed but never stepped, so the
            # learning rate never adapted. ReduceLROnPlateau needs the metric.
            scheduler.step(train_loss)
            print("training_loss:", train_loss)
        save_dir = os.path.join(experiment_log_dir, "saved_models")
        os.makedirs(save_dir, exist_ok=True)
        checkpoint = {'model_state_dict': model.state_dict()}
        save_path = os.path.join(save_dir, 'ckp_last.pt')
        torch.save(checkpoint, save_path)
        # BUG FIX: the original message concatenated the path pieces without
        # separators and printed a path that does not exist.
        print('Pretrained model is stored at folder:{}'.format(save_path))

    logger.debug("\n################## Training is Done! #########################")

def model_eval(model,optimizer,criterion,valid_dl,configs,device,training_mode,decoder):
    """Run the model over the validation loader and print MSE/MAE.

    Args:
        model: model exposing `time_encoder` and `decoder` callables.
        optimizer / criterion / configs / training_mode / decoder: unused here;
            kept so the call signature matches the (commented-out) call sites.
        valid_dl: validation dataloader yielding (x, y, x_mark, y_mark).
        device: torch device inputs are moved to before the forward pass.

    Returns:
        None (metrics are printed).
    """
    print('——————————EVAL——————————')
    model.eval()
    preds = []
    trues = []
    # BUG FIX: wrap evaluation in no_grad() — the original built autograd graphs
    # for every batch, wasting time and memory during pure inference.
    with torch.no_grad():
        for batch_x, batch_y, batch_x_mark, batch_y_mark in valid_dl:
            # BUG FIX: move the input to `device` (the original left it on CPU,
            # which crashes when the model lives on GPU; model_pretrain does this).
            pred = model.time_encoder(batch_x.to(torch.float32).to(device))
            pred = model.decoder(pred)
            preds.append(pred.detach().cpu().numpy())
            trues.append(batch_y.detach().cpu().numpy())
    preds = np.array(preds)
    trues = np.array(trues)
    # Collapse (num_batches, batch, ...) into (num_samples, pred_len, vars).
    # NOTE(review): np.array over the list assumes every batch has the same
    # size — confirm drop_last=True on the validation loader.
    preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])
    trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])
    mae, mse, rmse, mape, mspe = metric(preds, trues)
    print('mse:{}, mae:{}'.format(mse, mae))
    return


def model_pretrain(model,optimizer,criterion,train_dl,configs,device,training_mode):
    """Run one pretraining epoch combining contrastive and forecasting losses.

    Per batch: the series is split into patches (time domain), transformed with
    `batch_x_ffts` (frequency domain), fed through `model.fit`, and optimized on
    LAMUBDA * (time/freq contrastive + patch/long-series contrastive)
    + (1 - LAMUBDA) * MSE forecasting loss.

    Args:
        model: model exposing `fit(patch, fft_patch, series)` returning
            (time_x, feq_x, time_long_x, pred).
        optimizer: torch optimizer to step per batch.
        criterion: forecasting loss (MSE) between pred and batch_y.
        train_dl: dataloader yielding (x, y, x_mark, y_mark).
        configs: hyper-parameters (BATCH_SIZE, PATCHES, PRED_LEN, VARS,
            TEMPERATURE, USE_COSIN_SIMILARITY, LAMUBDA, DEVICE).
        device: torch device for tensors.
        training_mode: unused here; kept for interface compatibility.

    Returns:
        (average epoch loss as a 0-d tensor, last batch's long-series embedding
        — None if the loader yielded no batches).
    """
    total_loss = []
    model.train()
    print('model_pretrain starting')

    # Hoisted out of the loop: these criteria depend only on configs, and the
    # original rebuilt them on every batch for no reason.
    nt_criterion_4 = NTXentLoss_poly_4D(configs.DEVICE, configs.BATCH_SIZE, configs.TEMPERATURE, configs.PATCHES, configs.USE_COSIN_SIMILARITY)
    nt_criterion_2 = NTXentLoss_poly_2D(configs.DEVICE, configs.BATCH_SIZE, configs.TEMPERATURE, configs.PATCHES, configs.USE_COSIN_SIMILARITY)

    time_long_x = None  # BUG FIX: was undefined (NameError) when train_dl is empty
    for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(train_dl):
        # BUG FIX: gradients must be cleared per batch. The original called
        # zero_grad() once before the loop, so gradients accumulated across
        # all batches of the epoch.
        optimizer.zero_grad()

        # Split into patches: batch_size * patch * pred_len * feature.
        # NOTE(review): view() assumes every batch has exactly BATCH_SIZE
        # samples; a partial last batch will raise — confirm drop_last=True.
        batch_x_patch = batch_x.view(configs.BATCH_SIZE, configs.PATCHES, configs.PRED_LEN, configs.VARS).to(torch.float32).to(device)
        # Frequency-domain counterpart of the patches, same layout.
        batch_x_fft_patch = batch_x_ffts(batch_x_patch, device).to(torch.float32)
        batch_x = batch_x.to(torch.float32).to(device)

        time_x, feq_x, time_long_x, pred = model.fit(batch_x_patch, batch_x_fft_patch, batch_x)

        # Contrastive similarity between time- and frequency-domain patch
        # embeddings (negatives within the time domain are included too).
        loss_1 = nt_criterion_4(time_x, feq_x)
        # Similarity between patch-level and full-series time embeddings;
        # time_x is reshaped to match time_long_x's dimensions.
        loss_2 = nt_criterion_2(time_x.view(time_long_x.size()).to(torch.float32), time_long_x)
        loss_pred = criterion(pred.to(torch.float32).to(device), batch_y.to(torch.float32).to(device))
        # LAMUBDA trades off contrastive vs. forecasting objectives.
        loss = configs.LAMUBDA * (loss_1 + loss_2) + (1 - configs.LAMUBDA) * loss_pred
        total_loss.append(loss.item())
        loss.backward()
        optimizer.step()

    ave_loss = torch.tensor(total_loss).mean()
    return ave_loss, time_long_x