import torch
from torchvision.utils import make_grid
from einops import rearrange,repeat
from torch import optim
from torch.optim.lr_scheduler import CosineAnnealingLR, ExponentialLR,CosineAnnealingWarmRestarts

from utils.vis import vis
from layer.models import iTransformer, PatchLinear, LinearModel, LinearModelV2, MaskFormer, LinearModelV3,FreqFormer
import torch.nn.functional as F
import lightning as L
from torchmetrics.regression import MeanAbsoluteError, MeanSquaredError
from torch import nn
from layer.metirc import MAELoss, MSELoss, MSE_MAE_Loss, ChannelMaskedLoss

# Registry of selectable training criteria; keys match the `metric`
# constructor argument of the Lightning modules below.
loss_fn = dict(
    mse=MSELoss(),
    mae=MAELoss(),
    mse_mae=MSE_MAE_Loss(),
    smooth_l1=nn.SmoothL1Loss(beta=0.5),
)


class Predictor(L.LightningModule):
    """Forecasting wrapper that trains one of several backbone models to map a
    look-back window to a prediction window.

    Batches are ``(x, y, x_mark, _)`` with ``x``/``y`` shaped
    ``(batch, window, channel)``; both are transposed to
    ``(batch, channel, window)`` before the backbone is called.
    An all-zero ``x_mark`` is treated as "no time features present"
    and ``None`` is passed to the backbone instead.
    """

    def __init__(self, dim, pre_win, look_back, channel,
                 learning_rate: float = 5e-5,
                 use_rev: bool = True,
                 d_state: int = 2,
                 metric: str = 'mse',
                 use_v_attn: bool = False, model: str = 'man'):
        """Build the selected backbone and the train/val/test metrics.

        Args:
            dim: backbone hidden dimension.
            pre_win: prediction window length.
            look_back: look-back window length.
            channel: number of input channels / series.
            learning_rate: Adam learning rate.
            use_rev, d_state, use_v_attn: kept for interface compatibility;
                not used by the backbones constructed here.
            metric: key into the module-level ``loss_fn`` registry.
            model: backbone selector; one of 'itrans', 'patchlinear',
                'linear', 'linearv2', 'linearv3'.

        Raises:
            ValueError: if ``model`` names no known backbone.
        """
        super().__init__()
        if model == 'itrans':
            self.model = iTransformer(c_in=channel, dim_model=dim, pre_win=pre_win, look_back=look_back)
        elif model == 'patchlinear':
            self.model = PatchLinear(c_in=channel, patch_win=6, stride=6, kernel_size=25, dim=dim,
                                     look_back=look_back, pre_win=pre_win)
        elif model == 'linear':
            self.model = LinearModel(c_in=channel, patch_win=8, stride=4, dim=dim, pre_win=pre_win,
                                     look_back=look_back)
        elif model == 'linearv2':
            self.model = LinearModelV2(c_in=channel, patch_win=8, stride=4, dim=dim, pre_win=pre_win,
                                       look_back=look_back)
        elif model == 'linearv3':
            self.model = LinearModelV3(c_in=channel, patch_win=16, stride=8, dim=dim, pre_win=pre_win,
                                       look_back=look_back)
        else:
            # Bugfix: previously an unknown name (including the default 'man')
            # left self.model unset and crashed much later with AttributeError.
            raise ValueError(f"unknown model name: {model!r}")
        self.lr = learning_rate
        self.pre_win = pre_win
        self.save_hyperparameters()
        self.testMAE = MeanAbsoluteError()
        self.testMSE = MeanSquaredError()
        self.MSE = MeanSquaredError()
        self.MAE = MeanAbsoluteError()

        self.metric = loss_fn[metric]

    def training_step(self, batch, batch_idx):
        """One optimization step: forward pass + configured loss."""
        x, y, x_mark, _ = batch
        x = rearrange(x, "b w c->b c w")
        y = rearrange(y, "b w c->b c w")
        # x_mark: (batch, seq_len, 4) time features; all-zeros is the
        # sentinel for "no time features".
        if torch.all(x_mark == 0):
            pre = self.model(x.float(), None)
        else:
            pre = self.model(x.float(), x_mark.float())
        loss = self.metric(pre.float(), y.float())

        self.log("train_loss", loss, on_epoch=True, prog_bar=True, on_step=False)
        return loss

    def on_train_epoch_end(self) -> None:
        # NOTE(review): the scheduler is stepped manually here *and* is also
        # registered in configure_optimizers with frequency=5, so Lightning
        # steps it again every 5th epoch — confirm this double-step is intended.
        sch = self.lr_schedulers()
        sch.step()
        lr = self.trainer.lr_scheduler_configs[0].scheduler.get_last_lr()[0]
        self.log('learning_rate', lr, on_step=False, on_epoch=True, prog_bar=True)

    def test_step(self, batch, batch_idx):
        """Accumulate test MSE/MAE over flattened predictions."""
        x, y, x_mark, _ = batch
        b = x.size(0)
        x = rearrange(x, "b w c->b c w")
        y = rearrange(y, "b w c->b c w")
        if torch.all(x_mark == 0):
            pre = self.model(x.float(), None)
        else:
            pre = self.model(x.float(), x_mark.float())
        self.testMSE.update(preds=pre.reshape(b, -1), target=y.reshape(b, -1))
        self.testMAE.update(preds=pre.reshape(b, -1), target=y.reshape(b, -1))

    def on_test_epoch_end(self) -> None:
        mse = self.testMSE.compute()
        mae = self.testMAE.compute()
        # Bugfix: reset after compute so a subsequent trainer.test() run does
        # not accumulate stale state (validation metrics were already reset).
        self.testMSE.reset()
        self.testMAE.reset()
        if self.trainer.is_global_zero:
            self.log_dict({"test_mse": mse.item(), "test_mae": mae.item()})

    def validation_step(self, batch, batch_idx):
        """Accumulate validation MSE/MAE over flattened predictions."""
        x, y, x_mark, _ = batch
        b = x.size(0)
        x = rearrange(x, "b w c->b c w")
        y = rearrange(y, "b w c->b c w")
        if torch.all(x_mark == 0):
            pre = self.model(x.float(), None)
        else:
            pre = self.model(x.float(), x_mark.float())
        self.MAE.update(preds=pre.reshape(b, -1), target=y.reshape(b, -1))
        self.MSE.update(preds=pre.reshape(b, -1), target=y.reshape(b, -1))

    def on_validation_epoch_end(self) -> None:
        # val_loss (MSE) drives checkpointing/early stopping in Lightning.
        valid_loss = self.MSE.compute()
        self.log("val_loss", valid_loss, on_epoch=True, prog_bar=True, on_step=False)
        self.MSE.reset()
        self.MAE.reset()

    def configure_optimizers(self):
        """Adam + warm-restart cosine schedule (T_0=5, T_mult=2)."""
        optimizer = optim.Adam(self.parameters(), lr=self.lr)
        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": CosineAnnealingWarmRestarts(optimizer, 5, 2),
                "interval": "epoch",
                "frequency": 5
            }
        }


class FreqPredictor(L.LightningModule):
    """Forecasting wrapper around the ``FreqFormer`` backbone.

    Same batch convention as ``Predictor``: ``(x, y, x_mark, _)`` with
    ``x``/``y`` shaped ``(batch, window, channel)``, transposed to
    ``(batch, channel, window)`` before the backbone call. In addition to
    the aggregate test metrics, per-channel MSE/MAE are tracked and
    collected into ``self.channel_result`` at test end.
    """

    def __init__(self, dim, pre_win, look_back, channel,
                 learning_rate: float = 5e-5,
                 use_rev: bool = True,
                 d_state: int = 2,
                 metric: str = 'mse',
                 use_v_attn: bool = False, model: str = 'man'):
        """Build the FreqFormer backbone plus aggregate and per-channel metrics.

        ``use_rev``, ``d_state``, ``use_v_attn`` and ``model`` are kept for
        interface compatibility; this class always constructs a FreqFormer.
        """
        super().__init__()
        self.model = FreqFormer(c_in=channel, patch_win=16, stride=8, dim=dim, pre_win=pre_win,
                                look_back=look_back)
        self.lr = learning_rate
        self.pre_win = pre_win
        self.save_hyperparameters()
        self.testMAE = MeanAbsoluteError()
        self.testMSE = MeanSquaredError()
        self.MSE = MeanSquaredError()
        self.MAE = MeanAbsoluteError()
        self.all_freq = []
        self.channel = channel
        # ModuleList so the per-channel metrics move devices with the module.
        self.channelMAE = nn.ModuleList([MeanAbsoluteError() for _ in range(channel)])
        self.channelMSE = nn.ModuleList([MeanSquaredError() for _ in range(channel)])
        self.metric = loss_fn[metric]
        self.channel_result = {}

    def training_step(self, batch, batch_idx):
        """One optimization step with a plain MSE objective.

        NOTE: trains with F.mse_loss directly; the configured ``self.metric``
        is currently unused here.
        """
        x, y, x_mark, _ = batch
        x = rearrange(x, "b w c->b c w")
        y = rearrange(y, "b w c->b c w")
        # x_mark: (batch, seq_len, 4) time features; all-zeros means "absent".
        if torch.all(x_mark == 0):
            pred = self.model(x.float(), None)
        else:
            pred = self.model(x.float(), x_mark.float())
        loss = F.mse_loss(pred, target=y.float())
        self.log("train_loss", loss, on_epoch=True, prog_bar=True, on_step=False)
        return loss

    def on_train_epoch_end(self) -> None:
        # NOTE(review): the scheduler is stepped manually here *and* is also
        # registered in configure_optimizers with frequency=5, so Lightning
        # steps it again every 5th epoch — confirm this double-step is intended.
        sch = self.lr_schedulers()
        sch.step()
        lr = self.trainer.lr_scheduler_configs[0].scheduler.get_last_lr()[0]
        self.log('learning_rate', lr, on_step=False, on_epoch=True, prog_bar=True)

    def test_step(self, batch, batch_idx):
        """Accumulate aggregate and per-channel test metrics."""
        x, y, x_mark, _ = batch
        b = x.size(0)
        x = rearrange(x, "b w c->b c w")
        y = rearrange(y, "b w c->b c w")

        if torch.all(x_mark == 0):
            pre = self.model(x.float(), None)
        else:
            pre = self.model(x.float(), x_mark.float())
        for c in range(x.size(1)):
            self.channelMAE[c].update(preds=pre[:, c, :].reshape(b, -1), target=y[:, c, :].reshape(b, -1))
            self.channelMSE[c].update(preds=pre[:, c, :].reshape(b, -1), target=y[:, c, :].reshape(b, -1))
        self.testMSE.update(preds=pre.reshape(b, -1), target=y.reshape(b, -1))
        self.testMAE.update(preds=pre.reshape(b, -1), target=y.reshape(b, -1))

    def on_test_epoch_end(self) -> None:
        mse = self.testMSE.compute()
        mae = self.testMAE.compute()
        for c in range(self.channel):
            self.channel_result[c] = {"mse": self.channelMSE[c].compute().item(),
                                      "mae": self.channelMAE[c].compute().item()}
        # Bugfix: reset after compute so a subsequent trainer.test() run does
        # not accumulate stale state (validation metrics were already reset).
        self.testMSE.reset()
        self.testMAE.reset()
        for c in range(self.channel):
            self.channelMSE[c].reset()
            self.channelMAE[c].reset()
        if self.trainer.is_global_zero:
            self.log_dict({"test_mse": mse.item(), "test_mae": mae.item()})
            # Debug introspection; assumes FreqFormer exposes channel_scale
            # and a revin module with gamma/beta — confirm against layer.models.
            print(self.channel_result)
            print(self.model.channel_scale)
            print(self.model.revin.gamma, self.model.revin.beta)

    def validation_step(self, batch, batch_idx):
        """Accumulate validation MSE/MAE over flattened predictions."""
        x, y, x_mark, _ = batch
        b = x.size(0)
        x = rearrange(x, "b w c->b c w")
        y = rearrange(y, "b w c->b c w")

        if torch.all(x_mark == 0):
            pre = self.model(x.float(), None)
        else:
            pre = self.model(x.float(), x_mark.float())

        self.MAE.update(preds=pre.reshape(b, -1), target=y.reshape(b, -1))
        self.MSE.update(preds=pre.reshape(b, -1), target=y.reshape(b, -1))

    def on_validation_epoch_end(self) -> None:
        # val_loss (MSE) drives checkpointing/early stopping in Lightning.
        valid_loss = self.MSE.compute()
        self.log("val_loss", valid_loss, on_epoch=True, prog_bar=True, on_step=False)
        self.MSE.reset()
        self.MAE.reset()

    def configure_optimizers(self):
        """Adam + warm-restart cosine schedule (T_0=5, T_mult=2)."""
        optimizer = optim.Adam(self.parameters(), lr=self.lr)
        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": CosineAnnealingWarmRestarts(optimizer, 5, 2),
                "interval": "epoch",
                "frequency": 5
            }
        }








class MaskTrainer(L.LightningModule):
    """Two-phase trainer for ``MaskFormer``: masked-reconstruction
    pretraining ('pretrain') for the first 10 epochs, then forecasting
    fine-tuning ('tune').

    Batch convention matches the other trainers: ``(x, y, x_mark, _)``
    with ``x``/``y`` shaped ``(batch, window, channel)``, transposed to
    ``(batch, channel, window)`` before the backbone call.
    """

    def __init__(self, dim, pre_win, look_back, channel,
                 learning_rate: float = 5e-5,
                 metric: str = 'mse',
                 mode: str = 'pretrain'
                 ):
        """Build the MaskFormer backbone and metrics.

        NOTE: the ``metric`` argument is currently unused — self.metric is
        hard-wired to ChannelMaskedLoss (and training uses F.mse_loss anyway).
        """
        super().__init__()
        self.model = MaskFormer(c_in=channel, dim=dim, pre_win=pre_win, look_back=look_back, depth=4, head=4,
                                drop=0.2, mask_rate=0.2, dim_head=256)
        self.mode = mode
        self.lr = learning_rate
        self.pre_win = pre_win
        self.save_hyperparameters()
        self.testMAE = MeanAbsoluteError()
        self.testMSE = MeanSquaredError()
        self.MSE = MeanSquaredError()
        self.MAE = MeanAbsoluteError()
        self.metric = ChannelMaskedLoss()

    def training_step(self, batch, batch_idx):
        """Pretrain: MSE on the masked-out positions of the reconstruction.
        Tune: MSE between forecast and target."""
        x, y, x_mark, _ = batch
        x = rearrange(x, "b w c->b c w")
        y = rearrange(y, "b w c->b c w")
        if torch.all(x_mark == 0):
            pre, recon, mask = self.model(x.float(), None, mode=self.mode)
        else:
            pre, recon, mask = self.model(x.float(), x_mark.float(), mode=self.mode)
        if self.mode == 'pretrain':
            # mask==0 selects the masked positions along dim 1 — assumes the
            # MaskFormer mask is indexed on that axis; confirm in layer.models.
            mask_target = x[:, mask == 0, :]
            mask_recon = recon[:, mask == 0, :]
            loss = F.mse_loss(mask_recon.float(), mask_target.float())
        else:
            loss = F.mse_loss(pre.float(), y.float())

        self.log("train_loss", loss, on_epoch=True, prog_bar=True, on_step=False)
        return loss

    def on_train_epoch_end(self) -> None:
        # NOTE(review): the scheduler is stepped manually here *and* is also
        # registered in configure_optimizers with frequency=5, so Lightning
        # steps it again every 5th epoch — confirm this double-step is intended.
        sch = self.lr_schedulers()
        sch.step()
        lr = self.trainer.lr_scheduler_configs[0].scheduler.get_last_lr()[0]
        self.log('learning_rate', lr, on_step=False, on_epoch=True, prog_bar=True)
        # Bugfix: the phase switch must happen on EVERY rank. The old code
        # gated it on trainer.is_global_zero, so under DDP rank 0 fine-tuned
        # while all other ranks kept pretraining (divergent graphs/losses).
        if self.current_epoch >= 10:
            self.mode = 'tune'

    def test_step(self, batch, batch_idx):
        """Accumulate test MSE/MAE on the forecast head output."""
        x, y, x_mark, _ = batch
        b = x.size(0)
        x = rearrange(x, "b w c->b c w")
        y = rearrange(y, "b w c->b c w")
        if torch.all(x_mark == 0):
            pre, recon, mask = self.model(x.float(), None, mode=self.mode)
        else:
            pre, recon, mask = self.model(x.float(), x_mark.float(), mode=self.mode)
        self.testMSE.update(preds=pre.reshape(b, -1), target=y.reshape(b, -1))
        self.testMAE.update(preds=pre.reshape(b, -1), target=y.reshape(b, -1))

    def on_test_epoch_end(self) -> None:
        mse = self.testMSE.compute()
        mae = self.testMAE.compute()
        # Bugfix: reset after compute so a subsequent trainer.test() run does
        # not accumulate stale state (validation metrics were already reset).
        self.testMSE.reset()
        self.testMAE.reset()
        if self.trainer.is_global_zero:
            self.log_dict({"test_mse": mse.item(), "test_mae": mae.item()})

    def validation_step(self, batch, batch_idx):
        """Accumulate validation MSE/MAE on the forecast head output."""
        x, y, x_mark, _ = batch
        b = x.size(0)
        x = rearrange(x, "b w c->b c w")
        y = rearrange(y, "b w c->b c w")
        if torch.all(x_mark == 0):
            pre, recon, mask = self.model(x.float(), None, mode=self.mode)
        else:
            pre, recon, mask = self.model(x.float(), x_mark.float(), mode=self.mode)
        self.MAE.update(preds=pre.reshape(b, -1), target=y.reshape(b, -1))
        self.MSE.update(preds=pre.reshape(b, -1), target=y.reshape(b, -1))

    def on_validation_epoch_end(self) -> None:
        # val_loss (MSE) drives checkpointing/early stopping in Lightning.
        valid_loss = self.MSE.compute()
        self.log("val_loss", valid_loss, on_epoch=True, prog_bar=True, on_step=False)
        self.MSE.reset()
        self.MAE.reset()

    def configure_optimizers(self):
        """Adam + exponential LR decay (gamma=0.8)."""
        optimizer = optim.Adam(self.parameters(), lr=self.lr)
        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": ExponentialLR(optimizer, gamma=0.8),
                "interval": "epoch",
                "frequency": 5
            }
        }
