import os

import torch
import torch.nn.functional as F
import lightning as L
from src.models.met_pol_model import MetPolModel
from src.utils import LinearWarmupCosineAnnealingLR, masked_mse_loss, masked_mae_loss


class MetPolModule(L.LightningModule):
    """Lightning wrapper around MetPolModel for autoregressive pollutant forecasting.

    Each *_step performs `args.lead_time` rollout iterations of length
    `args.block_time`: step 0 is conditioned on observed pollutant history,
    later steps are conditioned on the model's own previous prediction.
    """

    def __init__(self, args, statics, model_path=""):
        """
        Args:
            args: config namespace with `model`, `optim`, `scheduler`,
                `lead_time`, and `block_time` attributes.
            statics: dict providing 'coord_info', 'map_order',
                'target_mean', and 'target_std'.
            model_path: optional checkpoint path; if non-empty, weights are
                loaded immediately after construction.
        """
        super().__init__()

        self.args = args

        self.coord_info = statics['coord_info']
        self.map_order = statics['map_order']
        # Normalization statistics used to de-normalize predictions/targets
        # for physically-scaled validation metrics and prediction outputs.
        self.target_mean = statics['target_mean']
        self.target_std = statics['target_std']

        self.net = MetPolModel(**args.model.__dict__)

        if len(model_path) > 0:
            self.load_pretrained_weights(model_path)
            print("finish loading model in modules.py ......")

    def load_pretrained_weights(self, pretrained_path):
        """Load a single-file Lightning checkpoint into this module (strict).

        Raises:
            NotImplementedError: if `pretrained_path` is a directory
                (sharded/deepspeed checkpoints are not supported).
        """
        if os.path.isdir(pretrained_path):
            raise NotImplementedError("Has not implemented right now")
        # NOTE(review): torch.load without weights_only unpickles arbitrary
        # objects — only load checkpoints from trusted sources.
        checkpoint = torch.load(pretrained_path, map_location=torch.device('cpu'))
        print(f"Loading pre-trained checkpoint from: {pretrained_path}")
        checkpoint_model = checkpoint['state_dict']

        msg = self.load_state_dict(checkpoint_model, strict=True)
        print(msg)

    def _reconstruct_pred_dict(self, preds, site_nums=(15, 7, 10, 7), keys_order=(1, 2, 3, 0)):
        """Split a concatenated prediction tensor back into a per-region dict.

        The model emits all sites concatenated along the last axis; this
        slices them back apart (with a trailing singleton dim restored) so
        the prediction can be fed in as the next step's pollutant input.

        Args:
            preds: tensor whose last dim is the concatenation of all sites.
            site_nums: number of sites per region, in concatenation order.
            keys_order: region key for each slice, aligned with `site_nums`.

        Returns:
            dict mapping region key -> tensor slice with shape
            `preds.shape[:-1] + (num_sites, 1)`.
        """
        pred_dict = {}
        start = 0
        for key, num in zip(keys_order, site_nums):
            pred_slice = preds[..., start:start + num]
            pred_dict[key] = pred_slice.unsqueeze(-1)
            start += num
        return pred_dict

    def _rollout_iteration(self, batch, i, block_time, prev_pred):
        """Run one autoregressive rollout step and assemble its supervision.

        Shared by training/validation/predict to avoid triplicated logic.

        Args:
            batch: the raw batch tuple as delivered by the dataloader.
            i: rollout step index (0-based).
            block_time: number of time points per rollout block.
            prev_pred: model output of step i-1, or None at step 0
                (step 0 uses observed pollutant history instead).

        Returns:
            (pred, target, mask): model output for block i+1, the matching
            ground-truth tensor, and its validity mask — all with sites
            concatenated along the last axis.
        """
        x_met, pol_seq_dict, pol_mask_dict, time_shift, met_time, month, day, hour = batch

        # Meteorology uses a 3-frame sliding window; calendar features use
        # the current block's time span.
        met_input = x_met[:, i: i + 3]
        met_time_input = met_time[:, i: i + 3]
        t0, t1, t2 = i * block_time, (i + 1) * block_time, (i + 2) * block_time
        month_input = month[:, t0:t1]
        day_input = day[:, t0:t1]
        hour_input = hour[:, t0:t1]

        if prev_pred is None:
            # First step: condition on observed pollutant history.
            pol_seq_X = {k: v[:, :block_time] for k, v in pol_seq_dict.items()}
        else:
            # Later steps: condition on the model's own previous output.
            pol_seq_X = self._reconstruct_pred_dict(prev_pred)

        pred = self.net(met_input, met_time_input, pol_seq_X, self.coord_info,
                        self.map_order, month_input, day_input, hour_input, time_shift)

        # Supervision is the NEXT block: [(i+1)*block_time, (i+2)*block_time).
        pol_seq_Y = {k: v[:, t1:t2] for k, v in pol_seq_dict.items()}
        target = torch.cat([item.squeeze(-1) for item in pol_seq_Y.values()], dim=-1)
        pol_mask_Y = {k: m[:, t1:t2] for k, m in pol_mask_dict.items()}
        mask = torch.cat([item.squeeze(-1) for item in pol_mask_Y.values()], dim=-1)

        return pred, target, mask

    def training_step(self, batch, batch_idx):
        """Autoregressive rollout; returns the mean masked MSE over steps."""
        roll_cnt = self.args.lead_time
        block_time = self.args.block_time

        losses = []
        pred = None
        for i in range(roll_cnt):
            pred, target, mask = self._rollout_iteration(batch, i, block_time, pred)
            loss = masked_mse_loss(pred, target, mask)
            losses.append(loss)
            self.log(f"train_loss_mse_{i}", loss)

        final_loss = torch.mean(torch.stack(losses))
        self.log("train_loss_mse_mean", final_loss)
        return final_loss

    def validation_step(self, batch, batch_idx):
        """Rollout as in training, plus de-normalized RMSE/MAE metrics."""
        roll_cnt = self.args.lead_time
        block_time = self.args.block_time

        losses = []
        pred = None
        for i in range(roll_cnt):
            pred, target, mask = self._rollout_iteration(batch, i, block_time, pred)
            loss = masked_mse_loss(pred, target, mask)
            losses.append(loss)

            # De-normalize to physical units for interpretable metrics.
            pred_denorm = pred * self.target_std + self.target_mean
            target_denorm = target * self.target_std + self.target_mean

            de_mse = masked_mse_loss(pred_denorm, target_denorm, mask)
            de_rmse = torch.sqrt(de_mse)
            de_mae = masked_mae_loss(pred_denorm, target_denorm, mask)

            self.log(f"val_loss_de_rmse_{i}", de_rmse, on_epoch=True)
            self.log(f"val_loss_de_mae_{i}", de_mae, on_epoch=True)
            self.log(f"val_loss_mse_{i}", loss, on_epoch=True)

        final_loss = torch.mean(torch.stack(losses))
        self.log("val_loss", final_loss, on_epoch=True, prog_bar=True)
        return final_loss

    def configure_optimizers(self):
        """Fused AdamW with per-step linear-warmup cosine-annealing schedule."""
        optimizer = torch.optim.AdamW([
            {
                "params": self.parameters(),
                "lr": self.args.optim.lr,
                "betas": (self.args.optim.betas[0], self.args.optim.betas[1]),
                "weight_decay": self.args.optim.weight_decay,
                # fused=True requires CUDA params — TODO confirm for this setup.
                "fused": True
            }
        ])

        scheduler = LinearWarmupCosineAnnealingLR(
            optimizer,
            **self.args.scheduler.__dict__
        )

        return {
            "optimizer": optimizer,
            "lr_scheduler": {"scheduler": scheduler, "interval": "step", "frequency": 1}
        }

    def predict_step(self, batch, batch_idx):
        """Rollout and collect per-step predictions, targets, and masks.

        Returns:
            dict with tensors stacked along a leading rollout-step axis.
            NOTE(review): despite their names, the "target_norm"/"pred_norm"
            entries hold DE-normalized (physical-unit) values; the keys are
            kept as-is for downstream compatibility.
        """
        roll_cnt = self.args.lead_time
        block_time = self.args.block_time

        preds, targets, preds_denorm, targets_denorm, masks = [], [], [], [], []
        pred = None
        for i in range(roll_cnt):
            pred, target, mask = self._rollout_iteration(batch, i, block_time, pred)

            preds.append(pred)
            targets.append(target)
            preds_denorm.append(pred * self.target_std + self.target_mean)
            targets_denorm.append(target * self.target_std + self.target_mean)
            masks.append(mask)

        return {
            "target": torch.stack(targets),
            "pred": torch.stack(preds),
            "target_norm": torch.stack(targets_denorm),
            "pred_norm": torch.stack(preds_denorm),
            "mask": torch.stack(masks)
        }
