import math
from dataclasses import dataclass, field
from typing import Optional

import torch
import torch.nn.functional as F
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision.transforms import CenterCrop

import lightning as L
from lightning.pytorch.callbacks import ModelCheckpoint
from lightning.pytorch.loggers import TensorBoardLogger

from data.DL3DV.DL3DV import DL3DVDataset
from models.RayLDMIC import RayFIC
from models.plucker import norm_cam, compute_plucker_rays

@dataclass
class TrainingConfig:
    """Hyper-parameters for RayFIC training.

    Fields carry type annotations so that ``@dataclass`` actually generates
    them as instance fields — without annotations the decorator ignores the
    names entirely and they remain shared class attributes (including the
    mutable ``device`` list, which was shared across all instances).
    """

    # Hardware / data loading
    device: list = field(default_factory=lambda: [0])  # GPU indices for Trainer(devices=...)
    num_worker: int = 1

    train_batch_size: int = 4
    eval_batch_size: int = 4
    num_epochs: int = 10000

    learning_rate: float = 1e-4

    output_dir: str = 'logs'
    name: str = "RFIC-0.0.0"  # was an f-string with no placeholders

    seed: int = 17

    fast_dev_run: bool = True
    ckpt_path: Optional[str] = None
    resume_ckpt_path: Optional[str] = None

    # Decay factor for the LR scheduler (see LitModel.configure_optimizers).
    lr_gamma: float = 0.5

    # Rate-distortion trade-off weight: loss = bpp + LAMBDA * mse
    LAMBDA: int = 1024

    # Model
    N: int = 192
    depth: int = 5
    d_model: int = 512
    n_heads: int = 8

    # Dataset
    image_count: int = 4
    dataset: str = "DL3DV"
    # dataset: str = "BlendedMVS"

    BlendedMVS_dataset_root: str = "../datasets/BlendedMVS"
    BlendedMVS_train_split: str = "data/BlendedMVS/train.txt"
    BlendedMVS_val_split: str = "data/BlendedMVS/val.txt"

    DL3DV_dataset_root: str = "E:/dataset/DL3DV-10K/480p"
    DL3DV_train_split: str = "data/DL3DV/train_11K.txt"
    DL3DV_val_split: str = "data/DL3DV/val_11K.txt"

    crop_size: tuple = (256, 448)


class LitModel(L.LightningModule):
    """LightningModule wrapping RayFIC for rate-distortion training.

    Runs manual optimization: the optimizer is stepped inside
    ``training_step``, and the ReduceLROnPlateau scheduler is stepped on the
    mean validation loss in ``on_validation_epoch_end``.
    """

    # Class-level fallback kept for backward compatibility; __init__
    # overwrites it with the config value so the two cannot drift apart.
    LAMBDA = 1024

    def __init__(self, config: TrainingConfig) -> None:
        super().__init__()
        # Optimizer and scheduler are stepped by hand (see training_step /
        # on_validation_epoch_end).
        self.automatic_optimization = False

        self.cfg = config
        # Rate-distortion weight: loss = bpp + LAMBDA * mse (see get_loss).
        self.LAMBDA = config.LAMBDA

        self.model = RayFIC(
            config.N, config.image_count,
            depth=config.depth,
            d_model=config.d_model,
            n_heads=config.n_heads,
        )

        # Validation losses accumulated over the current epoch; averaged and
        # cleared in on_validation_epoch_end.
        self.avg_meters = []

    def training_step(self, batch, batch_idx):
        """One manually-optimized step on an (images, extrinsics, intrinsics) batch."""
        images, Es, Is = batch
        B, V, _, H, W = images.shape

        # Normalize camera parameters, then build per-pixel Plucker rays.
        Es, Is = norm_cam(Es, Is)
        rays = compute_plucker_rays(Is, Es, H, W)

        out = self.model(images, rays)
        metrics = self.get_loss(out, images, self.LAMBDA)
        loss = metrics["loss"]

        opt = self.optimizers()
        opt.zero_grad()
        self.manual_backward(loss)
        self.clip_gradients(opt, gradient_clip_val=1.0, gradient_clip_algorithm="norm")
        opt.step()

        self.log_dict({
            "train/loss": loss,
            "train/mse": metrics["mse"],
            "train/bpp": metrics["bpp"],
            "train/bpp_y": metrics["bpp_y"],
            "train/bpp_z": metrics["bpp_z"],
            "train/psnr": mse2psnr(metrics["mse"]),
            # Read the lr straight from the wrapped optimizer's param groups
            # instead of materializing a full state_dict every step.
            "train/lr": opt.optimizer.param_groups[0]["lr"],
        })
        return loss

    def validation_step(self, batch, batch_idx):
        """Compute validation metrics; accumulates loss for the epoch mean."""
        # BUG FIX: the camera tensors were previously discarded
        # (`images, _, _ = batch`) and then referenced below, raising a
        # NameError on the first validation batch.
        images, Es, Is = batch
        B, V, _, H, W = images.shape

        Es, Is = norm_cam(Es, Is)
        rays = compute_plucker_rays(Is, Es, H, W)

        out = self.model(images, rays)
        metrics = self.get_loss(out, images, self.LAMBDA)

        self.log_dict({
            "val/loss": metrics["loss"],
            "val/mse": metrics["mse"],
            "val/bpp": metrics["bpp"],
            "val/bpp_y": metrics["bpp_y"],
            "val/bpp_z": metrics["bpp_z"],
            "val/psnr": mse2psnr(metrics["mse"].item()),
        })

        self.avg_meters.append(metrics["loss"])

    def on_validation_epoch_end(self):
        """Step the plateau scheduler on the mean validation loss of the epoch."""
        if not self.avg_meters:
            # No validation batches ran; avoid ZeroDivisionError.
            return

        loss = sum(self.avg_meters) / len(self.avg_meters)

        lr_scheduler = self.lr_schedulers()
        lr_scheduler.step(metrics=loss)

        self.avg_meters = []

    def get_loss(self, out, x, lmbda):
        """Rate-distortion loss: bpp + lmbda * MSE.

        ``out`` must provide "x_hat" (reconstruction, same shape as x) and
        "likelihoods" with "y" and "z" entries; x is (B, V, C, H, W).
        Returns a dict with "loss", "mse", "bpp", "bpp_y", "bpp_z".
        """
        B, V, _, H, W = x.shape
        num_pixels = B * V * H * W

        x_hats = out["x_hat"]
        likeli = out['likelihoods']

        mse = F.mse_loss(x_hats, x)
        bpp_y = self._calc_bpp(likeli["y"], num_pixels)
        bpp_z = self._calc_bpp(likeli["z"], num_pixels)
        bpp = bpp_y + bpp_z

        loss = bpp + lmbda * mse

        return {
            "bpp": bpp,
            "mse": mse,
            "loss": loss,
            "bpp_y": bpp_y,
            "bpp_z": bpp_z
        }

    @staticmethod
    def _calc_bpp(likelihoods, num_pixels):
        """Bits-per-pixel from likelihoods; per-element bits clamped to [0, 50]."""
        return torch.sum(torch.clamp(-1.0 * torch.log(likelihoods + 1e-6) / math.log(2.0), 0, 50)) / num_pixels

    def configure_optimizers(self):
        """AdamW plus ReduceLROnPlateau (stepped manually on validation loss)."""
        optimizer = torch.optim.AdamW(self.model.parameters(), lr=self.cfg.learning_rate)

        lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer,
            mode="min",
            # Use the configured decay factor instead of a duplicated constant.
            factor=self.cfg.lr_gamma,
            patience=10,
            min_lr=1e-6
        )

        return [optimizer], [lr_scheduler]



def mse2psnr(mse):
    """Convert a mean-squared error (images in [0, 1]) to PSNR in dB.

    A small epsilon keeps the logarithm finite when ``mse`` is zero.
    """
    eps = 1e-7
    return 10 * math.log10(1.0 / (mse + eps))



if __name__ == "__main__":
    # Build the config first so every knob — including the seed — comes from
    # TrainingConfig instead of being hard-coded here.
    cfg = TrainingConfig()
    # NOTE(review): previously seeded with a hard-coded 3407 while cfg.seed
    # was declared but never used; the config value is now authoritative.
    L.seed_everything(cfg.seed)

    model_module = LitModel(config=cfg)

    # Shared deterministic crop so train and val batches have equal shapes.
    transform = CenterCrop(cfg.crop_size)
    train_dataset = DL3DVDataset(
        cfg.DL3DV_dataset_root,
        cfg.DL3DV_train_split,
        image_count=cfg.image_count,
        randn_sample=True,
        transform=transform,
        sample_interval=10
    )

    val_dataset = DL3DVDataset(
        cfg.DL3DV_dataset_root,
        cfg.DL3DV_val_split,
        image_count=cfg.image_count,
        randn_sample=False,
        transform=transform,
        sample_interval=10
    )

    train_dataloader = DataLoader(
        train_dataset,
        batch_size=cfg.train_batch_size,
        num_workers=cfg.num_worker,
        shuffle=True,
        persistent_workers=True,
        pin_memory=True
    )

    # Honor cfg.num_worker here too (the val loader previously ran in the
    # main process only, unlike the train loader).
    val_dataloader = DataLoader(
        val_dataset,
        batch_size=cfg.eval_batch_size,
        num_workers=cfg.num_worker,
        shuffle=False
    )

    # Keep the best checkpoint by validation loss, plus the most recent one.
    # NOTE(review): the "/" in "{val/loss:...}" creates a subdirectory in the
    # checkpoint filename on most filesystems — confirm this is intended.
    checkpoint_callback = ModelCheckpoint(
        save_top_k=1,
        monitor="val/loss",
        mode="min",
        dirpath=cfg.output_dir + "/" + cfg.name,
        filename="ckpt_{epoch:04d}-{val/loss:.5f}-{val/psnr:.2f}",
        auto_insert_metric_name=False,
        save_last=True
    )

    logger = TensorBoardLogger(
        save_dir=cfg.output_dir,
        name=cfg.name
    )

    trainer = L.Trainer(
        max_epochs=cfg.num_epochs,
        fast_dev_run=cfg.fast_dev_run,
        logger=logger,
        devices=cfg.device,
        callbacks=[checkpoint_callback]
    )

    # Trainer.fit treats ckpt_path=None as "start from scratch", so a single
    # call covers both fresh and resumed runs.
    trainer.fit(
        model=model_module,
        train_dataloaders=train_dataloader,
        val_dataloaders=val_dataloader,
        ckpt_path=cfg.resume_ckpt_path
    )