import sys
sys.path.append("./")

import importlib
import argparse

import os
import torch

from compressai.zoo import cheng2020_anchor

from PIL import Image
import numpy as np
import math

from tqdm import tqdm

from util.metrics import compute_msssim, compute_psnr, mse2psnr
from util.io.ImageIO import img2torch, pad, crop
from util.io.VideoIO import YUV420VideoSequence, RawVideoSequence

from loguru import logger


class BaseTest:
    """Abstract interface for a codec test driver.

    Subclasses provide the intra/inter encode and eval steps plus the
    frame pre/post-processing; the ``on_*`` callbacks are optional hooks
    that default to no-ops.
    """

    def intra_encode(self, x, **kwargs):
        """Run the intra (I-frame) codec on ``x``. Must be overridden."""
        raise NotImplementedError

    def intra_eval(self, input, output):
        """Evaluate an intra coding result.

        return psnr, ms-ssim, bpp, extra
        """
        raise NotImplementedError

    def inter_encode(self, input, **kwargs):
        """Run the inter (P-frame) codec.

        input: padded x_ref, x
        """
        raise NotImplementedError

    def inter_eval(self, input, output, **kwargs):
        """Evaluate an inter coding result. Must be overridden."""
        raise NotImplementedError

    def data_preprocess(self, frame: torch.Tensor) -> torch.Tensor:
        """Prepare a raw frame for encoding (e.g. padding, device move)."""
        raise NotImplementedError

    def data_postprocess(self, frame: torch.Tensor) -> torch.Tensor:
        """Undo ``data_preprocess`` on a reconstructed frame."""
        raise NotImplementedError

    def on_intra_encode(self, idx, input, output, result):
        """Hook invoked after each intra frame; default does nothing."""

    def on_inter_encode(self, idx, input, output, result):
        """Hook invoked after each inter frame; default does nothing."""



class LVCTestNeural(BaseTest):
    """Driver that evaluates a learned video codec over raw sequences.

    I-frames are coded with a pretrained cheng2020_anchor model; P-frames
    are delegated to the inter_encode/inter_eval hooks that a subclass
    supplies (``self.inter_model`` is left as None here).
    """

    def __init__(self, 
        seqs: list[RawVideoSequence], 
        intra_quality = 6, 
        gop = 12,
        frame_num = -1,
        device = "cuda"
    ):
        """
        seqs: sequences to evaluate.
        intra_quality: compressai quality index for the intra model.
        gop: group-of-pictures size; frame i is intra iff i % gop == 0.
        frame_num: frames to encode per sequence (-1 = whole sequence).
        device: torch device string used for models and frames.
        """
        super().__init__()

        # Fix: move the intra model to the requested device instead of the
        # hard-coded .cuda(), so device="cpu" actually works.
        self.intra_model = cheng2020_anchor(quality = intra_quality, pretrained = True).eval().to(device)

        self.inter_model = None

        self.seqs = seqs
        self.gop = gop
        self.device = device

        # padded-pixels / original-pixels ratio; set lazily on the first
        # frame in data_preprocess, used to rescale bpp measured on the
        # padded frame back to the original resolution.
        self.bpp_ratio = None
        self.frame_num = frame_num

        # Decoded-picture buffer: "__ref" holds the latest reconstruction.
        self.dpb = {
            "__ref": None
        }


    def intra_encode(self, x, **kwargs):
        """Run the pretrained intra codec on the padded frame ``x``."""
        return self.intra_model(x)
    
    def intra_eval(self, input, output):
        """Compute metrics for one intra frame.

        Returns a dict with the reconstruction ("x_hat") plus psnr, msssim,
        bpp (rescaled to the unpadded resolution) and extra (None here).
        """
        x_unpadded = self.data_postprocess(input)
        x_hat_unpadded = self.data_postprocess(output["x_hat"])
        
        psnr = compute_psnr(x_unpadded, x_hat_unpadded)
        msssim = compute_msssim(x_unpadded, x_hat_unpadded)
        bpp = self._compute_intra_bpp(output) * self.bpp_ratio

        return {
            "x_hat": output["x_hat"],
            "psnr": psnr,
            "msssim": msssim,
            "bpp": bpp,
            "extra": None
        }
    

    def data_preprocess(self, frame: torch.Tensor) -> torch.Tensor:
        """Pad ``frame`` to a multiple of 64 and move it to the device.

        The first call also records the original spatial shape (for
        cropping back) and the padded/original pixel ratio for bpp.
        """
        padded_frame = pad(frame, 64, mode = "replicate").to(self.device)
        
        if self.bpp_ratio is None:
            self._shape = (frame.shape[-2], frame.shape[-1])  # save for uncrop
            
            self.orig_pixnum = frame.shape[-2] * frame.shape[-1]
            self.pad_pixnum = padded_frame.shape[-2] * padded_frame.shape[-1]
            self.bpp_ratio = self.pad_pixnum / self.orig_pixnum
        
        return padded_frame
    
    def data_postprocess(self, frame: torch.Tensor) -> torch.Tensor:
        """Crop a padded frame back to the original resolution."""
        return crop(frame, self._shape)


    @torch.no_grad()
    def eval(self):
        """Encode every sequence and return averaged psnr/msssim/bpp.

        Per-frame metrics are averaged within each sequence; those
        per-sequence averages are then averaged across all sequences.
        """
        result = {
            "psnr": 0,
            "msssim": 0,
            "bpp": 0
        }

        for seq in tqdm(self.seqs, desc = "sequence"):
            seq_result = {
                "psnr": 0,
                "msssim": 0,
                "bpp": 0,
            }

            if self.frame_num != -1:
                # Never request more frames than the sequence contains.
                frame_to_encode = min(self.frame_num, len(seq))
            else:
                frame_to_encode = len(seq)

            for i in tqdm(range(frame_to_encode), desc = "frame", leave = False, disable = True):
                frame = seq[i].unsqueeze(0).to(self.device)
                padded_frame = self.data_preprocess(frame)

                # BUG FIX: the original assigned the per-frame metrics to
                # `result`, clobbering the cross-sequence accumulator; the
                # later `for key in result` loop then iterated the frame
                # dict and raised KeyError on its "x_hat" key. Per-frame
                # metrics now live in `frame_result`.
                if i % self.gop == 0:
                    out = self.intra_encode(padded_frame)
                    frame_result = self.intra_eval(padded_frame, out)
                    self.on_intra_encode(i, padded_frame, out, frame_result)
                    frame_type = "I"
                else:
                    out = self.inter_encode(padded_frame)
                    frame_result = self.inter_eval(padded_frame, out)
                    self.on_inter_encode(i, padded_frame, out, frame_result)
                    frame_type = "P"

                # Keep the reconstruction as the reference for the next frame.
                self.dpb["__ref"] = frame_result["x_hat"]

                logger.info(
                    f"{seq.file}, {i}/{frame_to_encode}, {frame_type}, PSNR: {frame_result['psnr']}, MS-SSIM: {frame_result['msssim']}, BPP: {frame_result['bpp']}"
                )

                for key in seq_result:
                    seq_result[key] += frame_result[key]

            # Sequence average, accumulated into the global result.
            for key in result:
                seq_result[key] /= frame_to_encode
                result[key] += seq_result[key]
            
            logger.success(
                f"{seq.file}, PSNR: {seq_result['psnr']}, MS-SSIM: {seq_result['msssim']}, BPP: {seq_result['bpp']}"
            )
            
            # release
            seq.close()

        # Average the per-sequence results over all sequences.
        for key in result:
            result[key] /= len(self.seqs)

        logger.critical(
            f"PSNR: {result['psnr']}, MS-SSIM: {result['msssim']}, BPP: {result['bpp']}"
        )
        
        return result

    def _compute_intra_bpp(self, out_net):
        """Bits-per-pixel of the intra output, from the model likelihoods."""
        size = out_net['x_hat'].size()
        num_pixels = size[0] * size[2] * size[3]
        return sum(torch.log(likelihoods).sum() / (-math.log(2) * num_pixels)
                for likelihoods in out_net['likelihoods'].values()).item()
    


def parse_args():
    """Parse the command line for the test runner.

    Returns (args, extra) where ``extra`` is the ``-e k1:v1,k2:v2`` option
    parsed into a {str: str} dict (empty when the option is absent).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--module", type = str, required = True)
    parser.add_argument("-t", "--tester", type = str, required = True, help = "tester class name")
    parser.add_argument("-ckpt", "--checkpoint", type = str, required = True)

    parser.add_argument("-seq", "--sequences", type = str, required = True)
    parser.add_argument("-sw", "--width", type = int, required = True)
    parser.add_argument("-sh", "--height", type = int, required = True)
    parser.add_argument("-gop", "--gop", type = int, required = True)
    parser.add_argument("-f", "--frame_num", type = int, default = -1)

    parser.add_argument("--device", type = str, default = "cuda")

    parser.add_argument("-iq", "--intra_quality", type = int, default = 6)
    parser.add_argument("-im", "--intra_model", type = str, default = "cheng_anchor")

    parser.add_argument("--log", type = str, default = None)

    parser.add_argument("-e", "--extra", type = str, help = "k1:v1,k2:v2", default = None)

    args = parser.parse_args()

    # extra params: split each pair on the FIRST colon only, so values may
    # themselves contain ':' (e.g. paths or URLs). The original split on
    # every colon and raised ValueError for such values.
    extra = {}

    if args.extra is not None:
        for pair in args.extra.split(","):
            k, v = pair.split(":", 1)
            extra[k.strip()] = v.strip()
    
    return args, extra



def get_test_class(module, classname):
    """Import ``module`` by dotted path and return its ``classname`` attribute."""
    mod = importlib.import_module(module)
    return getattr(mod, classname)
  


    
if __name__ == "__main__":
    # Parse the CLI (plus the free-form "-e" extras) and echo the config.
    args, extra_args = parse_args()
    print(args, extra_args)

    # Attach an extra log sink as early as possible, if requested.
    if args.log is not None:
        logger.add(args.log)

    # Resolve the tester class named on the command line.
    TestClass: LVCTestNeural.__class__ = get_test_class(args.module, args.tester)

    # Collect all YUV420 sequences from the given folder.
    seqs = YUV420VideoSequence.from_folder(
        args.sequences, args.width, args.height
    )

    # Instantiate the tester; unknown "-e" pairs are forwarded as kwargs.
    tester: LVCTestNeural = TestClass(
        seqs = seqs,
        intra_quality = args.intra_quality,
        gop = args.gop,
        frame_num = args.frame_num,
        device = args.device,
        ckpt = args.checkpoint,
        **extra_args
    )

    result = tester.eval()
    print(result)