# -*- coding: utf-8 -*-
"""
@Time: 2025/2/21 15:21
@Author: Xiao Chen
@File: metrics.py
"""
import time
from pytorch_msssim import ms_ssim
from PIL import Image
import numpy as np
from typing import Tuple, Union, Any
import torch
from torchvision import transforms
from torch import Tensor
from pathlib import Path
import lpips
from piq import ssim, DISTS
from ptflops import get_model_complexity_info
import warnings
import pandas as pd
warnings.simplefilter("ignore", UserWarning)

"""Functions included in this file:
1 compute_bpp -> bits per pixel                                         √
    input: file_path, height, width
    output: float
    
2 compute_psnr -> peak signal to noise ratio                            √
    input: img1, img2, max_val=255.0
    output: float

3 compute_runtime -> secs                                               √
    input:  function, *kargs, **kwargs
    output: None

4 compute_msssim -> multi-scale structural similarity index measure     √
    input:  img1, img2, max_val=255.0
    output: float
    
5 compute_lpips -> perceptual loss                                      √
    input:  img1, img2, net=alex
    output: float
    
6 compute_dists
    input: img1, img2
    output: float
    
7 compute_ssim
    input: img1, img2
    output: float

8 compute_macs
    input: model, input_des, height, width
    output: float
    
"""


def data_preprocess(a: "Union[Image.Image, np.ndarray]",
                    b: "Union[Image.Image, np.ndarray]")\
        -> Tuple[Tensor, Tensor]:
    """Convert a pair of images into float NCHW tensors.

    Accepts PIL images or numpy arrays. HWC color input (last dim == 3)
    is permuted to CHW; 2-D grayscale input gets an explicit channel
    dimension. Pixel values are NOT rescaled — they stay in the
    original range (typically [0, 255]).

    Note: the original annotation used ``Image.Image or np.ndarray``,
    which evaluates to just ``Image.Image``; it is now a proper Union
    (quoted so the block does not require PIL at definition time).
    """
    def _to_nchw(img) -> Tensor:
        # np.asarray handles both PIL images and ndarrays uniformly.
        arr = np.asarray(img)
        t = torch.from_numpy(arr.copy()).float().unsqueeze(0)
        if t.dim() == 4 and t.size(3) == 3:
            # HWC -> CHW for color images
            t = t.permute(0, 3, 1, 2)
        elif t.dim() == 3:
            # grayscale (H, W): add a channel dimension -> (1, 1, H, W)
            t = t.unsqueeze(1)
        return t

    return _to_nchw(a), _to_nchw(b)

def compute_msssim(a: "Union[Image.Image, np.ndarray]",
                   b: "Union[Image.Image, np.ndarray]",
                   max_val: float = 255.0) -> float:
    """Multi-scale SSIM between two images (higher is better, max 1.0).

    Fixes: the ``Image.Image or np.ndarray`` annotation (which evaluates
    to just ``Image.Image``) and the nonstandard 8-space body indent.

    :param a: first image (PIL image or numpy array)
    :param b: second image, same spatial size as ``a``
    :param max_val: dynamic range of the pixel values (255 for 8-bit)
    :return: MS-SSIM value as a Python float
    """
    a, b = data_preprocess(a, b)
    return ms_ssim(a, b, data_range=max_val).item()


def compute_psnr(a: Union[np.ndarray, Image.Image],
                 b: Union[np.ndarray, Image.Image],
                 max_val: float = 255.0) -> float:
    """Peak signal-to-noise ratio in dB between two images.

    :param a: first image (PIL image or numpy array)
    :param b: second image, same spatial size as ``a``
    :param max_val: maximum possible pixel value (255 for 8-bit images)
    :return: PSNR in dB; ``inf`` for identical images (the original
             fed mse == 0 into log10, producing a warning and -inf).
    """
    a, b = data_preprocess(a, b)
    mse = torch.mean((a - b) ** 2).item()
    if mse == 0:
        # identical inputs: PSNR is infinite by definition
        return float("inf")
    return 20 * np.log10(max_val) - 10 * np.log10(mse)


def compute_bpp(file_path: str,
                height: int,
                width: int) -> float:
    """Bits per pixel of a compressed file for an image of the given size.

    :param file_path: path to the compressed bitstream / image file
    :param height: image height in pixels
    :param width: image width in pixels
    :return: file size in bits divided by the pixel count
    :raises FileNotFoundError: if ``file_path`` is not an existing file.
        (The original ``raise f"..."`` raised a plain string, which is
        itself a TypeError at runtime — strings are not exceptions.)
    """
    path = Path(file_path)
    if not path.is_file():
        raise FileNotFoundError(f"Invalid file {file_path}")
    return (path.stat().st_size * 8) / (height * width)


def compute_runtime(func, *args, **kwargs) -> None:
    """Run ``func(*args, **kwargs)`` once and print its wall-clock runtime.

    Uses ``time.perf_counter`` (monotonic, high-resolution) instead of
    ``time.time``, which can jump with system clock adjustments. The
    redundant inner wrapper function of the original is removed; the
    ``*kargs`` varargs are renamed to the conventional ``*args``
    (positional-only, so callers are unaffected).

    :param func: callable to time (must have a ``__name__``)
    :param args: positional arguments forwarded to ``func``
    :param kwargs: keyword arguments forwarded to ``func``
    """
    start = time.perf_counter()
    func(*args, **kwargs)
    elapsed = time.perf_counter() - start
    print("Run function \033[1;31;48m %s \033[0m consumes %f secs" % (func.__name__, elapsed))


def compute_lpips(a: Union[Image.Image, np.ndarray],
                  b: Union[Image.Image, np.ndarray],
                  net: str = "alex") -> float:
    """LPIPS perceptual distance between two images (lower is better).

    Inputs are rescaled to [-1, 1] per image before being fed to the
    LPIPS network, matching the original ``transforms.Normalize`` with
    mean=(max+min)/2 and std=(max-min)/2.

    Fixes: a constant image made std == 0 and divided by zero (NaN);
    inference now runs under ``torch.no_grad`` to avoid building a
    gradient graph.

    :param a: first image (PIL image or numpy array)
    :param b: second image, same spatial size as ``a``
    :param net: LPIPS backbone ("alex", "vgg", or "squeeze")
    :return: LPIPS distance as a Python float
    """
    a, b = data_preprocess(a, b)

    def _rescale(t: Tensor) -> Tensor:
        half_range = (t.max() - t.min()) / 2
        if half_range == 0:
            # constant image: map to 0 instead of dividing by zero
            return torch.zeros_like(t)
        return (t - (t.max() + t.min()) / 2) / half_range

    a, b = _rescale(a), _rescale(b)
    loss_fn = lpips.LPIPS(net=net)
    with torch.no_grad():
        return loss_fn(a, b).item()


def compute_dists(a: Union[Image.Image, np.ndarray],
                  b: Union[Image.Image, np.ndarray]) -> float:
    """Deep Image Structure and Texture Similarity (DISTS) between two images.

    :param a: first image (PIL image or numpy array)
    :param b: second image, same spatial size as ``a``
    :return: DISTS score as a Python float (lower means more similar)
    """
    x, y = data_preprocess(a, b)
    metric = DISTS()
    return metric(x, y).item()


def compute_ssim(a: Union[Image.Image, np.ndarray],
                 b: Union[Image.Image, np.ndarray]) -> float:
    """Single-scale SSIM between two images, assuming an 8-bit [0, 255] range.

    :param a: first image (PIL image or numpy array)
    :param b: second image, same spatial size as ``a``
    :return: SSIM value as a Python float (1.0 for identical images)
    """
    x, y = data_preprocess(a, b)
    score = ssim(x, y, data_range=255.0)
    return score.item()


def compute_macs(model: Any,
                 input_des: Tuple,
                 height: int,
                 width: int) -> float:
    """Multiply-accumulate operations per pixel for *model*.

    :param model: the network to profile
    :param input_des: input description tuple passed to ptflops
        (e.g. a (C, H, W) shape)
    :param height: image height in pixels
    :param width: image width in pixels
    :return: total MACs divided by the pixel count
    """
    # parameter count from ptflops is not needed here, only the MACs
    total_macs, _params = get_model_complexity_info(
        model, input_des, as_strings=False, print_per_layer_stat=False)
    return total_macs / (height * width)


def dict_to_xlsx(dictionary, xlsx_save_path):
    """Write a nested metrics dict to an .xlsx workbook, one sheet per key.

    :param dictionary: mapping of sheet name -> table data (anything a
        ``pd.DataFrame`` accepts, e.g. a dict of column lists)
    :param xlsx_save_path: destination path for the workbook
    """
    with pd.ExcelWriter(xlsx_save_path, engine="openpyxl") as writer:
        for sheet_name, table in dictionary.items():
            frame = pd.DataFrame(table)
            frame.to_excel(writer, sheet_name=sheet_name, index=False)
    print(f"Successfully save data to {xlsx_save_path}")


if __name__ == "__main__":
    # Demo: report bits-per-pixel for a set of local CLIC validation images.
    sample_images = [
        r"D:\video-communication-dataset\图像编解码测试\CLIC\professional_valid\zugr-108.png",
        r"D:\video-communication-dataset\图像编解码测试\CLIC\professional_valid\daniel-robert-405.png",
    ]
    for image_path in sample_images:
        print(image_path)
        img = Image.open(image_path)
        print("压缩倍数: ", compute_bpp(image_path, img.height, img.width))
