import itertools
from typing import Callable, Dict, Tuple

import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
import matplotlib.pyplot as plt
from torchvision import transforms
import os
import math
from collections import Counter

def calculate_psnr(
    img1: torch.Tensor,
    img2: torch.Tensor,
    max_pixel_value: float = 255.0
) -> float:
    # 将图像裁剪到 [0, 255]，然后转换为 uint8
    img1_uint8 = img1.clamp(0, 255).round().to(torch.uint8)
    img2_uint8 = img2.clamp(0, 255).round().to(torch.uint8)
    
    # 转换回 float32 类型以计算 MSE
    img1_float = img1_uint8.to(torch.float32)
    img2_float = img2_uint8.to(torch.float32)
    
    # 计算 MSE
    mse = F.mse_loss(img1_float, img2_float)
    if mse == 0:
        return float('inf')
    psnr = 20 * torch.log10(max_pixel_value / torch.sqrt(mse))
    return psnr.item()
def pad_spatially_to_multiple_of_bsize(
    inputs: torch.Tensor, bsize: int, mode: str = 'reflect'
) -> torch.Tensor:
    """Pad the spatial dims of a NCHW tensor up to the next multiple of bsize.

    Padding is applied on the right/bottom edges only. The input tensor is
    returned untouched when both spatial dims are already aligned.
    """
    height, width = inputs.shape[2], inputs.shape[3]
    # Extra rows/cols needed to reach the next multiple of bsize (0 if aligned).
    extra_h = (-height) % bsize
    extra_w = (-width) % bsize
    if extra_h == 0 and extra_w == 0:
        return inputs
    # F.pad ordering for a 4-D input: (left, right, top, bottom).
    return F.pad(inputs, (0, extra_w, 0, extra_h), mode=mode)

class JpegProxy:
    """Differentiable JPEG-like layer that simulates JPEG compression.

    Pipeline: optional RGB->YUV conversion, optional 2x chroma downsampling,
    blockwise 2D DCT, quantization with per-channel tables, dequantization,
    inverse DCT, chroma upsampling, and conversion back to RGB. Supplying a
    differentiable ``rounding_fn`` keeps the whole pipeline differentiable.
    """

    def __init__(
        self,
        downsample_chroma: bool,
        luma_quantization_table: torch.Tensor,
        chroma_quantization_table: torch.Tensor,
        convert_to_yuv: bool,
        clip_to_image_max: bool,
        dct_size: int = 8,
        upsample_method: str = 'bilinear',
        device=None
    ):
        """Build the proxy.

        Args:
            downsample_chroma: If True, channels 1/2 are 2x downsampled before
                the DCT (4:2:0 style) and upsampled afterwards.
            luma_quantization_table: (dct_size, dct_size) table for channel 0.
            chroma_quantization_table: (dct_size, dct_size) table for
                channels 1 and 2.
            convert_to_yuv: If True, inputs are treated as RGB and converted
                to YUV before coding (and converted back after decoding).
            clip_to_image_max: If True, the decoded image is clamped to
                [0, image_max].
            dct_size: Side length of the DCT blocks (JPEG uses 8).
            upsample_method: F.interpolate mode used to upsample chroma.
            device: Target device; defaults to CUDA when available, else CPU.
        """
        # Fall back to CPU when CUDA is unavailable; the previous default of
        # torch.device('cuda') crashed on CPU-only machines.
        if device is None:
            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.device = device

        self.downsample_chroma = downsample_chroma
        self.upsample_method = upsample_method
        # Flatten to match the flattened per-block DCT coefficients, and move
        # to the target device (previously the tables stayed on their original
        # device, which broke runs where device != table device).
        self.luma_quantization_table = luma_quantization_table.reshape(-1).to(device)
        self.chroma_quantization_table = chroma_quantization_table.reshape(-1).to(device)

        self.convert_to_yuv = convert_to_yuv
        self.clip_to_image_max = clip_to_image_max

        # Color conversion matrices (BT.601 coefficients, as used by JFIF).
        self.rgb_from_yuv_matrix = torch.tensor(
            [[1.0, 1.0, 1.0],
             [0, -0.344136, 1.772],
             [1.402, -0.714136, 0]],
            dtype=torch.float32
        ).to(device)
        self.yuv_from_rgb_matrix = torch.tensor(
            [[0.299, -0.168736, 0.5],
             [0.587, -0.331264, -0.418688],
             [0.114, 0.5, -0.081312]],
            dtype=torch.float32
        ).to(device)
        # +128 chroma offset, hoisted here instead of being rebuilt on every
        # color-conversion call.
        self._uv_offset = torch.tensor(
            [0.0, 128.0, 128.0], dtype=torch.float32
        ).to(device)

        self.dct_size = dct_size
        self.dct_2d_mat = self._construct_dct_2d(self.dct_size)

    @staticmethod
    def _pad_to_block_multiple(inputs: torch.Tensor, bsize: int) -> torch.Tensor:
        """Reflect-pad H/W (right/bottom only) up to the next multiple of bsize."""
        height, width = inputs.shape[2:4]
        pad_h = (-height) % bsize
        pad_w = (-width) % bsize
        if pad_h or pad_w:
            # (left, right, top, bottom) ordering for 4-D inputs.
            return F.pad(inputs, (0, pad_w, 0, pad_h), mode='reflect')
        return inputs

    def _rgb_to_yuv(self, rgb: torch.Tensor) -> torch.Tensor:
        """Convert a NCHW RGB image to YUV, offsetting U/V by +128."""
        rgb = rgb.permute(0, 2, 3, 1)  # NCHW -> NHWC so matmul acts on channels
        yuv = torch.matmul(rgb, self.yuv_from_rgb_matrix) + self._uv_offset
        return yuv.permute(0, 3, 1, 2)  # back to NCHW

    def _yuv_to_rgb(self, yuv: torch.Tensor) -> torch.Tensor:
        """Inverse of _rgb_to_yuv: remove the U/V offset and convert to RGB."""
        yuv = yuv.permute(0, 2, 3, 1)  # NCHW -> NHWC
        rgb = torch.matmul(yuv - self._uv_offset, self.rgb_from_yuv_matrix)
        return rgb.permute(0, 3, 1, 2)  # back to NCHW

    def _construct_dct_2d(self, dct_size: int) -> torch.Tensor:
        """Construct the 2D DCT basis matrix (orthonormal, columns are basis)."""
        dct_1d_mat = np.zeros((dct_size, dct_size), dtype=np.float32)
        for i, j in itertools.product(range(dct_size), repeat=2):
            dct_1d_mat[i, j] = np.cos((2 * i + 1) * j * np.pi / (2 * dct_size))

        # Scale columns to unit norm (DC column gets an extra 1/sqrt(2)).
        dct_1d_mat *= np.sqrt(2 / dct_size)
        dct_1d_mat[:, 0] *= 1 / np.sqrt(2)

        block_size = dct_size ** 2

        # 2D basis built from outer products of 1D basis vectors; each column
        # of the result is one flattened (dct_size x dct_size) basis image.
        dct_2d_mat = np.zeros((block_size, block_size), dtype=np.float32)
        for i in range(block_size):
            dct_2d_mat[:, i] = np.reshape(
                np.outer(dct_1d_mat[:, i // dct_size], dct_1d_mat[:, i % dct_size]),
                [-1],
            )
        return torch.tensor(dct_2d_mat, dtype=torch.float32).to(self.device)

    def _forward_dct_2d(self, image_channel: torch.Tensor) -> torch.Tensor:
        """Compute per-block 2D DCT coefficients of one image channel.

        Returns a (batch, num_blocks, dct_size**2) tensor.
        """
        batch_size, _, height, width = image_channel.shape
        dct_size = self.dct_size
        offset = 128.0  # JPEG level shift before the DCT

        # Extract non-overlapping dct_size x dct_size blocks.
        patches = image_channel.unfold(2, dct_size, dct_size).unfold(3, dct_size, dct_size)
        patches = patches.contiguous().view(batch_size, -1, dct_size * dct_size)
        patches = patches - offset
        # Project each flattened block onto the 2D DCT basis.
        coeffs = torch.matmul(patches, self.dct_2d_mat)
        return coeffs

    def _inverse_dct_2d(self, dct_coeffs: torch.Tensor, blocks_h: int, blocks_w: int) -> torch.Tensor:
        """Reconstruct a (batch, 1, H, W) channel from per-block DCT coefficients."""
        offset = 128.0  # undo the JPEG level shift
        batch_size, num_blocks, _ = dct_coeffs.shape
        dct_size = self.dct_size

        # Inverse transform (basis is orthonormal, so the inverse is M^T).
        channel = torch.matmul(dct_coeffs, self.dct_2d_mat.t()) + offset
        # Re-tile the blocks back into the spatial layout.
        channel = channel.view(batch_size, blocks_h, blocks_w, dct_size, dct_size)
        channel = channel.permute(0, 1, 3, 2, 4).contiguous()
        channel = channel.view(batch_size, 1, blocks_h * dct_size, blocks_w * dct_size)
        return channel

    def __call__(
        self,
        image: torch.Tensor,
        rounding_fn: Callable[[torch.Tensor], torch.Tensor] = torch.round,
        image_max: float = 255.0,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
        """Encode and decode ``image`` through the JPEG proxy.

        Args:
            image: (batch, 3, height, width) tensor, nominally in [0, image_max].
            rounding_fn: Rounding applied to the scaled DCT coefficients;
                substitute a soft approximation to keep gradients flowing.
            image_max: Peak pixel value used for the final clamp.

        Returns:
            Tuple of the decoded image (same spatial size as the input) and a
            dict of quantized DCT coefficients keyed by 'y', 'u', 'v'.
        """
        # Move the input to the proxy's device so it matches the DCT/color
        # matrices built in __init__ (a CPU image with a CUDA proxy used to
        # crash in the first matmul).
        image = image.to(self.device).float()
        batch_size, channels, height, width = image.shape
        assert channels == 3

        # Pad so every (possibly downsampled) channel tiles into whole blocks.
        pad_multiple = 2 * self.dct_size if self.downsample_chroma else self.dct_size
        image = self._pad_to_block_multiple(image, pad_multiple)
        _, _, padded_height, padded_width = image.shape

        if self.convert_to_yuv:
            image = self._rgb_to_yuv(image)

        downsample = [False, self.downsample_chroma, self.downsample_chroma]
        dct_keys = ['y', 'u', 'v']
        decoded_channels = []
        quantized_dct_coeffs = {}
        for ch in range(3):
            channel = image[:, ch : ch + 1, :, :]
            if downsample[ch]:
                channel = F.interpolate(
                    channel, scale_factor=0.5, mode='bilinear', align_corners=False
                )

            # Forward DCT.
            coeffs = self._forward_dct_2d(channel)

            # Channel 0 is luma; channels 1 and 2 share the chroma table.
            quantization_table = (
                self.luma_quantization_table
                if ch == 0
                else self.chroma_quantization_table
            )
            quantized_coeffs = rounding_fn(coeffs / quantization_table)
            quantized_dct_coeffs[dct_keys[ch]] = quantized_coeffs

            # Dequantize and invert the DCT.
            dequantized = quantized_coeffs * quantization_table
            blocks_h = channel.shape[2] // self.dct_size
            blocks_w = channel.shape[3] // self.dct_size
            channel = self._inverse_dct_2d(dequantized, blocks_h, blocks_w)

            if downsample[ch]:
                # 'nearest'/'area' reject align_corners, so only pass it for
                # the interpolating modes.
                interp_kwargs = (
                    {} if self.upsample_method in ('nearest', 'area')
                    else {'align_corners': False}
                )
                channel = F.interpolate(
                    channel,
                    size=(padded_height, padded_width),
                    mode=self.upsample_method,
                    **interp_kwargs,
                )
            decoded_channels.append(channel)

        decoded_image = torch.cat(decoded_channels, dim=1)

        if self.convert_to_yuv:
            decoded_image = self._yuv_to_rgb(decoded_image)

        # Crop away the padding and optionally clamp to the valid range.
        decoded_image = decoded_image[:, :, :height, :width]
        if self.clip_to_image_max:
            decoded_image = torch.clamp(decoded_image, 0.0, image_max)
        return decoded_image, quantized_dct_coeffs
def calculate_entropy(coeffs: torch.Tensor) -> float:
    """Return the empirical Shannon entropy (bits/symbol) of a coefficient tensor.

    Args:
        coeffs: Tensor of (quantized) DCT coefficients; any shape or device.

    Returns:
        Entropy in bits per coefficient; 0.0 for a constant tensor.
    """
    # detach() allows tensors that require grad (numpy() raises otherwise,
    # and this proxy is meant to be differentiable); reshape() tolerates
    # non-contiguous inputs where view() would raise.
    values = coeffs.detach().reshape(-1).cpu().numpy()
    freqs = Counter(values)
    total = sum(freqs.values())
    # Counter counts are always >= 1 here, so no zero-count guard is needed.
    return -sum(
        (count / total) * math.log2(count / total) for count in freqs.values()
    )

def estimate_compressed_size(quantized_dct_coeffs: Dict[str, torch.Tensor]) -> float:
    """Estimate the compressed size in bits.

    Each channel's cost is modeled as its per-symbol entropy multiplied by its
    number of coefficients, summed over all channels.
    """
    return sum(
        calculate_entropy(coeffs) * coeffs.numel()
        for coeffs in quantized_dct_coeffs.values()
    )


# Demo / smoke test: run an image through the JPEG proxy, report PSNR and an
# entropy-based size estimate.
if __name__ == "__main__":
    import sys

    # The image path may be supplied on the command line; the original
    # hard-coded path is kept as a fallback for backward compatibility.
    default_path = "/home/zhangyuantong/code/virtual_codec/sandwiched_compression-main/image_compression_torch/example.png"
    image_path = sys.argv[1] if len(sys.argv) > 1 else default_path
    img = Image.open(image_path).convert('RGB')

    # Convert the PIL image to a float tensor in [0, 255].
    transform = transforms.Compose([
        transforms.ToTensor(),                  # [0, 1], shape (C, H, W)
        transforms.Lambda(lambda x: x * 255.0)  # restore [0, 255] range
    ])
    image = transform(img)
    image = image.unsqueeze(0)  # add batch dim: (1, C, H, W)

    # Standard JPEG quantization tables (ITU-T T.81 Annex K).
    luma_quantization_table = torch.tensor([
        [16, 11, 10, 16, 24, 40, 51, 61],
        [12, 12, 14, 19, 26, 58, 60, 55],
        [14, 13, 16, 24, 40, 57, 69, 56],
        [14, 17, 22, 29, 51, 87, 80, 62],
        [18, 22, 37, 56, 68, 109, 103, 77],
        [24, 35, 55, 64, 81, 104, 113, 92],
        [49, 64, 78, 87, 103, 121, 120, 101],
        [72, 92, 95, 98, 112, 100, 103, 99]
    ], dtype=torch.float32)

    chroma_quantization_table = torch.tensor([
        [17, 18, 24, 47, 99, 99, 99, 99],
        [18, 21, 26, 66, 99, 99, 99, 99],
        [24, 26, 56, 99, 99, 99, 99, 99],
        [47, 66, 99, 99, 99, 99, 99, 99],
        [99, 99, 99, 99, 99, 99, 99, 99],
        [99, 99, 99, 99, 99, 99, 99, 99],
        [99, 99, 99, 99, 99, 99, 99, 99],
        [99, 99, 99, 99, 99, 99, 99, 99]
    ], dtype=torch.float32)

    # Larger factors mean coarser quantization and smaller estimated files.
    compression_factor = 2
    luma_quantization_table *= compression_factor
    chroma_quantization_table *= compression_factor

    jpeg_proxy = JpegProxy(
        downsample_chroma=False,
        luma_quantization_table=luma_quantization_table,
        chroma_quantization_table=chroma_quantization_table,
        convert_to_yuv=True,
        clip_to_image_max=True,
        dct_size=8,
        upsample_method='bilinear',
    )

    # Run the proxy codec.
    decoded_image, quantized_dct_coeffs = jpeg_proxy(image)

    # PSNR between the original and decoded images.
    original_image_tensor = image.squeeze(0)          # (C, H, W)
    decoded_image_tensor = decoded_image.squeeze(0)   # (C, H, W)
    psnr_value = calculate_psnr(original_image_tensor, decoded_image_tensor)
    print(f"压缩后的PSNR值: {psnr_value:.2f} dB")

    # Entropy-based estimate of the compressed bitstream size.
    compressed_size_bits = estimate_compressed_size(quantized_dct_coeffs)
    compressed_size_kb = compressed_size_bits / 8 / 1024  # bits -> KB
    print(f"估计的压缩后大小: {compressed_size_kb:.2f} KB")
