import io
import logging
import numpy as np
from PIL import Image
import torch
import torch.nn.functional as F
from typing import Callable, Dict, List, Optional, Tuple
from jpeg_proxy import pad_spatially_to_multiple_of_bsize,JpegProxy,calculate_psnr
from torchvision import transforms
import cv2

def calculate_entropy(coeffs: torch.Tensor) -> float:
    """Compute the empirical entropy (in bits) of quantized DCT coefficients.

    The histogram is built over absolute integer magnitudes, so sign
    information is ignored; callers should pass coefficients that lie on an
    integer grid (values are truncated to int64).

    Args:
        coeffs: Tensor of quantized DCT coefficients, any shape.

    Returns:
        Shannon entropy of the magnitude histogram, in bits per coefficient.
    """
    # detach() allows grad-tracking tensors; reshape(-1) also handles
    # non-contiguous tensors, unlike view(-1).
    coeffs_flat = coeffs.detach().reshape(-1).cpu().numpy()
    freqs = np.bincount(np.abs(coeffs_flat).astype(np.int64))
    probs = freqs / np.sum(freqs)
    # Drop zero-probability bins instead of adding an epsilon inside the log,
    # which slightly biased the estimate.
    probs = probs[probs > 0]
    return float(-np.sum(probs * np.log2(probs)))
def _encode_decode_with_jpeg(
    input_images: np.ndarray,
    qstep: np.float32,
    one_channel_at_a_time: bool = False,
    use_420: bool = False,
) -> Tuple[np.ndarray, np.ndarray]:
    """Compress-decompress with actual jpeg with fixed qstep.

    Args:
        input_images: Array of shape [b, h, w, c] where b is batch size, h x w is
          the image size, and c is the number of channels.
        qstep: float that determines the step-size of the scalar quantizer.
        one_channel_at_a_time: True if each channel should be encoded independently
          as a grayscale image.
        use_420: True when desired subsampling is 4:2:0. False when 4:4:4.

    Returns:
        decoded: Array of same size as input_images containing the
          quantized-dequantized version of the input_images.
        rate: Array of size b that contains the total number of bits needed to
          encode the input_images into decoded.
    """
    assert input_images.ndim == 4
    decoded = np.zeros_like(input_images)
    rate = np.zeros(input_images.shape[0])
    # JPEG quantization-table entries must be bytes in [1, 255]: a value of 0
    # is not a valid quantizer divisor and PIL/libjpeg reject it, so clip the
    # lower bound to 1 (not 0).
    jpeg_qstep = np.clip(np.rint(qstep).astype(int), 1, 255)
    # Flat table: the same step for all 64 DCT frequencies.
    qtable = [jpeg_qstep] * 64

    def run_jpeg(input_image: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """Encodes one image (or one grayscale plane); returns (decoded, bits)."""
        img = Image.fromarray(
            np.rint(np.clip(input_image, 0, 255)).astype(np.uint8))
        buf = io.BytesIO()
        img.save(
            buf,
            format='jpeg',
            quality=100,
            optimize=True,
            qtables=[qtable, qtable, qtable],
            subsampling='4:2:0' if use_420 else '4:4:4',
        )
        decoded = np.array(Image.open(buf))
        # Rate counts the whole bitstream, headers included.
        rate = np.array(8 * len(buf.getbuffer()))
        return decoded, rate

    for index in range(input_images.shape[0]):
        if not one_channel_at_a_time:
            decoded[index], rate[index] = run_jpeg(input_images[index])
        else:
            # Run each channel separately through jpeg as a grayscale image
            # (Image.mode = 'L'.) Useful when RGB <-> YUV conversions need to be
            # skipped.
            for channel in range(input_images.shape[-1]):
                decoded_img, channel_rate = run_jpeg(input_images[index, ..., channel])
                decoded[index, ..., channel] = decoded_img
                rate[index] += channel_rate

    return decoded.astype(np.float32), rate.astype(np.float32)

def convert_420_to_444(
    inputs: torch.Tensor,
    method: str = 'bilinear',
) -> torch.Tensor:
    """Converts a YUV420 tensor to YUV444.

    The expected 4:2:0 layout is the one produced by `convert_444_to_420`:
    the half-resolution chroma planes are packed into the top-left quadrant
    of the full-size chroma channels (the remainder is zero padding).

    Args:
        inputs: Tensor of size [b, c, h, w] or [b, f, c, h, w].
        method: Desired chroma resizing method.

    Returns:
        outputs: Tensor of the same size as inputs where UV channels have been
        upsampled.
    """
    if inputs.dim() == 4:
        h, w = inputs.size(2), inputs.size(3)
        luma = inputs[:, 0:1, :, :]
        # Read the packed top-left quadrant (matching convert_444_to_420's
        # bottom/right zero padding) rather than a strided ::2 subsampling,
        # so the two functions round-trip consistently.
        chroma = inputs[:, 1:, :h // 2, :w // 2]
        chroma_up = F.interpolate(chroma, size=(h, w), mode=method, align_corners=False)
        return torch.cat([luma, chroma_up], dim=1)
    elif inputs.dim() == 5:
        b, f, c, h, w = inputs.shape
        luma = inputs[:, :, 0:1, :, :]
        chroma = inputs[:, :, 1:, :h // 2, :w // 2]
        # Fold the frame axis into the batch so interpolate sees rank-4 input.
        chroma = chroma.reshape(-1, c - 1, h // 2, w // 2)
        chroma_up = F.interpolate(chroma, size=(h, w), mode=method, align_corners=False)
        chroma_up = chroma_up.reshape(b, f, c - 1, h, w)
        return torch.cat([luma, chroma_up], dim=2)
    else:
        raise ValueError('inputs must have rank 4 or 5.')

def convert_444_to_420(
    inputs: torch.Tensor,
    method: str = 'bilinear',
) -> torch.Tensor:
    """Converts a 444 tensor to 420 by downsampling the chroma channels.

    The half-resolution chroma is packed into the top-left quadrant of the
    original-size plane; the bottom/right remainder is filled with zeros so
    the output keeps the input's shape.

    Args:
        inputs: Tensor of size [b, c, h, w] or [b, f, c, h, w].
        method: Desired chroma resizing method.

    Returns:
        outputs: Tensor of the same size as inputs where chroma channels have been
        downsampled.
    """
    rank = inputs.dim()
    if rank not in (4, 5):
        raise ValueError('inputs must have rank 4 or 5.')

    if rank == 4:
        h, w = inputs.size(2), inputs.size(3)
        luma = inputs[:, :1, :, :]
        chroma = inputs[:, 1:, :, :]
        small = F.interpolate(chroma, size=(h // 2, w // 2), mode=method, align_corners=False)
        # Zero-pad on the right/bottom back to the full spatial size.
        padded = F.pad(
            small,
            (0, w - small.size(-1), 0, h - small.size(-2)),
            mode='constant',
            value=0,
        )
        return torch.cat([luma, padded], dim=1)

    b, f, c, h, w = inputs.shape
    luma = inputs[:, :, :1, :, :]
    # Fold frames into the batch so interpolate/pad see rank-4 tensors.
    chroma = inputs[:, :, 1:, :, :].reshape(-1, c - 1, h, w)
    small = F.interpolate(chroma, size=(h // 2, w // 2), mode=method, align_corners=False)
    padded = F.pad(
        small,
        (0, w - small.size(-1), 0, h - small.size(-2)),
        mode='constant',
        value=0,
    )
    return torch.cat([luma, padded.reshape(b, f, c - 1, h, w)], dim=2)

class EncodeDecodeIntra(torch.nn.Module):
    """A class with methods for basic intra compression emulation.

    Wraps a differentiable JPEG proxy (`JpegProxy`) with a (optionally
    trainable) quantizer step-size and two rate models: a JPEG-specific one
    calibrated against real JPEG bitstream sizes, and a Gaussian
    high-resolution-quantization one.
    """

    def __init__(
        self,
        rounding_fn: Callable[[torch.Tensor], torch.Tensor] = torch.round,
        device:torch.device = torch.device('cuda'),
        use_jpeg_rate_model: bool = True,
        qstep_init: float = 1.0,
        train_qstep: bool = True,
        min_qstep: float = 0.0,
        jpeg_clip_to_image_max: bool = True,
        convert_to_yuv: bool = False,
        downsample_chroma: bool = False,
        
    ):
        """Constructor.

        Args:
          rounding_fn: Callable that is used to round transform coefficients for
            JPEG during quantization.
          device: Device on which the proxy layer's quantization tables live.
          use_jpeg_rate_model: True for JPEG-specific rate model, False for
            Gaussian-distribution-based rate model.
          qstep_init: float that determines initial value for the step-size of the
            scalar quantizer.
          train_qstep: Whether qstep should be trained.
          min_qstep: Minimum value which qstep should be greater than.
          jpeg_clip_to_image_max: True if jpeg proxy should clip the final output to
            [0, image_max].
          convert_to_yuv: True if color conversion should be applied during
            compression.
          downsample_chroma: Whether chroma planes should be downsampled during
            compression.
        """
        super().__init__()
        self.train_qstep = train_qstep
        if self.train_qstep:
            # Trainable step-size, registered as a Parameter so optimizers see it.
            self.qstep = torch.nn.Parameter(torch.tensor(qstep_init, dtype=torch.float32))
        else:
            # NOTE(review): a plain tensor (not a registered buffer), so it
            # will NOT move with `model.to(device)` — confirm this is intended.
            self.qstep = torch.tensor(qstep_init, dtype=torch.float32)
        self.min_qstep = min_qstep

        self.clip_to_image_max = jpeg_clip_to_image_max
        self._rounding_fn = rounding_fn

        self.use_jpeg_rate_model = use_jpeg_rate_model
        # Without a YUV conversion, each channel is pushed through real JPEG
        # independently as a grayscale image (see _rate_proxy_jpeg).
        self.run_jpeg_one_channel_at_a_time = not convert_to_yuv
        self.run_jpeg_with_downsampled_chroma = downsample_chroma

        logging.info(
            'EncodeDecodeIntra configured with %s',
            'jpeg-rate' if use_jpeg_rate_model else 'gaussian-rate',
        )

        logging.info(
            'EncodeDecodeIntra running %s',
            '420' if downsample_chroma else '444',
        )

        logging.info(
            'EncodeDecodeIntra yuv conversion is %s',
            'on' if convert_to_yuv else 'off',
        )
        self.device = device
        self._init_jpeg_layer(convert_to_yuv, downsample_chroma,device)
        # Preload PIL's codec registry so later Image.open/save calls are safe.
        Image.init()

    def _positive_qstep(self) -> torch.Tensor:
        """Maps the raw qstep to a usable step-size.

        ELU with alpha=0.01 is smooth and bounded below by -0.01, so the
        result stays above (min_qstep - 0.01) while remaining differentiable.
        """
        return F.elu(self.qstep, alpha=0.01) + self.min_qstep

    def get_qstep(self) -> torch.Tensor:
        """Returns the effective (positive) quantizer step-size."""
        return self._positive_qstep()

    def _quantizer_fn(self, x: torch.Tensor) -> torch.Tensor:
        """Implements quantize-dequantize with the trainable qstep."""
        positive_qstep = self._positive_qstep()
        return self._rounding_fn(x / positive_qstep) * positive_qstep

    def _init_jpeg_layer(self, convert_to_yuv: bool, downsample_chroma: bool,device):
        """Builds the differentiable JPEG proxy with standard Annex-K tables."""
        compression_factor = 1  # table scale factor; try e.g. 2, 5 or 10
        # Standard JPEG luma quantization table (ITU-T T.81 Annex K).
        luma_quantization_table = torch.tensor([
                [16, 11, 10, 16, 24, 40, 51, 61],
                [12, 12, 14, 19, 26, 58, 60, 55],
                [14, 13, 16, 24, 40, 57, 69, 56],
                [14, 17, 22, 29, 51, 87, 80, 62],
                [18, 22, 37, 56, 68, 109, 103, 77],
                [24, 35, 55, 64, 81, 104, 113, 92],
                [49, 64, 78, 87, 103, 121, 120, 101],
                [72, 92, 95, 98, 112, 100, 103, 99]
            ], dtype=torch.float32).to(self.device)

        # Standard JPEG chroma quantization table (ITU-T T.81 Annex K).
        chroma_quantization_table = torch.tensor([
            [17, 18, 24, 47, 99, 99, 99, 99],
            [18, 21, 26, 66, 99, 99, 99, 99],
            [24, 26, 56, 99, 99, 99, 99, 99],
            [47, 66, 99, 99, 99, 99, 99, 99],
            [99, 99, 99, 99, 99, 99, 99, 99],
            [99, 99, 99, 99, 99, 99, 99, 99],
            [99, 99, 99, 99, 99, 99, 99, 99],
            [99, 99, 99, 99, 99, 99, 99, 99]
        ], dtype=torch.float32).to(self.device)
        luma_quantization_table *= compression_factor
        chroma_quantization_table *= compression_factor
        self._jpeg_layer = JpegProxy(
            downsample_chroma=downsample_chroma,
            luma_quantization_table=luma_quantization_table,
            chroma_quantization_table=chroma_quantization_table,
            convert_to_yuv=convert_to_yuv,
            clip_to_image_max=self.clip_to_image_max,
            device=device
        )

    def _rate_proxy_gaussian(self, inputs: torch.Tensor, axis: List[int]) -> torch.Tensor:
        """Calculates entropy assuming a Gaussian distribution and high-res quantization.

        Args:
            inputs: Tensor whose leading dimension is the batch.
            axis: Dimensions over which the standard deviation is taken.

        Returns:
            Per-example rate estimate in bits (shape [batch]).
        """
        deviations = torch.std(inputs, dim=axis, keepdim=False)
        assert deviations.size(0) == inputs.size(0)
        positive_qstep = self._positive_qstep()
        # Differential entropy of a Gaussian quantized with step qstep:
        # log(sigma/qstep) + 0.5*log(2*pi*e), clamped at zero (relu) since
        # rate cannot be negative.
        hires_entropy = torch.relu(
            torch.log(deviations / positive_qstep + np.finfo(float).eps) +
            0.5 * np.log(2 * np.pi * np.exp(1))
        )
        batch_size = inputs.size(0)
        hires_entropy = hires_entropy.view(batch_size, -1)
        sum_entropies = torch.sum(hires_entropy, dim=1)
        num_samples = torch.prod(torch.tensor([inputs.size(dim) for dim in axis], dtype=torch.float32))
        # Division by log(2) converts nats to bits.
        rate = sum_entropies * num_samples / np.log(2)
        return rate

    def _rate_proxy_jpeg(self, three_channel_inputs: torch.Tensor, dequantized_dct_coeffs: Dict[str, torch.Tensor]) -> torch.Tensor:
        """Calculates a rate proxy based on a Jpeg-specific rate model.

        A differentiable surrogate for the nonzero-coefficient count is scaled
        by a detached per-example weight so that, at the current operating
        point, the proxy equals the actual JPEG rate while gradients flow
        through the surrogate only.
        """
        def calculate_non_zeros(dct_coeffs: Dict[str, torch.Tensor], qstep: torch.Tensor) -> torch.Tensor:
            # Smooth surrogate for counting nonzero quantized coefficients:
            # log(1 + |c/qstep|) per coefficient, summed per example.
            num_nonzeros = torch.zeros(three_channel_inputs.shape[0], device=three_channel_inputs.device)
            for k in dct_coeffs:
                quantized_coeffs = dct_coeffs[k] / qstep
                num_nonzeros += torch.sum(
                    torch.reshape(
                        torch.log(1 + torch.abs(quantized_coeffs)),
                        (three_channel_inputs.shape[0], -1)),
                    dim=1)
            return num_nonzeros.float()

        def encode_decode_inputs_with_jpeg():
            """Encodes then decodes the three_channel_inputs using actual jpeg."""
            if self.run_jpeg_one_channel_at_a_time:
                # Per-channel grayscale encodes cannot use chroma subsampling.
                use_420 = False
            else:
                use_420 = self.run_jpeg_with_downsampled_chroma
            with torch.no_grad():
                jpeg_decoded, jpeg_rate = _encode_decode_with_jpeg(
                    three_channel_inputs.permute(0, 2, 3, 1).cpu().numpy(),
                    self._positive_qstep().item(),
                    self.run_jpeg_one_channel_at_a_time,
                    use_420,
                )
                jpeg_decoded = torch.from_numpy(jpeg_decoded).permute(0, 3, 1, 2).to(three_channel_inputs.device)
                jpeg_rate = torch.from_numpy(jpeg_rate).to(three_channel_inputs.device)
            return jpeg_decoded, jpeg_rate

        positive_qstep = self._positive_qstep()
        num_nonzero_dct_coeffs = calculate_non_zeros(dequantized_dct_coeffs, positive_qstep)
        _, jpeg_rate = encode_decode_inputs_with_jpeg()

        # line_weights = jpeg_rate / num_nonzero (detached), so the returned
        # proxy equals jpeg_rate in value but its gradient comes from the
        # differentiable surrogate.
        nonzero_times_rate = num_nonzero_dct_coeffs * jpeg_rate
        nonzero_times_nonzero = num_nonzero_dct_coeffs * num_nonzero_dct_coeffs

        line_weights = (nonzero_times_rate / (nonzero_times_nonzero + 1e-8)).detach()

        return num_nonzero_dct_coeffs * line_weights

    def is_codec_proxy_420(self):
        """Returns True when the proxy runs with 4:2:0 chroma subsampling."""
        return self.run_jpeg_with_downsampled_chroma

    def _encode_decode_jpeg(self, inputs: torch.Tensor, image_max: float) -> Tuple[torch.Tensor, torch.Tensor]:
        """Encodes then decodes the input using JPEG.

        Args:
            inputs: Tensor of shape [b, c, h, w] with c <= 3.
            image_max: Maximum pixel value of the input range.

        Returns:
            dequantized: Reconstruction with the same shape as inputs.
            rate: Per-example rate estimate (shape [b]).
        """
        if inputs.dim() != 4:
            raise ValueError('inputs must have rank 4.')
        if inputs.size(1) > 3:
            raise ValueError('jpeg layer can handle up to 3 channels.')

        # The proxy always works on 3 channels; zero-pad missing channels and
        # strip them again after decoding.
        pad_dim = 3 - inputs.size(1)
        if pad_dim > 0:
            paddings = (0, 0, 0, 0, 0, pad_dim)  # Pad channels dimension
            three_channel_inputs = F.pad(inputs, paddings, mode='constant', value=0)
        else:
            three_channel_inputs = inputs

        three_channel_inputs = self._rounding_fn(three_channel_inputs)

        dequantized_three_channels, dequantized_dct_coeffs = self._jpeg_layer(
            three_channel_inputs, self._quantizer_fn, image_max=image_max
        )

        if pad_dim > 0:
            dequantized = dequantized_three_channels[:, :inputs.size(1), :, :]
        else:
            dequantized = dequantized_three_channels

        def gaussian_rate():
            # Sum the Gaussian rate proxy over each DCT coefficient plane.
            gauss_rate = torch.zeros(inputs.size(0), device=inputs.device)
            for k in dequantized_dct_coeffs:
                gauss_rate += self._rate_proxy_gaussian(
                    dequantized_dct_coeffs[k], axis=[1])
            return gauss_rate

        def jpeg_rate():
            # When channels were encoded independently but the proxy ran 420,
            # the rate inputs must be converted to the packed 420 layout first.
            conversion_to_420_needed = (
                self.run_jpeg_one_channel_at_a_time
                and self.run_jpeg_with_downsampled_chroma
            )
            if conversion_to_420_needed:
                rate_inputs = pad_spatially_to_multiple_of_bsize(
                    three_channel_inputs, bsize=2, mode='reflect'
                )
                rate_inputs = convert_444_to_420(rate_inputs)
            else:
                rate_inputs = three_channel_inputs

            # Real JPEG operates on [0, 255]; rescale from [0, image_max].
            scale = 255.0 / image_max
            return self._rate_proxy_jpeg(scale * rate_inputs, dequantized_dct_coeffs)

        if self.use_jpeg_rate_model:
            rate = jpeg_rate()
        else:
            rate = gaussian_rate()

        return dequantized, rate

    def forward(self, inputs: torch.Tensor, input_qstep: Optional[torch.Tensor] = None, image_max: float = 255.0) -> Tuple[torch.Tensor, torch.Tensor]:
        """Encodes then decodes the input.

        Args:
            inputs: Tensor of shape [b, c, h, w]; c may exceed 3, in which
              case channels are processed in groups of 3.
            input_qstep: Optional externally supplied step-size; only used
              when the module was built with train_qstep=False.
            image_max: Maximum pixel value of the input range.

        Returns:
            dequantized: Reconstruction with the same shape as inputs.
            rate: Per-example rate estimate (shape [b]).
        """
        if inputs.dim() != 4:
            raise ValueError('inputs must have rank 4.')

        if not self.train_qstep and input_qstep is not None:
            self.qstep = input_qstep

        def run_jpeg():
            if inputs.size(1) <= 3:
                return self._encode_decode_jpeg(inputs, image_max)

            # More than 3 channels: encode 3-channel slices independently and
            # concatenate reconstructions / accumulate rates.
            c = inputs.size(1)
            limit = c
            size = 3
            begin = 0
            dequantized, rate = self._encode_decode_jpeg(
                inputs[:, begin:begin+size, :, :], image_max
            )

            for begin in range(3, limit, 3):
                size = min(limit - begin, 3)
                dequantized_loop, rate_loop = self._encode_decode_jpeg(
                    inputs[:, begin:begin+size, :, :], image_max
                )
                dequantized = torch.cat([dequantized, dequantized_loop], dim=1)
                rate += rate_loop
            return dequantized, rate

        return run_jpeg()
def test_encode_decode_intra(image_path):
    """Round-trips one RGB image through the codec and reports PSNR/entropy."""
    device = torch.device('cuda')
    # Load the image and rescale pixel values to [0, 255].
    rgb = Image.open(image_path).convert('RGB')
    to_tensor = transforms.Compose([
        transforms.ToTensor(),  # values in [0, 1]
        transforms.Lambda(lambda x: x * 255.0)  # rescale to [0, 255]
    ])
    image_tensor = to_tensor(rgb).unsqueeze(0).to(device)  # shape [1, 3, H, W]

    # Fixed-qstep codec: YUV conversion on, 4:4:4, JPEG rate model.
    model = EncodeDecodeIntra(
        rounding_fn=torch.round,
        use_jpeg_rate_model=True,
        qstep_init=1.0,
        train_qstep=False,
        min_qstep=1.0,
        jpeg_clip_to_image_max=True,
        convert_to_yuv=True,
        downsample_chroma=False,
    ).to(device)

    # Compress then decompress.
    with torch.no_grad():
        decoded_image, rate = model(image_tensor)
    psnr = calculate_psnr(decoded_image, image_tensor)
    print(f'PSNR: {psnr:.2f} dB')

    # Entropy of the luma ('y') DCT coefficients from the proxy layer.
    _, dequantized_dct_coeffs = model._jpeg_layer(
        image_tensor, model._quantizer_fn, image_max=255.0
    )
    entropy = calculate_entropy(dequantized_dct_coeffs['y'])
    print(f'Y通道DCT系数的熵: {entropy:.2f} bits')

    # Dump original and reconstruction as PNGs (RGB -> BGR for OpenCV).
    original_np = image_tensor.squeeze(0).permute(1, 2, 0).cpu().numpy()
    decoded_np = decoded_image.squeeze(0).permute(1, 2, 0).cpu().numpy()
    cv2.imwrite('output_new/original_image.png', original_np[:, :, ::-1])
    cv2.imwrite('output_new/decoded_image.png', decoded_np[:, :, ::-1])

def straight_through_estimator(x):
    """Straight-through estimator for rounding.

    Forward pass returns round(x); backward pass treats rounding as the
    identity, so gradients flow through unchanged. The previous expression
    `x.round() + (x - x.round())` simplifies algebraically to `x` (nothing
    was detached), so the forward pass never actually rounded.
    """
    # Detach the residual so only the identity term carries gradient.
    return x + (x.round() - x).detach()

def test_encode_decode_intra_backprop():
    """Smoke-tests gradient flow through the codec with a trainable qstep."""
    # Synthetic batch of 2 random 3x64x64 "images" and a random target.
    torch.manual_seed(0)
    input_tensor = torch.randn(2, 3, 64, 64, requires_grad=True)
    target_tensor = torch.randn(2, 3, 64, 64)

    device = torch.device('cuda')
    # Differentiable rounding (STE) so gradients can reach qstep.
    model = EncodeDecodeIntra(
        rounding_fn=straight_through_estimator,
        device=device,
        use_jpeg_rate_model=True,
        qstep_init=10.0,
        train_qstep=True,  # trainable so its gradient can be inspected
        min_qstep=1.0,
        jpeg_clip_to_image_max=True,
        convert_to_yuv=True,
        downsample_chroma=True,
    )
    model.to(device)
    input_tensor = input_tensor.to(device)
    target_tensor = target_tensor.to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

    # Forward pass and rate-distortion loss: MSE + lambda * rate.
    decoded_tensor, rate = model(input_tensor)
    mse_loss = F.mse_loss(decoded_tensor, target_tensor)
    lambda_value = 0.01
    total_loss = mse_loss + lambda_value * rate.mean()

    # Backward pass.
    optimizer.zero_grad()
    total_loss.backward()

    # Report whether gradients were produced.
    print("Gradients for input_tensor:", input_tensor.grad is not None)
    print("Gradients for model.qstep:", model.qstep.grad)

    # One optimizer step, then report losses and the updated step-size.
    optimizer.step()

    print(f'Total loss: {total_loss.item():.4f}')
    print(f'MSE loss: {mse_loss.item():.4f}')
    print(f'Rate: {rate.mean().item():.4f}')
    print(f'Updated qstep: {model.get_qstep().item():.4f}')
if __name__=='__main__':
    # Entry point: runs the backprop smoke test. The image round-trip test
    # needs an 'example.png' on disk and is left commented out.
    img_p = 'example.png'
    # test_encode_decode_intra(img_p)

    test_encode_decode_intra_backprop()