import torch
import torch.nn.functional as F
from torch import nn as nn
import numpy as np
import math

from basicsr.utils.registry import ARCH_REGISTRY

from .network_swinir import RSTB
from .fema_utils import ResBlock, CombineQuantBlock 
from .vgg_arch import VGGFeatureExtractor
from .PSF import *
from .femasr_arch import *
from .main_decoder import DBCA

@ARCH_REGISTRY.register()
class DualCodeBookNet(nn.Module):
    def __init__(self,
                 *,
                 in_channel=3,
                 codebook_params=None,
                 gt_resolution=256,
                 LQ_stage=False,
                 norm_type='gn',
                 act_type='silu',
                 use_quantize=True,
                 scale_factor=4,
                 use_semantic_loss=False,
                 use_residual=False,
                 entropy_loss_ratio=1.0,
                 show_codebook_usage=False,
                 former=False,
                 quantize_way=None,
                 short_cut=False,
                 **ignore_kwargs):
        """Dual-codebook encoder/decoder network for image dehazing.

        Args:
            in_channel (int): channels of the input image.
            codebook_params (sequence): rows of ``(scale, emb_num, emb_dim)``,
                one per codebook: the feature resolution it quantizes at, the
                number of embedding vectors, and the embedding dimension.
            gt_resolution (int): ground-truth / output image resolution.
            LQ_stage (bool): True for the low-quality (restoration) stage,
                False for the high-quality (codebook pretraining) stage.
            norm_type (str): normalization type for the conv blocks.
            act_type (str): activation type for the conv blocks.
            use_quantize (bool): whether vector quantization is enabled.
            scale_factor (int): input downscale factor; only honored in the
                LQ stage (forced to 1 otherwise).
            use_semantic_loss (bool): add a VGG-feature semantic loss head.
            use_residual (bool): in the LQ stage, fuse encoder features into
                the decoder through the DBCA modules.
            entropy_loss_ratio (float): entropy-loss weight in the quantizer.
            show_codebook_usage (bool): ask the quantizer to report usage.
            former (bool): run the deepest feature through transformer layers
                (LQ stage only).
            quantize_way (str): codebook matching strategy, 'min' or 'attention'.
            short_cut (bool): add upsampling residual shortcuts in the LQ decoder.
            **ignore_kwargs: extra config keys, silently ignored.
        """
        super().__init__()
        # Fail fast with a clear message instead of an obscure indexing error
        # on np.array(None) below.
        if codebook_params is None:
            raise ValueError('codebook_params must be provided as rows of (scale, emb_num, emb_dim)')
        codebook_params = np.array(codebook_params)

        self.codebook_scale = codebook_params[:, 0]  # feature resolution of each codebook
        codebook_emb_num = codebook_params[:, 1].astype(int)  # number of embeddings per codebook
        codebook_emb_dim = codebook_params[:, 2].astype(int)  # embedding dimension per codebook

        self.use_quantize = use_quantize
        self.in_channel = in_channel
        self.gt_res = gt_resolution
        self.LQ_stage = LQ_stage
        self.scale_factor = scale_factor if LQ_stage else 1
        self.use_residual = use_residual
        self.use_semantic_loss = use_semantic_loss
        self.entropy_loss_ratio = entropy_loss_ratio
        self.show_codebook_usage = show_codebook_usage
        self.former = former
        self.quantize_way = quantize_way
        self.short_cut = short_cut
        # BUGFIX: the message used to say '"min" or "ema"', contradicting the
        # values actually accepted by this check.
        assert quantize_way in ['min', 'attention'], f'quantize_way must be "min" or "attention", but got {quantize_way}'

        # resolution -> channel-count lookup table
        channel_query_dict = {
            8: 256,
            16: 256,
            32: codebook_emb_dim[0] // 2,  # e.g. 256 for a 512-dim codebook
            64: 256,
            128: 128,
            256: 64,
            512: 32,
        }

        # build encoder
        # Number of 2x upsampling steps from the quantization (bottleneck)
        # resolution codebook_scale[0] back up to gt_resolution (log2 because
        # every step halves/doubles the resolution).
        self.max_depth = int(np.log2(gt_resolution // self.codebook_scale[0]))
        # Number of 2x downsampling steps from the actual input resolution
        # (gt_resolution // scale_factor) down to the bottleneck resolution.
        encode_depth = int(np.log2(gt_resolution // self.scale_factor // self.codebook_scale[0]))

        if not self.LQ_stage:
            # HQ stage: a second encoder for the clear image and a decoder
            # branch that reconstructs the hazy image.
            self.multiscale_encoder_clear = MultiScaleEncoder(
                                in_channel,
                                encode_depth,
                                self.gt_res // self.scale_factor,
                                channel_query_dict,
                                norm_type, act_type, LQ_stage
                            )
            self.decoder_group_hazy = nn.ModuleList()
        else:
            # LQ stage: transformer refinement at the bottleneck plus optional
            # upsampling residual shortcut paths.
            self.swin_layers = LRFormerLayers(embed_dim=256)
            self.residual_shortcut = nn.ModuleList()
            res = 32
            for _ in range(2):
                # Renamed from in_channel/out_channel: the originals shadowed
                # the constructor parameter `in_channel`.
                sc_in, sc_out = channel_query_dict[res], channel_query_dict[res * 2]
                self.residual_shortcut.append(nn.Sequential(
                    nn.Upsample(scale_factor=2),
                    nn.Conv2d(sc_in, sc_out, 3, stride=1, padding=1),
                    ResBlock(sc_out, sc_out, norm_type, act_type),
                    ResBlock(sc_out, sc_out, norm_type, act_type),
                    )
                )
                res = res * 2

        self.multiscale_encoder_hazy = MultiScaleEncoder(
                                3,
                                encode_depth,
                                self.gt_res // self.scale_factor,
                                channel_query_dict,
                                norm_type, act_type, LQ_stage
                            )
        self.decoder_group_clear = nn.ModuleList()

        for i in range(self.max_depth):
            res = gt_resolution // 2**self.max_depth * 2**i
            in_ch, out_ch = channel_query_dict[res], channel_query_dict[res * 2]
            if not self.LQ_stage:
                self.decoder_group_hazy.append(DecoderBlock(in_ch, out_ch, norm_type, act_type))
            self.decoder_group_clear.append(DecoderBlock(in_ch, out_ch, norm_type, act_type))
        # The output convs only depend on the final decoder stage's channel
        # count, so build them once after the loop (previously re-created on
        # every iteration, with only the last instance kept).
        self.out_conv_clear = nn.Conv2d(out_ch, 3, 3, 1, 1)
        if not self.LQ_stage:
            self.out_conv_hazy = nn.Conv2d(out_ch, 3, 3, 1, 1)

        # build one vector quantizer per codebook scale
        self.quantize_group = nn.ModuleList()
        for scale in range(codebook_params.shape[0]):
            quantize = VectorQuantizer(
                codebook_emb_num[scale],
                codebook_emb_dim[scale],
                LQ_stage=self.LQ_stage,
                entropy_loss_ratio=self.entropy_loss_ratio,
                quantize_way=self.quantize_way,
                show_usage=self.show_codebook_usage,
            )
            self.quantize_group.append(quantize)

        if use_semantic_loss:
            # 1x1 projection from the quantized feature into VGG relu4_4 space.
            self.conv_semantic = nn.Sequential(
                nn.Conv2d(codebook_emb_dim[0], 1024, 1, 1, 0),
                nn.ReLU(),
                )
            self.vgg_feat_layer = 'relu4_4'
            self.vgg_feat_extractor = VGGFeatureExtractor([self.vgg_feat_layer])

        # Cross-attention fusion modules used by the LQ decoder, one per
        # intermediate depth (channel counts match decoder stages 1 and 2).
        self.psfm = nn.ModuleList([
            DBCA(256, num_heads=4, deformable_groups=4),
            DBCA(128, num_heads=4, deformable_groups=4),
        ])

    def encode_and_decode_refactored(self, hazy_input, clear_input, current_iter=None):
        """
        Refactored version of encode_and_decode to explicitly handle quantization
        at the deepest level without a conditional loop.

        Args:
            hazy_input (Tensor): degraded input batch (b, c, h, w).
            clear_input (Tensor): clean reference batch; only encoded in the
                HQ stage, otherwise used at most for the semantic loss.
            current_iter: unused in this body; kept for interface compatibility.

        Returns:
            tuple: ``(out_img_hazy, out_img_clear, codebook_loss, semantic_loss,
            codebook_usage, (d_hazy_norm, d_clear_norm))``. ``out_img_hazy`` is
            None in the LQ stage.
        """
        # ------------------ shared preparation ------------------
        # Inputs are detached: no gradients flow back into the input tensors.
        enc_feats_hazy = self.multiscale_encoder_hazy(hazy_input.detach())
        if self.LQ_stage == False:
            enc_feats_clear = self.multiscale_encoder_clear(clear_input.detach())

        if self.LQ_stage:
            enc_feats_hazy = enc_feats_hazy[::-1]
        else: 
            # In the HQ stage the encoder output is [shallow, ..., deepest];
            # reverse so index 0 is the deepest feature.
            enc_feats_hazy = enc_feats_hazy[::-1]
            enc_feats_clear = enc_feats_clear[::-1]
            
        if self.use_semantic_loss:
            # VGG features act as fixed semantic targets; no gradients needed.
            with torch.no_grad():
                if self.LQ_stage == False:
                    vgg_feat_clear = self.vgg_feat_extractor(clear_input)[self.vgg_feat_layer]
                vgg_feat_hazy = self.vgg_feat_extractor(hazy_input)[self.vgg_feat_layer]
        

        # accumulators for the returned losses
        codebook_loss_list = []
        semantic_loss_list = []
        # ------------------ 1. quantize at the deepest level ------------------
        # (replaces the i == 0 branch of the original decoding loop)
        deepest_feat_hazy = enc_feats_hazy[0]
        if self.LQ_stage == False:
            # HQ stage quantizes the concatenated (hazy, clear) feature pair.
            deepest_feat_clear = enc_feats_clear[0]
            deepest_feat = torch.cat([deepest_feat_hazy, deepest_feat_clear], dim=1)
        else:
            deepest_feat = deepest_feat_hazy

        if self.LQ_stage and self.former:
            # The transformer layers need H and W divisible by the window size.
            B, C, H, W = deepest_feat.shape
            window_size = 8  # must match the window_size used inside the Swin layers

            # Pad up to the next multiple of window_size if necessary.
            if H % window_size != 0 or W % window_size != 0:
                new_H = ((H + window_size - 1) // window_size) * window_size
                new_W = ((W + window_size - 1) // window_size) * window_size
                deepest_feat = F.pad(deepest_feat, (0, new_W - W, 0, new_H - H), mode='reflect')

            deepest_feat = self.swin_layers(deepest_feat)

            # If padding was applied, crop back to the original size.
            if H % window_size != 0 or W % window_size != 0:
                deepest_feat = deepest_feat[:, :, :H, :W]

        
        # Only the first (deepest-scale) quantizer is used here.
        quant_idx = 0
        z_quant, codebook_loss, codebook_usage, (d_hazy_norm, d_clear_norm) = self.quantize_group[quant_idx](deepest_feat)
        codebook_loss_list.append(codebook_loss)
        
        if self.use_semantic_loss:
            # Project the quantized feature into VGG feature space and match it.
            semantic_z_quant = self.conv_semantic(z_quant)
            if self.LQ_stage == False:
                # HQ stage: split back into the hazy / clear halves (channel dim).
                semantic_z_quant_hazy, semantic_z_quant_clear = semantic_z_quant.chunk(2, dim=1) #dim=1
                semantic_loss = F.mse_loss(semantic_z_quant_hazy, vgg_feat_hazy) + F.mse_loss(semantic_z_quant_clear, vgg_feat_clear)
                semantic_loss_list.append(semantic_loss)
            else:   
                semantic_loss = F.mse_loss(semantic_z_quant, vgg_feat_hazy)
                semantic_loss_list.append(semantic_loss)
        
        
        if self.LQ_stage == False:
            # HQ stage decodes the two halves with their dedicated decoders.
            deepest_feat_hazy, deepest_feat_clear = z_quant.chunk(2, dim=1)
            x_hazy = self.decoder_group_hazy[0](deepest_feat_hazy)
            x_clear = self.decoder_group_clear[0](deepest_feat_clear)
        else:   
            x_dehaze = self.decoder_group_clear[0](z_quant)
        
        res_shortcurt = deepest_feat  # NOTE: name is a typo for "shortcut"; kept as-is
        # ------------------ 2. run the remaining decoder blocks ------------------
        for i in range(1, self.max_depth):
            # The HQ stage deliberately has no skip connections, forcing the
            # decoder to reconstruct texture/detail/color from the codebook
            # alone; in the LQ stage, encoder features are fused in via DBCA
            # instead of a plain addition (x_dehaze += enc_feats_hazy[i]).
            if self.LQ_stage and self.use_residual:
                x_dehaze = self.psfm[i-1](x_dehaze, enc_feats_hazy[i])
            if self.LQ_stage and self.short_cut:
                res_shortcurt = self.residual_shortcut[i-1](res_shortcurt)
                x_dehaze = res_shortcurt + x_dehaze
            # upsample through the decoder block at this depth
            if self.LQ_stage == False:
                x_hazy = self.decoder_group_hazy[i](x_hazy)
                x_clear = self.decoder_group_clear[i](x_clear)
            else:
                x_dehaze = self.decoder_group_clear[i](x_dehaze)
        # ------------------ 3. produce the output images ------------------

        if self.LQ_stage == False:
            out_img_hazy = self.out_conv_hazy(x_hazy)
            out_img_clear = self.out_conv_clear(x_clear)
        else:
            out_img_clear = self.out_conv_clear(x_dehaze)
            out_img_hazy = None

        codebook_loss = sum(codebook_loss_list)
        # Zero-valued tensor fallback keeps the return type consistent when the
        # semantic loss is disabled.
        semantic_loss = sum(semantic_loss_list) if len(semantic_loss_list) else codebook_loss * 0

        return out_img_hazy, out_img_clear, codebook_loss, semantic_loss, codebook_usage, (d_hazy_norm, d_clear_norm)

    def decode_indices(self, indices):
        """Decode a map of codebook indices into a (hazy, clear) image pair.

        Args:
            indices (Tensor): codebook indices of shape (b, 1, h, w).

        Returns:
            tuple: (hazy image, clear image) produced by the two decoder branches.
        """
        assert len(indices.shape) == 4, f'shape of indices must be (b, 1, h, w), but got {indices.shape}'

        # Look up the embeddings, then split into the hazy / clear halves.
        quant_feat = self.quantize_group[0].get_codebook_entry(indices)
        feat_hazy, feat_clear = torch.chunk(quant_feat, chunks=2, dim=1)
        # Run each half through its own decoder stack.
        for block in self.decoder_group_hazy:
            feat_hazy = block(feat_hazy)
        for block in self.decoder_group_clear:
            feat_clear = block(feat_clear)
        return self.out_conv_hazy(feat_hazy), self.out_conv_clear(feat_clear)

    @torch.no_grad()
    def test_tile(self, input, tile_size=240, tile_pad=16):
        """Crop the input into tiles, run ``self.test`` on each, and stitch the
        processed tiles back into one output image.

        Modified from: https://github.com/xinntao/Real-ESRGAN/blob/master/realesrgan/utils.py

        Args:
            input (Tensor): input batch of shape (b, c, h, w).
            tile_size (int): spatial size of each (unpadded) tile.
            tile_pad (int): extra context padding around each tile; the padded
                margin is trimmed from the output so seams are computed with
                full context.

        Returns:
            Tensor: output of shape (b, c, h * scale_factor, w * scale_factor).
        """
        batch, channel, height, width = input.shape
        output_height = height * self.scale_factor
        output_width = width * self.scale_factor
        output_shape = (batch, channel, output_height, output_width)

        # start with a black image and paint the processed tiles into it
        output = input.new_zeros(output_shape)
        tiles_x = math.ceil(width / tile_size)
        tiles_y = math.ceil(height / tile_size)

        # loop over all tiles
        for y in range(tiles_y):
            for x in range(tiles_x):
                # extract tile from input image
                ofs_x = x * tile_size
                ofs_y = y * tile_size
                # input tile area on total image
                input_start_x = ofs_x
                input_end_x = min(ofs_x + tile_size, width)
                input_start_y = ofs_y
                input_end_y = min(ofs_y + tile_size, height)

                # input tile area on total image with padding, clamped to bounds
                input_start_x_pad = max(input_start_x - tile_pad, 0)
                input_end_x_pad = min(input_end_x + tile_pad, width)
                input_start_y_pad = max(input_start_y - tile_pad, 0)
                input_end_y_pad = min(input_end_y + tile_pad, height)

                # input tile dimensions (without padding)
                input_tile_width = input_end_x - input_start_x
                input_tile_height = input_end_y - input_start_y
                input_tile = input[:, :, input_start_y_pad:input_end_y_pad, input_start_x_pad:input_end_x_pad]

                # BUGFIX: self.test requires both a hazy and a clear input; the
                # previous single-argument call raised TypeError. During
                # restoration inference the clear branch is unused, so the hazy
                # tile is passed for both.
                output_tile = self.test(input_tile, input_tile)

                # output tile area on total image
                output_start_x = input_start_x * self.scale_factor
                output_end_x = input_end_x * self.scale_factor
                output_start_y = input_start_y * self.scale_factor
                output_end_y = input_end_y * self.scale_factor

                # region of the processed tile corresponding to the unpadded area
                output_start_x_tile = (input_start_x - input_start_x_pad) * self.scale_factor
                output_end_x_tile = output_start_x_tile + input_tile_width * self.scale_factor
                output_start_y_tile = (input_start_y - input_start_y_pad) * self.scale_factor
                output_end_y_tile = output_start_y_tile + input_tile_height * self.scale_factor

                # put tile into output image
                output[:, :, output_start_y:output_end_y,
                       output_start_x:output_end_x] = output_tile[:, :, output_start_y_tile:output_end_y_tile,
                                                                  output_start_x_tile:output_end_x_tile]
        return output

    @torch.no_grad()
    def test(self, hazy_input, clear_input=None):
        """Inference entry point: pad inputs to a multiple of the window size,
        run the network, and crop the output back to the input resolution.

        Args:
            hazy_input (Tensor): degraded input image (b, c, h, w).
            clear_input (Tensor | None): clean reference image; if None the
                hazy input is reused (backward compatible — the clear branch
                is not exercised during LQ-stage inference).

        Returns:
            Tensor: restored image cropped to the original spatial size.
        """
        if clear_input is None:
            clear_input = hazy_input
        org_use_semantic_loss = self.use_semantic_loss
        self.use_semantic_loss = False  # the semantic loss needs no computation at inference
        wsz = 8  # pad so feature maps divide evenly through the windowed layers
        _, _, h_old, w_old = hazy_input.shape
        h_pad = (h_old // wsz + 1) * wsz - h_old
        w_pad = (w_old // wsz + 1) * wsz - w_old

        hazy_input = F.pad(hazy_input, (0, w_pad, 0, h_pad), 'reflect')
        clear_input = F.pad(clear_input, (0, w_pad, 0, h_pad), 'reflect')

        try:
            # BUGFIX: forward returns a 6-tuple; the previous 4-element
            # unpacking (`dec, _, _, _ = ...`) raised ValueError. The restored
            # image is the second element (out_img_clear) — in the LQ stage the
            # first element is None.
            _, out_img_clear, *_ = self.forward(hazy_input, clear_input)
            output = out_img_clear[..., :h_old, :w_old]  # crop back to original size
        finally:
            # restore the flag even if the forward pass fails
            self.use_semantic_loss = org_use_semantic_loss
        return output

    def forward(self, hazy_input, clear_input):
        """Run the full encode/quantize/decode pipeline.

        Returns the 6-tuple produced by ``encode_and_decode_refactored``:
        (out_img_hazy, out_img_clear, codebook_loss, semantic_loss,
        codebook_usage, (d_hazy_norm, d_clear_norm)).
        """
        # Direct pass-through: the tuple is returned unchanged.
        return self.encode_and_decode_refactored(hazy_input, clear_input)