import torch
import torch.nn.functional as F
from torch import nn as nn
import numpy as np
import math

from basicsr.utils.registry import ARCH_REGISTRY

from .network_swinir import RSTB
from .fema_utils import ResBlock, CombineQuantBlock 
from .vgg_arch import VGGFeatureExtractor
from .lrformer import Block

class VectorQuantizer(nn.Module):
    """Dual-codebook vector quantizer for haze-aware features.

    The latent is split channel-wise into a "hazy" half and a "clear" half;
    each half has its own codebook of ``n_e`` entries of dimension
    ``e_dim // 2``.  In the HQ stage (``LQ_stage=False``) both halves are
    quantized jointly with a shared index/attention map; in the LQ stage the
    (hazy) features are matched against the hazy codebook but decoded with the
    *clear* codebook (cross quantization).

    Args:
        n_e (int): number of entries per codebook.
        e_dim (int): full latent channel dimension; must be even.
        beta (float): commitment-loss weight.
        LQ_stage (bool): selects the LQ (restoration) forward path.
        show_usage (bool): track which codebook indices were ever selected.
        entropy_loss_ratio (float | None): weight of the entropy regularizer
            in the HQ stage; ``None`` means 1.0.
        quantize_way (str): 'min' (nearest neighbour) or 'attention' (soft).
        l2_norm (bool): L2-normalize features and codes before matching.
    """

    def __init__(self, n_e, e_dim, beta=0.25, LQ_stage=False, show_usage=True,
                 entropy_loss_ratio=None, quantize_way='min', l2_norm=True):
        super().__init__()
        self.n_e = int(n_e)
        self.e_dim = int(e_dim)
        if self.e_dim % 2 != 0:
            raise ValueError(f'e_dim must be even (split into hazy/clear halves), got {self.e_dim}')
        self.LQ_stage = LQ_stage
        self.beta = beta
        self.l2_norm = l2_norm
        self.embedding_hazy = nn.Embedding(self.n_e, self.e_dim // 2)
        self.embedding_hazy.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
        self.embedding_clear = nn.Embedding(self.n_e, self.e_dim // 2)
        self.embedding_clear.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
        if self.l2_norm:
            # Start the codebooks on the unit sphere so distances are comparable.
            self.embedding_clear.weight.data = F.normalize(self.embedding_clear.weight.data, p=2, dim=-1)
            self.embedding_hazy.weight.data = F.normalize(self.embedding_hazy.weight.data, p=2, dim=-1)

        self.show_usage = show_usage
        self.entropy_loss_ratio = 1.0 if entropy_loss_ratio is None else entropy_loss_ratio
        self.quantize_way = quantize_way
        # BUGFIX: the original set temperature=0.5 for 'attention' and then
        # unconditionally overwrote it with torch.ones(1), so the attention
        # temperature never took effect.
        self.temperature = 0.5 if quantize_way == 'attention' else torch.ones(1)
        if self.show_usage:
            self.register_buffer("codebook_used", torch.zeros(self.n_e, dtype=torch.long))

    def forward(self, z):
        """Quantize a latent feature map.

        Args:
            z (Tensor): (b, c, h, w); c == e_dim in the HQ stage and
                c == e_dim // 2 in the LQ stage.

        Returns:
            tuple: ``(z_q, codebook_loss, codebook_usage,
            (d_hazy_norm, d_clear_norm))`` where ``z_q`` has the same shape as
            ``z`` and carries straight-through gradients; the distance norms
            are ``None`` outside the HQ 'min' path.
        """
        d_hazy_norm = None
        d_clear_norm = None
        codebook_usage = None
        # (b, c, h, w) -> (b, h, w, c)
        z = torch.einsum('b c h w -> b h w c', z).contiguous()

        if not self.LQ_stage:
            # ---- HQ stage: quantize both halves against their own codebooks ----
            if self.quantize_way == 'min':
                z_hazy, z_clear = torch.chunk(z, chunks=2, dim=-1)
                z_flattened = z.view(-1, self.e_dim)  # (b*h*w, c)
                z_flattened_hazy, z_flattened_clear = torch.chunk(z_flattened, chunks=2, dim=-1)

                if self.l2_norm:
                    z_flattened_hazy = F.normalize(z_flattened_hazy, p=2, dim=-1)
                    z_flattened_clear = F.normalize(z_flattened_clear, p=2, dim=-1)
                    z_hazy = F.normalize(z_hazy, p=2, dim=-1)
                    z_clear = F.normalize(z_clear, p=2, dim=-1)
                    z = torch.cat([z_hazy, z_clear], dim=-1)
                    embedding_hazy = F.normalize(self.embedding_hazy.weight, p=2, dim=-1)
                    embedding_clear = F.normalize(self.embedding_clear.weight, p=2, dim=-1)
                else:
                    # BUGFIX: these were only defined under l2_norm, causing a
                    # NameError when l2_norm=False.
                    embedding_hazy = self.embedding_hazy.weight
                    embedding_clear = self.embedding_clear.weight

                # Squared Euclidean distance ||z - e||^2, expanded term-wise.
                d_hazy = torch.sum(z_flattened_hazy ** 2, dim=1, keepdim=True) + \
                    torch.sum(embedding_hazy ** 2, dim=1) - 2 * \
                    torch.einsum('bd,dn->bn', z_flattened_hazy, torch.einsum('n d -> d n', embedding_hazy))

                d_clear = torch.sum(z_flattened_clear ** 2, dim=1, keepdim=True) + \
                    torch.sum(embedding_clear ** 2, dim=1) - 2 * \
                    torch.einsum('bd,dn->bn', z_flattened_clear, torch.einsum('n d -> d n', embedding_clear))

                # KL divergence pulls the hazy soft assignment towards the
                # (detached) clear assignment.
                temperature = 0.05
                p_hazy = F.softmax(-d_hazy / temperature, dim=-1)
                p_clear = F.softmax(-d_clear / temperature, dim=-1)
                distance_loss = F.kl_div(p_hazy.log(), p_clear.detach(), reduction='batchmean')

                # Diagnostics returned to the caller.
                d_hazy_norm = torch.mean(torch.sum(d_hazy ** 2, dim=-1))
                d_clear_norm = torch.mean(torch.sum(d_clear ** 2, dim=-1))

                # Direct quantization: one shared index selects from both codebooks.
                d = d_hazy + d_clear
                indices = torch.argmin(d, dim=1)  # (b*h*w,)
                all_embeddings = torch.cat([self.embedding_hazy.weight, self.embedding_clear.weight], dim=-1)
                z_q = all_embeddings[indices].reshape(z.shape)

                if self.show_usage:
                    # Mark every index selected in this batch as used.
                    self.codebook_used.data[torch.unique(indices)] = 1

                e_latent_loss = torch.mean((z_q.detach() - z) ** 2)  # commitment term
                q_latent_loss = torch.mean((z_q - z.detach()) ** 2)  # codebook term
                codebook_loss = q_latent_loss + e_latent_loss * self.beta + \
                    self.entropy_loss_ratio * compute_entropy_loss(-d) + distance_loss

            elif self.quantize_way == 'attention':
                # BUGFIX: z_hazy / z_clear were never defined in this branch
                # (NameError at the l2_norm block in the original).
                z_hazy, z_clear = torch.chunk(z, chunks=2, dim=-1)
                z_flattened = z.view(-1, self.e_dim)  # (b*h*w, c)
                z_flattened_hazy, z_flattened_clear = torch.chunk(z_flattened, chunks=2, dim=-1)

                if self.l2_norm:
                    z_flattened_hazy = F.normalize(z_flattened_hazy, p=2, dim=-1)
                    z_flattened_clear = F.normalize(z_flattened_clear, p=2, dim=-1)
                    z_hazy = F.normalize(z_hazy, p=2, dim=-1)
                    z_clear = F.normalize(z_clear, p=2, dim=-1)
                    z = torch.cat([z_hazy, z_clear], dim=-1)
                    embedding_hazy = F.normalize(self.embedding_hazy.weight, p=2, dim=-1)
                    embedding_clear = F.normalize(self.embedding_clear.weight, p=2, dim=-1)
                else:
                    embedding_hazy = self.embedding_hazy.weight
                    embedding_clear = self.embedding_clear.weight

                attn_temper = 0.05
                kl_temper = 0.05
                attn_hazy = z_flattened_hazy @ embedding_hazy.t()
                attn_clear = z_flattened_clear @ embedding_clear.t()
                p_hazy = F.softmax(attn_hazy / kl_temper, dim=-1)
                p_clear = F.softmax(attn_clear / kl_temper, dim=-1)
                # KL divergence between the two soft assignments.
                distance_loss = F.kl_div(p_hazy.log(), p_clear.detach(), reduction='batchmean')

                # Soft quantization: one shared attention map over both codebooks.
                attn_combined = attn_hazy + attn_clear
                attn = F.softmax(attn_combined / attn_temper, dim=-1)
                z_q_hazy = attn @ embedding_hazy  # (b*h*w, n_e) @ (n_e, e_dim/2)
                z_q_clear = attn @ embedding_clear
                z_q = torch.cat([z_q_hazy, z_q_clear], dim=-1).view(z.shape)

                e_latent_loss = torch.mean((z_q.detach() - z) ** 2)
                q_latent_loss = torch.mean((z_q - z.detach()) ** 2)
                codebook_loss = q_latent_loss + e_latent_loss * self.beta + \
                    self.entropy_loss_ratio * compute_entropy_loss(attn_combined) + distance_loss
            else:
                # BUGFIX: previously fell through and raised UnboundLocalError.
                raise ValueError(f'Unsupported quantize_way: {self.quantize_way!r}')

        else:
            # ---- LQ stage: cross-quantize hazy features via the clear codebook ----
            if self.quantize_way == 'min':
                z_flattened = z.view(-1, self.e_dim // 2)  # (b*h*w, c)
                if self.l2_norm:
                    z_flattened = F.normalize(z_flattened, p=2, dim=-1)
                    z = F.normalize(z, p=2, dim=-1)
                    embedding_hazy = F.normalize(self.embedding_hazy.weight, p=2, dim=-1)
                else:
                    # BUGFIX: originally undefined when l2_norm=False.
                    embedding_hazy = self.embedding_hazy.weight

                d_hazy = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
                    torch.sum(embedding_hazy ** 2, dim=1) - 2 * \
                    torch.einsum('bd,dn->bn', z_flattened, torch.einsum('n d -> d n', embedding_hazy))

                # Cross quantization: match against hazy codes, decode clear codes.
                indices_hazy = torch.argmin(d_hazy, dim=1)  # (b*h*w,)
                z_q = self.embedding_clear(indices_hazy).view(z.shape)

                e_latent_loss = torch.mean((z_q.detach() - z) ** 2)
                q_latent_loss = torch.mean((z_q - z.detach()) ** 2)
                codebook_loss = q_latent_loss + e_latent_loss * self.beta

            elif self.quantize_way == 'attention':
                # BUGFIX: this branch referenced undefined names (z_permuted,
                # b/h/w, self.embedding).  Rebuilt as soft cross-quantization:
                # attention scores over the hazy codebook, values from the
                # clear codebook, mirroring the 'min' LQ path.
                b, h, w, _ = z.shape
                z_view = z.view(b, h * w, self.e_dim // 2)
                if self.l2_norm:
                    z_view = F.normalize(z_view, p=2, dim=-1)
                    z = z_view.view(b, h, w, -1)
                    hazy_code = F.normalize(self.embedding_hazy.weight, p=2, dim=-1)
                    clear_code = F.normalize(self.embedding_clear.weight, p=2, dim=-1)
                else:
                    hazy_code = self.embedding_hazy.weight
                    clear_code = self.embedding_clear.weight

                attn = F.softmax((z_view @ hazy_code.t()) * self.temperature, dim=-1)
                z_q = (attn @ clear_code).view(b, h, w, -1)

                e_latent_loss = torch.mean((z_q.detach() - z) ** 2)
                q_latent_loss = torch.mean((z_q - z.detach()) ** 2)
                codebook_loss = q_latent_loss + e_latent_loss * self.beta
            else:
                raise ValueError(f'Unsupported quantize_way: {self.quantize_way!r}')

        # Straight-through estimator: copy gradients from z_q to z.
        z_q = z + (z_q - z).detach()
        # (b, h, w, c) -> (b, c, h, w)
        z_q = torch.einsum('b h w c -> b c h w', z_q)

        if self.show_usage:
            # Fraction of codebook entries ever selected since the last reset.
            codebook_usage = torch.sum(self.codebook_used) / self.n_e

        return z_q, codebook_loss, codebook_usage, (d_hazy_norm, d_clear_norm)

    def get_codebook_entry(self, indices):
        """Look up concatenated [hazy | clear] codes for integer indices.

        Args:
            indices (LongTensor): (b, 1, h, w) codebook indices.

        Returns:
            Tensor: (b, e_dim, h, w) quantized features.
        """
        b, _, h, w = indices.shape
        hazy_code, clear_code = self.embedding_hazy.weight, self.embedding_clear.weight
        all_code = torch.cat([hazy_code, clear_code], dim=-1)
        z_q = all_code[indices]
        z_q = z_q.view(b, h, w, self.e_dim).permute(0, 3, 1, 2).contiguous()
        return z_q

    def reset_usage(self):
        """Reset the usage statistics; call at the start of each epoch."""
        if self.show_usage:
            self.codebook_used.fill_(0)

def compute_entropy_loss(affinity, loss_type="softmax", temperature=0.01):
    """Entropy regularizer over codebook assignment logits.

    Encourages each sample's assignment to be confident (low sample entropy)
    while keeping the batch-average assignment spread across the codebook
    (high average entropy).

    Args:
        affinity (Tensor): (..., n_e) assignment logits.
        loss_type (str): only "softmax" is supported.
        temperature (float): softmax temperature applied to the logits.

    Returns:
        Tensor: scalar loss ``sample_entropy - avg_entropy``.

    Raises:
        ValueError: if ``loss_type`` is not "softmax".
    """
    flat_affinity = affinity.reshape(-1, affinity.shape[-1])
    # BUGFIX: was an in-place `/=`, which mutated the caller's tensor through
    # the reshape view and raises a RuntimeError under autograd when the
    # input requires grad.
    flat_affinity = flat_affinity / temperature
    probs = F.softmax(flat_affinity, dim=-1)
    log_probs = F.log_softmax(flat_affinity + 1e-5, dim=-1)
    if loss_type == "softmax":
        target_probs = probs
    else:
        raise ValueError("Entropy loss {} not supported".format(loss_type))

    avg_probs = torch.mean(target_probs, dim=0)
    avg_entropy = - torch.sum(avg_probs * torch.log(avg_probs + 1e-5))
    sample_entropy = - torch.mean(torch.sum(target_probs * log_probs, dim=-1))
    beta = 1
    loss = sample_entropy - beta * avg_entropy
    return loss

class SwinLayers(nn.Module):
    """A stack of four RSTB blocks applied to a (b, c, h, w) feature map."""

    def __init__(self, input_resolution=(32, 32), embed_dim=256,
                blk_depth=6,
                num_heads=8,
                window_size=8,
                **kwargs):
        super().__init__()
        # Four identically-configured residual Swin transformer blocks.
        self.swin_blks = nn.ModuleList([
            RSTB(embed_dim, input_resolution, blk_depth, num_heads,
                 window_size, patch_size=1, **kwargs)
            for _ in range(4)
        ])

    def forward(self, x):
        b, c, h, w = x.shape
        # (b, c, h, w) -> (b, h*w, c) token sequence expected by RSTB.
        tokens = x.reshape(b, c, h * w).transpose(1, 2)
        for blk in self.swin_blks:
            tokens = blk(tokens, (h, w))
        # Back to the spatial layout.
        return tokens.transpose(1, 2).reshape(b, c, h, w)
    
class LRFormerLayers(nn.Module):
    """A stack of 12 LRFormer blocks sharing one set of depthwise pooling convs."""

    def __init__(self, embed_dim=256):
        super().__init__()
        pooled_sizes = [11, 8, 6, 4]
        depth = 12
        # Stochastic-depth rates decay linearly from 0 to 0.1 across the stack.
        drop_rates = torch.linspace(0, 0.1, depth).tolist()

        # Depthwise convs shared by every block's pooled attention
        # (Block.forward takes them as an argument).
        self.d_convs = nn.ModuleList([
            nn.Conv2d(embed_dim, embed_dim, kernel_size=3, stride=1,
                      padding=1, groups=embed_dim)
            for _ in pooled_sizes
        ])

        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim,
                num_heads=2,
                mlp_ratio=4.0,
                qkv_bias=True,
                drop=0.0,
                attn_drop=0.0,
                drop_path=rate,
                ls=False,
                pooled_sizes=pooled_sizes,
                q_pooled_size=16,
                q_conv=False,  # note: the parameter name is q_conv
            )
            for rate in drop_rates
        ])

    def forward(self, x):
        B, C, H, W = x.shape
        # (B, C, H, W) -> (B, N, C)
        tokens = x.flatten(2).transpose(1, 2)
        for blk in self.blocks:
            tokens = blk(tokens, H, W, d_convs=self.d_convs)
        # (B, N, C) -> (B, C, H, W)
        return tokens.transpose(1, 2).reshape(B, C, H, W)


class MultiScaleEncoder(nn.Module):
    """Strided-conv encoder that returns features at every downsampling level.

    ``channel_query_dict`` maps a spatial resolution to its channel width;
    each stage halves the resolution and re-queries the dict for channels.
    """

    def __init__(self,
                 in_channel,
                 max_depth,
                 input_res=256,
                 channel_query_dict=None,
                 norm_type='gn',
                 act_type='leakyrelu',
                 LQ_stage=True,
                 **swin_opts,
                 ):
        super().__init__()

        self.in_conv = nn.Conv2d(in_channel, channel_query_dict[input_res], 4, padding=1)
        self.LQ_stage = LQ_stage
        self.max_depth = max_depth
        self.blocks = nn.ModuleList()
        self.up_blocks = nn.ModuleList()  # kept for interface compatibility; unused here

        res = input_res
        for _ in range(max_depth):
            in_ch = channel_query_dict[res]
            out_ch = channel_query_dict[res // 2]
            # One stage: stride-2 conv halves the resolution, then two ResBlocks.
            stage = nn.Sequential(
                nn.Conv2d(in_ch, out_ch, 3, stride=2, padding=1),
                ResBlock(out_ch, out_ch, norm_type, act_type),
                ResBlock(out_ch, out_ch, norm_type, act_type),
            )
            self.blocks.append(stage)
            res //= 2

    def forward(self, input):
        feats = []
        x = self.in_conv(input)
        for stage in self.blocks:
            x = stage(x)
            feats.append(x)
        # One feature map per downsampling level, coarsest last.
        return feats


class DecoderBlock(nn.Module):
    """One decoder stage: 2x nearest upsample, a 3x3 conv, then two ResBlocks."""

    def __init__(self, in_channel, out_channel, norm_type='gn', act_type='leakyrelu'):
        super().__init__()
        self.block = nn.Sequential(
            nn.Upsample(scale_factor=2),
            nn.Conv2d(in_channel, out_channel, 3, stride=1, padding=1),
            ResBlock(out_channel, out_channel, norm_type, act_type),
            ResBlock(out_channel, out_channel, norm_type, act_type),
        )

    def forward(self, input):
        return self.block(input)