import torch
import torch.nn as nn

class VectorQuantizer_fordubug(nn.Module):
    """
    A self-contained VectorQuantizer with a decoupled hazy/clear codebook.

    The codebook is split channel-wise into a 'hazy' half and a 'clear' half.
    The forward logic is exposed as :meth:`forward_for_debug`, which accepts
    the codebook as an explicit argument so it can be driven with manually
    constructed tensors during debugging.
    """
    def __init__(self, n_e, e_dim, beta=0.25):
        """
        Args:
            n_e (int): Number of codebook entries.
            e_dim (int): Embedding dimension; must be even so the codebook
                can be split into hazy/clear halves.
            beta (float): Commitment-loss weight. Kept for API parity; it is
                not used by the debug forward pass.

        Raises:
            ValueError: If ``e_dim`` is odd.
        """
        super().__init__()
        self.n_e = int(n_e)
        self.e_dim = int(e_dim)
        self.beta = beta

        if self.e_dim % 2 != 0:
            raise ValueError("e_dim must be an even number for the hazy/clear split.")

        # The embedding is still part of the class for standard use,
        # but the debug forward bypasses it in favor of an external codebook.
        self.embedding = nn.Embedding(self.n_e, self.e_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

    def dist(self, x, y):
        """
        Pairwise squared Euclidean distances between two sets of vectors.

        Args:
            x (torch.Tensor): Shape (m, d).
            y (torch.Tensor): Shape (n, d).

        Returns:
            torch.Tensor: Shape (m, n), entry (i, j) = ||x_i - y_j||^2.
        """
        # ||x - y||^2 = ||x||^2 + ||y||^2 - 2 * x.y, computed without
        # materializing an (m, n, d) broadcast.
        return torch.sum(x ** 2, dim=1, keepdim=True) + \
               torch.sum(y ** 2, dim=1) - 2 * \
               torch.matmul(x, y.t())

    def forward_for_debug(self, z, codebook):
        """
        Forward pass with decoupled codebook logic, designed for debugging.

        The hazy half of ``z`` is matched (nearest neighbour under squared
        Euclidean distance) against the hazy half of the codebook, and the
        paired 'clear' codes are returned through a straight-through
        estimator.

        Args:
            z (torch.Tensor): Input feature map. Shape: (b, e_dim, h, w).
            codebook (torch.Tensor): Codebook. Shape: (n_e, e_dim), e_dim even.

        Returns:
            torch.Tensor: Dehazed representation. Shape: (b, e_dim // 2, h, w).

        Raises:
            ValueError: If z's channel dimension does not match the codebook's
                e_dim, or if e_dim is odd.
        """
        e_dim = codebook.shape[1]
        if z.shape[1] != e_dim:
            raise ValueError(f"Channel dimension of z ({z.shape[1]}) must match e_dim of codebook ({e_dim}).")
        # Unlike __init__, the externally supplied codebook is not guaranteed
        # to be even-dimensional; an odd e_dim would make chunk() return
        # unequal halves and fail later with a cryptic shape error.
        if e_dim % 2 != 0:
            raise ValueError(f"Codebook e_dim ({e_dim}) must be even for the hazy/clear split.")
        half = e_dim // 2

        # 1. Decouple the codebook and the input feature 'z'.
        hazy_code, clear_code = codebook.chunk(chunks=2, dim=1)
        hazy_representation, _ = z.chunk(chunks=2, dim=1)

        # Flatten (b, half, h, w) -> (b*h*w, half) for the distance matrix.
        b, _, h, w = hazy_representation.shape
        hazy_flat = hazy_representation.permute(0, 2, 3, 1).contiguous().view(-1, half)

        # 2. Find the closest content vector in the 'hazy_code' part.
        d = self.dist(hazy_flat, hazy_code)
        min_encoding_indices = torch.argmin(d, dim=1)

        # 3. Use the same indices to retrieve the paired 'clear' codes.
        dehazed_q = clear_code[min_encoding_indices]

        # Reshape back to the (b, c, h, w) feature-map layout.
        dehazed_q = dehazed_q.view(b, h, w, half).permute(0, 3, 1, 2).contiguous()

        # 4. Straight-through estimator: forward value equals dehazed_q,
        # gradients flow to the hazy input.
        return hazy_representation + (dehazed_q - hazy_representation).detach()

def get_dehazed_representation_with_manual_input(z, codebook):
    """
    Debugging wrapper: run the decoupled dehazing logic on caller-supplied
    tensors.

    Args:
        z (torch.Tensor): The manually specified input tensor,
            shape (b, e_dim, h, w).
        codebook (torch.Tensor): The manually specified codebook tensor,
            shape (n_e, e_dim).

    Returns:
        torch.Tensor: The dehazed output tensor.
    """
    num_codes, code_dim = codebook.shape

    # An instance is only required to reach the 'dist' method; the debug
    # forward ignores the instance's own embedding table.
    quantizer = VectorQuantizer_fordubug(n_e=num_codes, e_dim=code_dim)

    with torch.no_grad():
        return quantizer.forward_for_debug(z, codebook)

if __name__ == '__main__':
    # --- Example of how to use the function for debugging ---

    # 1. Define parameters
    embedding_dim = 512  # Must be an even number
    num_embeddings = 1024
    batch_size = 1
    height = 32
    width = 32

    # 2. Manually create your input tensor 'z' and 'codebook'
    #    You can replace these with your actual tensors for debugging.
    input_tensor_z = torch.randn(batch_size, embedding_dim, height, width)
    manual_codebook = torch.randn(num_embeddings, embedding_dim)

    # Plain literal (no placeholders), so no f-string prefix is needed.
    print("--- Debug Script for Decoupled VectorQuantizer Logic ---")
    print(f"Manually specified input tensor 'z' shape: {input_tensor_z.shape}")
    print(f"Manually specified 'codebook' shape: {manual_codebook.shape}")
    print("-" * 50)

    # 3. Call the function with your manual inputs
    dehazed_tensor = get_dehazed_representation_with_manual_input(
        z=input_tensor_z,
        codebook=manual_codebook
    )

    # 4. Print and verify the output: channels should halve to e_dim // 2.
    print(f"Returned 'dehaze_resprestation_q' shape: {dehazed_tensor.shape}")

    expected_channel_dim = embedding_dim // 2
    print(f"Expected output channel dimension: {expected_channel_dim}")

    if dehazed_tensor.shape[1] == expected_channel_dim:
        print("SUCCESS: Output channel dimension is correct.")
    else:
        print(f"FAILURE: Output channel dimension is {dehazed_tensor.shape[1]}, but expected {expected_channel_dim}.")

    print("-" * 50)
