import math
import torch
import torch.nn as nn
import torch.nn.functional as F

class Downsample(nn.Module):
    """Temporal downsampling along the T axis by an integer ratio.

    Groups every ``ratio`` consecutive time steps into a block and pools each
    block with a masked max (``'max'``), masked average (``'avg'``), or a
    concatenation of both projected back to ``d_model`` (``'concat'``).

    Mask convention (as used throughout this module): ``True`` marks a padded
    (invalid) position, ``False`` a valid one.
    """

    def __init__(self, d_model, ratio, mode):
        super().__init__()
        # Fail fast on a bad mode instead of waiting for the first forward().
        if mode not in ('max', 'avg', 'concat'):
            raise ValueError(f"Unknown mode: {mode}")
        self.mode = mode
        self.ratio = ratio
        if self.mode == 'concat':
            # Concat stacks the max and avg pooled features (2 * d_model),
            # so we project back down to d_model.
            self.lin = nn.Linear(d_model * 2, d_model)

    @staticmethod
    def _masked_max(x_blocks, mask_blocks, block_all_pad):
        """Max over the block dim (dim=2), ignoring padded entries.

        Blocks that are entirely padding would otherwise yield -inf; those are
        zeroed so downstream layers (e.g. nn.Linear) never see huge magnitudes.
        """
        filled = x_blocks.masked_fill(mask_blocks.unsqueeze(-1), float("-inf"))
        out = filled.max(dim=2).values
        return out.masked_fill(block_all_pad.unsqueeze(-1), 0.0)

    @staticmethod
    def _masked_avg(x_blocks, mask_blocks):
        """Mean over the block dim (dim=2), counting only valid entries.

        All-pad blocks have a zero sum; clamping the count to 1 (integer, to
        avoid float-scalar clamp on an integer tensor) makes them divide by 1
        and come out as exactly 0 instead of NaN.
        """
        filled = x_blocks.masked_fill(mask_blocks.unsqueeze(-1), 0.0)
        total = filled.sum(dim=2)
        count = (~mask_blocks).sum(dim=2).unsqueeze(-1).clamp(min=1)
        return total / count

    def forward(self, x, x_mask, idx_b, idx_t, idx_c, imp):
        """Downsample a batch along T.

        Optimized for torch.compile with fixed-length inputs.

        Args:
            x: (B, T, C, D) features.
            x_mask: (B, T, C) bool mask, True = padded.
            idx_b, idx_c, imp: unused here; kept for a uniform layer interface.
            idx_t: (B, T, C) time indices carried alongside the features.

        Returns:
            (x_out, x_mask_out, idx_t_out) with T reduced to ceil(T / ratio).
        """
        B, T, C, D = x.shape
        r = self.ratio

        # 1. Right-pad T so it divides evenly by ratio; padded slots are
        #    masked True so the pooling below ignores them.
        if T % r != 0:
            pad_len = r - (T % r)
            x = F.pad(x, (0, 0, 0, 0, 0, pad_len), mode='constant', value=0)
            x_mask = F.pad(x_mask, (0, 0, 0, pad_len), mode='constant', value=True)
            idx_t = F.pad(idx_t, (0, 0, 0, pad_len), mode='constant', value=0)
            T = T + pad_len

        # 2. Group consecutive steps into blocks: (B, T//r, r, C, D).
        x_blocks = x.view(B, T // r, r, C, D)
        mask_blocks = x_mask.view(B, T // r, r, C)

        # A pooled block is padding iff every entry in it is padding.
        # block_all_pad: (B, T//r, C) — also serves as the output mask.
        block_all_pad = mask_blocks.all(dim=2)

        # 3. Pool each block according to the configured mode.
        if self.mode == 'max':
            x_out = self._masked_max(x_blocks, mask_blocks, block_all_pad)
        elif self.mode == 'avg':
            x_out = self._masked_avg(x_blocks, mask_blocks)
        elif self.mode == 'concat':
            x_max = self._masked_max(x_blocks, mask_blocks, block_all_pad)
            x_avg = self._masked_avg(x_blocks, mask_blocks)
            x_out = self.lin(torch.cat([x_max, x_avg], dim=-1))
        else:
            # Unreachable after __init__ validation; kept as a defensive guard.
            raise ValueError(f"Unknown mode: {self.mode}")

        # 4./5. Downsampled mask and time indices (first index of each block;
        # for an all-pad block this is a pad value, matching the True mask).
        idx_t_out = idx_t.view(B, T // r, r, C)[:, :, 0, :]

        return x_out, block_all_pad, idx_t_out


class DownsampleLayer(nn.Module):
    """Thin wrapper around Downsample.

    Builds a Downsample submodule when ``mode`` is one of the pooling modes
    ('max', 'avg', 'concat'); for any other mode the layer is a pass-through
    that returns its inputs (x, x_mask, idx_t) untouched.
    """

    def __init__(self, d_model, ratio, mode):
        super().__init__()
        self.mode = mode
        self.ratio = ratio
        pooling_modes = ('max', 'avg', 'concat')
        self.down = Downsample(d_model, ratio, mode) if mode in pooling_modes else None

    def forward(self, x, x_mask, idx_b, idx_t, idx_c, imp):
        # Identity path: no downsampler configured.
        if self.down is None:
            return x, x_mask, idx_t
        return self.down(x, x_mask, idx_b, idx_t, idx_c, imp)