import torch
import torch.nn as nn
import torch.nn.functional as F
from wenet.transformer.encoder import ConformerEncoder
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from gxl_ai_utils.utils import utils_file
torch.backends.cudnn.enabled = False
from wenet.hotword.gru import CustomGRU

class ContextEncoder2(nn.Module):
    """Sequence-to-vector context encoder backed by a single-layer GRU.

    Consumes a padded batch ``(B, T, hidden_size)`` with per-sample lengths
    and emits one summary vector per sample, shaped ``(1, B, hidden_size)``.
    """

    def __init__(self, hidden_size=3584, dropout=0.1, *args, **kwargs):
        """
        Args:
            hidden_size: feature dimension of both the input and the GRU state.
            dropout: dropout probability applied before and after the GRU.
        """
        super().__init__(*args, **kwargs)
        self.dropout = nn.Dropout(dropout)
        # NOTE: the attribute is called `lstm` (kept for checkpoint/attribute
        # compatibility) but the underlying module is a GRU.
        self.lstm = CustomGRU(hidden_size, hidden_size, batch_first=True, num_layers=1)

    def forward(self, x, x_lens):
        """Encode a padded batch into one vector per sample.

        Args:
            x: (B, T, hidden_size) padded input sequences.
            x_lens: (B,) valid length of each sequence.
        Returns:
            (1, B, hidden_size) tensor holding the final GRU time step of
            each sample.  NOTE(review): index ``T - 1`` is taken for every
            sample; whether that is the last *valid* step for sequences
            shorter than T depends on how CustomGRU consumes x_lens —
            confirm against its implementation.
        """
        hidden = self.dropout(x)
        hidden, _ = self.lstm(hidden, x_lens)
        hidden = self.dropout(hidden)
        last_step = hidden[:, -1, :]
        return last_step.unsqueeze(0)


class ContextEncoder(nn.Module):
    """Transformer-based context encoder: (B, T, H) -> (1, B, H).

    A single TransformerEncoder layer contextualizes the padded input, the
    valid time steps are mean-pooled per sample (padding excluded), and a
    final linear layer projects the pooled vector.
    """

    def __init__(self, hidden_size=3584, dropout=0.1):
        """
        Args:
            hidden_size: model dimension; must be divisible by nhead (8).
            dropout: dropout probability for the input dropout and the
                transformer layer.
        """
        super().__init__()
        self.hidden_size = hidden_size
        # Transformer block: captures global dependencies across time steps.
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=hidden_size,
            nhead=8,
            dim_feedforward=2048,
            dropout=dropout,
            batch_first=True
        )
        self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=1)

        # Retained for attribute compatibility (parameter-free); pooling is
        # now done with a length-aware masked mean inside forward().
        self.pool = nn.AdaptiveAvgPool1d(1)

        # Output projection.
        self.fc = nn.Linear(hidden_size, hidden_size)
        self.dropout = nn.Dropout(dropout)

    def _generate_mask(self, x_lens, max_len):
        """Boolean key-padding mask of shape (B, max_len); True marks padding."""
        return (torch.arange(max_len, device=x_lens.device)[None, :] >= x_lens[:, None])

    def forward(self, x, x_lens):
        """Encode padded sequences into a per-sample summary.

        Args:
            x: (B, T, H) padded input.
            x_lens: (B,) number of valid time steps per sample.
        Returns:
            (1, B, H) tensor, one pooled and projected vector per sample.
        """
        x = self.dropout(x)

        # True at padded positions; prevents attention from reading padding.
        mask = self._generate_mask(x_lens, x.size(1))

        # Self-attention over the valid time steps.
        x = self.transformer(x, src_key_padding_mask=mask)  # (B, T, H)

        # Masked mean over valid time steps only.  Plain average pooling
        # would also fold in the transformer outputs at padded positions
        # (padded queries still produce outputs via the residual path even
        # with src_key_padding_mask set), biasing the summary for short
        # sequences.  clamp(min=1) guards against zero-length entries.
        valid = (~mask).unsqueeze(-1).to(x.dtype)                    # (B, T, 1)
        denom = x_lens.clamp(min=1).to(x.dtype).unsqueeze(-1)        # (B, 1)
        x = (x * valid).sum(dim=1) / denom                           # (B, H)
        x = x.unsqueeze(0)                                           # (1, B, H)

        # Final projection (Linear acts on the last dimension).
        return self.fc(x)

if __name__ == '__main__':
    # Smoke test: build the encoder, report its size, and push a tiny
    # padded batch through it.
    encoder = ContextEncoder(hidden_size=3584, dropout=0.1)
    utils_file.print_model_size(encoder)

    dummy = torch.randn(2, 4, 3584)
    dummy_lens = torch.tensor([4, 3])
    result = encoder(dummy, dummy_lens)
    print(result.shape)  # torch.Size([1, 2, 3584])
