import torch
import math

# Synthetic batch dimensions: bs sequences, each lens steps of dim features.
bs = 16
lens = 32
dim = 512
# One batch of random features.
data = torch.randn(bs, lens, dim)
# Valid (un-padded) length of each sequence, drawn uniformly from [5, 28).
lengths = torch.randint(low=5, high=28, size=(bs,))


class myModule(torch.nn.Module):
    """Masked attention pooling over a padded batch of sequences.

    Given ``x`` of shape (batch, seq, dim) and the per-sample valid lengths,
    this scores each position with tanh(W x + b) . w / sqrt(dim), masks out
    padded positions, and returns the attention-weighted sum of the (sorted,
    masked) features, shape (batch, dim).
    """

    def __init__(self, dim):
        super().__init__()
        self.dim = dim
        self.linear = torch.nn.Linear(dim, dim, bias=False)

        # Registered as Parameters so they are trainable, appear in
        # state_dict, and follow the module across devices via .to()/.cuda().
        # (Previously created with .cuda(), which crashed on CPU-only
        # machines and silently excluded them from optimization/saving.)
        self.bias = torch.nn.Parameter(torch.randn(dim))
        self.w = torch.nn.Parameter(torch.randn(dim))

        self.activation = torch.nn.Tanh()
        self.activation2 = torch.nn.Tanh()  # unused; kept for compatibility
        self.s = torch.nn.Softmax(dim=-1)   # unused; kept for compatibility

    def forward(self, x, lens):
        """Pool ``x`` (batch, seq, dim) into (batch, dim) using ``lens``.

        Args:
            x: padded feature tensor, shape (batch, seq, dim).
            lens: 1-D tensor of valid lengths per sample, shape (batch,).

        Returns:
            Tensor of shape (batch, dim): attention-weighted feature sum.
        """
        # mask: (batch, seq, 1), True at valid (non-padded) positions.
        positions = torch.arange(x.size(1), device=lens.device)
        mask = (positions.unsqueeze(0) < lens.long().unsqueeze(1)).unsqueeze(-1)

        # Sort each feature column descending after pushing padded entries to
        # a large negative value, so pads land at the bottom; then zero them.
        x = x.masked_fill(~mask, -10000)
        x = x.sort(dim=1, descending=True)[0]
        x = x.masked_fill(~mask, 0)

        # Scalar attention score per position.
        projected = self.linear(x) + self.bias
        projected = projected.masked_fill(~mask, 0)
        projected = self.activation(projected)
        scores = torch.mul(projected, self.w.expand_as(projected)).sum(2) / math.sqrt(self.dim)

        # Use masked_fill (not in-place indexing) so autograd stays intact;
        # padded scores get -10000 so softmax assigns them ~zero weight.
        scores = scores.unsqueeze(-1).masked_fill(~mask, -10000)
        weights = torch.softmax(scores, 1)
        pooled_features = (x * weights).sum(1)
        return pooled_features


def max_abs_per_row(matrix):
    """Return the largest absolute value found in each row of ``matrix``."""
    # abs() then reduce over dim 1; index [0] picks the values from (values, indices).
    return matrix.abs().max(dim=1)[0]
# mm = myModule(512)
# mm1 = mm(data, lengths)
