from models.classify_model._base import SeqClassifyModule
from utils import Params
import torch.nn as nn
import torch
class SoftmaxSeqClassifyModule(SeqClassifyModule):
    """Token-level classification head: per-token FC projection + softmax cross-entropy.

    NOTE(review): when ``classify_fc_layers == 0`` the head is an ``nn.Identity``,
    which implies the encoder output is expected to already have ``num_labels``
    channels — confirm against the configs that use that setting.
    """

    def __init__(self, params: Params):
        super(SoftmaxSeqClassifyModule, self).__init__(params=params)

        # Build the feature-projection head.
        if self.params.classify_fc_layers == 0:
            self.fc_layer = nn.Identity()
        else:
            # Every entry of classify_fc_hidden_size except the last drives one
            # Dropout -> Linear -> LayerNorm block; a final Linear then maps
            # onto the label space.
            hidden = list(self.params.classify_fc_hidden_size[:-1])
            sizes = [self.params.encoder_output_size] + hidden
            blocks = [
                nn.Sequential(
                    nn.Dropout(self.params.classify_fc_dropout),
                    nn.Linear(n_in, n_out),
                    nn.LayerNorm(n_out),
                )
                for n_in, n_out in zip(sizes[:-1], sizes[1:])
            ]
            blocks.append(nn.Linear(sizes[-1], self.params.num_labels))
            self.fc_layer = nn.Sequential(*blocks)

        # Token-level classification loss.
        self.loss_fn = nn.CrossEntropyLoss()

    def forward(self, input_feature, input_mask, labels=None, **kwargs):
        # Cast the mask to the feature dtype (e.g. fp16) and broadcast it over
        # the label dimension so padded positions get all-zero logits.
        mask = input_mask.unsqueeze(-1).to(input_feature.dtype)

        # 1. Per-token label scores.  [N, T, num_labels]
        logits = self.fc_layer(input_feature) * mask

        # 2. Inference path: most likely label per token.  [N, T]
        if labels is None:
            return torch.argmax(logits, dim=-1)

        # Training path: CrossEntropyLoss wants the class dim second, so
        # rearrange [N, T, C] -> [N, C, T].
        # NOTE(review): padded tokens still enter the mean loss with uniform
        # (all-zero) logits — confirm whether an ignore_index was intended.
        return self.loss_fn(logits.permute(0, 2, 1), labels)
