#!/usr/bin/env python  
#-*- coding:utf-8 _*-  
""" 
@author:hello_life 
@license: Apache Licence 
@file: albert_bilstm_crf.py 
@time: 2022/05/08
@software: PyCharm 
description:
"""
import torch
import torch.nn as nn
from torchcrf import CRF

from transformers import AlbertModel


class Albert_Bilstm_CRF(nn.Module):
    """ALBERT encoder -> BiLSTM -> linear emission layer -> CRF, for sequence labeling.

    Expected `config` attributes (read below): pretrained_model_path,
    hidden_size, num_class, seq_len.
    """

    def __init__(self, config):
        super(Albert_Bilstm_CRF, self).__init__()
        self.config = config
        self.albert = AlbertModel.from_pretrained(config.pretrained_model_path)
        # Derive the LSTM input size from the loaded ALBERT checkpoint instead of
        # hard-coding 384, so any ALBERT variant works.
        # batch_first=True is required: last_hidden_state is (batch, seq, hidden)
        # and the CRF below is also batch-first; without it the LSTM would treat
        # the batch dimension as the time axis.
        self.lstm = nn.LSTM(
            self.albert.config.hidden_size,
            config.hidden_size,
            batch_first=True,
            bidirectional=True,
        )
        # *2 because the BiLSTM concatenates forward and backward hidden states.
        self.classifier = nn.Linear(config.hidden_size * 2, config.num_class)
        self.crf = CRF(config.num_class, batch_first=True)

    def forward(self, batch):
        """Return per-token emission scores of shape (batch, seq, num_class).

        `batch` is a dict of keyword arguments accepted by AlbertModel
        (e.g. input_ids, attention_mask).
        """
        output = self.albert(**batch)
        output, (h, c) = self.lstm(output.last_hidden_state)
        output = self.classifier(output)

        return output

    def forward_with_loss(self, token_ids, masks, y, lens):
        """Compute emissions and the negative CRF log-likelihood.

        Args:
            token_ids: (batch, seq) input ids including [CLS]/[SEP] — TODO confirm.
            masks: (batch, seq) attention mask. NOTE: mutated in place below.
            y: (batch, seq) gold labels (padding assumed to be -100). NOTE:
                mutated in place below.
            lens: per-example true sequence lengths, excluding the two special
                tokens (presumably [CLS]/[SEP] — verify against caller).

        Returns:
            (emissions, loss) where emissions is (batch, seq, num_class) and
            loss is the negated CRF log-likelihood (a scalar to minimize).
        """
        output = self.albert(token_ids, masks)
        output, (h, c) = self.lstm(output.last_hidden_state)
        output = self.classifier(output)

        # Mask out the [SEP] position and sanitize padding labels.
        for i, len_ in enumerate(lens):
            if len_ < self.config.seq_len - 2:
                # Zero the mask at the [SEP] position (padding positions are
                # assumed already 0 in the attention mask — TODO confirm).
                masks[i, len_ + 1] = 0
                # The CRF cannot accept -100 labels, so replace them with 0;
                # these positions are masked out anyway.
                y[i, len_ + 1:] = 0
        # Strip [CLS] and the final position; CRF expects a boolean mask.
        mask = masks[:, 1:-1].gt(0)
        # torchcrf returns the log-likelihood; negate it to obtain a loss.
        loss = self.crf(output[:, 1:-1, :], y[:, 1:-1], mask) * (-1)

        return output, loss



