import torch.nn as nn
import torch

class BertLSTM(nn.Module):
    """Transformer encoder followed by a BiLSTM and a per-token linear head.

    Args:
        transformer: pretrained Hugging Face backbone exposing
            ``config.hidden_size`` and returning an object with a
            ``last_hidden_state`` attribute when called.
        num_hiddens: LSTM hidden size (per direction).
        num_layers: number of stacked LSTM layers.
        freeze: if True, freeze every backbone parameter whose name does
            not contain ``'adapter'`` (adapter-tuning setup).
        num_cls: number of output classes per token.
    """

    def __init__(self, transformer, num_hiddens, num_layers, freeze, num_cls=2):
        super().__init__()
        self.transformer = transformer  # pretrained backbone
        hidden_dim = transformer.config.hidden_size

        # Freeze the pretrained weights; only adapter parameters stay trainable.
        if freeze:
            for param_name, param in self.transformer.named_parameters():
                if 'adapter' not in param_name:  # parameters outside the adapters
                    param.requires_grad = False

        self.encoder = nn.LSTM(hidden_dim, num_hiddens, num_layers=num_layers,
                               bidirectional=True, batch_first=True)
        # Bidirectional LSTM concatenates both directions -> 2 * num_hiddens.
        self.decoder = nn.Linear(2 * num_hiddens, num_cls)

    def forward(self, ids, mask):
        """Run the backbone + BiLSTM + linear head.

        Args:
            ids: [batch size, seq len] token ids.
            mask: [batch size, seq len] attention mask.

        Returns:
            embedding: [batch size, seq len, 2 * num_hiddens] LSTM features.
            outs: [batch size, seq len, num_cls] per-token logits.
        """
        # Pass the mask by keyword: with AutoModel the second positional
        # argument is not guaranteed to be the attention mask for every
        # backbone architecture.
        output = self.transformer(ids, attention_mask=mask, output_attentions=False)
        hidden = output.last_hidden_state
        # hidden = [batch size, seq len, hidden dim]

        # Coalesce LSTM weights into contiguous memory (relevant on GPU;
        # a no-op on CPU) to silence the non-contiguous-weights warning.
        self.encoder.flatten_parameters()
        embedding, _ = self.encoder(hidden)
        # embedding = [batch size, seq len, 2*num_hiddens]

        outs = self.decoder(embedding)
        # outs = [batch size, seq len, num_cls]

        return embedding, outs

if __name__ == '__main__':

    # Smoke test: load a real checkpoint and push one dummy batch through.
    from transformers import AutoModel, BertTokenizer

    checkpoint = 'bert-base-chinese'
    tokenizer = BertTokenizer.from_pretrained(checkpoint)
    backbone = AutoModel.from_pretrained(checkpoint)

    num_cls = 2  # defined for reference; BertLSTM's default is used below
    model = BertLSTM(
        backbone,
        225,            # num_hiddens
        6,              # num_layers
        True,           # freeze everything except adapters
    )

    # One sequence of six arbitrary token ids, fully attended.
    dummy_ids = torch.arange(6).unsqueeze(0)
    dummy_mask = torch.ones_like(dummy_ids)
    model(dummy_ids, dummy_mask)