from transformers import AutoTokenizer, AutoModel, AutoConfig
import torch.nn as nn
from torchcrf import CRF

'''
Demo sequence-labeling model: BERT encoder + linear emission head + CRF.
(Originally described as "BERT+BiLSTM+CRF", but no BiLSTM layer is actually
wired into the forward path.)
'''
class AssembleModel(nn.Module):
    """Sequence-labeling model: pretrained encoder -> dropout -> linear -> CRF.

    Despite the original "BERT+BiLSTM+CRF" description, no BiLSTM is used in
    the forward path; token states go backbone -> dropout -> classifier -> CRF.
    """

    def __init__(self, model_name, labels_num, dropout_rate=0.2, seq_length=128, hidden_dim=256, fine_tuning=False):
        """
        Args:
            model_name: Hugging Face model id or local path of the pretrained backbone.
            labels_num: number of tag labels (CRF state count).
            dropout_rate: dropout applied to the backbone's token representations.
            seq_length: kept for interface compatibility; not used internally.
            hidden_dim: kept for interface compatibility; not used internally.
            fine_tuning: if False, the backbone's parameters are frozen.
        """
        super(AssembleModel, self).__init__()
        self.pre_train_model = model_name
        self.fine_tuning = fine_tuning
        self.hidden_dim = hidden_dim
        self.labels_num = labels_num

        # Model loading goes through Hugging Face's Auto classes.
        model_config = AutoConfig.from_pretrained(self.pre_train_model)
        # Inspect hyperparameters via model_config if needed:
        # print('model_config', model_config)
        # BUGFIX: AutoModel.from_config() builds a *randomly initialized* model;
        # from_pretrained() is required to actually load the pretrained weights.
        self.backbone = AutoModel.from_pretrained(self.pre_train_model, config=model_config)
        # Freeze once at construction instead of on every forward pass.
        if not self.fine_tuning:
            self.freeze(self.backbone)
        self.dropout = nn.Dropout(dropout_rate)
        self.softmax = nn.Softmax(dim=-1)  # currently unused: the CRF handles normalization
        self.classifier = nn.Linear(model_config.hidden_size, labels_num)
        # BUGFIX: forward() relies on self.crf, which was never created before,
        # causing AttributeError on any call. batch_first=True because the
        # emission scores are shaped (batch, seq_len, labels_num).
        self.crf = CRF(labels_num, batch_first=True)
        # The layers below are kept for attribute/state-dict compatibility but
        # are not used in the current forward path.
        self.word_embedding = nn.Embedding(model_config.vocab_size, model_config.hidden_size)
        self.layer_norm = nn.LayerNorm(model_config.hidden_size)
        self.transformer_encoder_layer = nn.TransformerEncoderLayer(d_model=model_config.hidden_size,
                                                                    nhead=8, batch_first=True, activation='gelu')

    def freeze(self, _layer):
        """Freeze a sub-module's parameters (exclude them from backprop); returns it."""
        for p in _layer.parameters():
            p.requires_grad = False
        return _layer

    def _get_feature(self, src, mask):
        """Run the backbone and project token states to per-label emission scores.

        Args:
            src: (batch, seq_len) input token ids.
            mask: (batch, seq_len) attention mask; 1 for real tokens.
        Returns:
            (batch, seq_len, labels_num) emission scores for the CRF.
        """
        outputs = self.backbone(input_ids=src, attention_mask=mask)
        # outputs[0] is the last hidden state, (batch, seq_len, hidden_size);
        # consult the Hugging Face docs for the concrete backbone's outputs.
        sequence_output = self.dropout(outputs[0])
        return self.classifier(sequence_output)

    def forward(self, sentence, tags, mask):
        """Compute the training loss or decode label sequences.

        Args:
            sentence: (batch, seq_len) input token ids.
            tags: (batch, seq_len) gold label ids, or None for inference.
            mask: (batch, seq_len) attention mask; 1 for real tokens.
        Returns:
            (loss, decoded): when tags is given, loss is the mean negative
            log-likelihood and decoded is None; otherwise loss is None and
            decoded holds the Viterbi-best label id lists.
        """
        emissions = self._get_feature(sentence, mask)
        # torchcrf expects a bool/byte mask, while HF attention masks are
        # usually long tensors: https://pytorch-crf.readthedocs.io/en/stable/
        crf_mask = mask.bool() if mask is not None else None
        if tags is None:
            decoded = self.crf.decode(emissions=emissions, mask=crf_mask)
            loss = None
        else:
            decoded = None
            # CRF.forward returns the log-likelihood; negate it to get a loss.
            loss = -self.crf.forward(emissions, tags, crf_mask, reduction='mean')
        return loss, decoded
    