#加载预训练模型
from torch.nn.functional import embedding
from transformers import BertModel
import torch

# Training device: prefer CUDA when available, otherwise fall back to CPU.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the pretrained Chinese BERT backbone and move it to the training device.
# NOTE(review): hard-coded local Windows path — confirm it exists on the target machine.
pretrained = BertModel.from_pretrained(r"D:\AI\model\bert-base-chinese").to(DEVICE)

# Inspect the backbone architecture and its token-embedding table.
print(pretrained)
print(pretrained.embeddings.word_embeddings)

# Downstream task model: classify the features extracted by the BERT backbone.
class Model(torch.nn.Module):
    """Classification head on top of the pretrained BERT backbone.

    The backbone's embedding layer participates in training, while the
    encoder (and pooler) is frozen via ``torch.no_grad()``. The final
    hidden state of the [CLS] token is projected to 8 classes.
    """

    def __init__(self):
        super().__init__()
        # 768 = hidden size of bert-base (see the backbone's out_features);
        # 8 = number of target classes for this task.
        self.fc = torch.nn.Linear(768, 8)

    def forward(self, input_ids, attention_mask, token_type_ids):
        """Run a forward pass.

        Args:
            input_ids: token id tensor, shape [N, seq_len].
            attention_mask: 1 for real tokens, 0 for padding, shape [N, seq_len].
            token_type_ids: segment ids, shape [N, seq_len].

        Returns:
            Class probabilities (softmax over 8 classes), shape [N, 8].
        """
        # Embeddings participate in training. Bug fix: forward token_type_ids
        # so segment embeddings are actually used (the original dropped them).
        embedding_output = pretrained.embeddings(
            input_ids=input_ids, token_type_ids=token_type_ids
        )

        # Bug fix: BertEncoder expects an *additive* extended mask
        # ([N, 1, 1, seq_len], 0.0 for tokens to keep, large negative for
        # padding). Passing the raw 0/1 mask left padding effectively
        # unmasked. Use the HF helper to build the correct mask.
        extended_mask = pretrained.get_extended_attention_mask(
            attention_mask, input_ids.shape
        )

        # Freeze the encoder/pooler; no_grad also saves GPU memory.
        with torch.no_grad():
            encoder_output = pretrained.encoder(
                embedding_output, attention_mask=extended_mask
            )

        # Classification uses only the [CLS] token of the encoder output.
        out = self.fc(encoder_output.last_hidden_state[:, 0])
        # NOTE(review): if the training loop uses CrossEntropyLoss, this
        # softmax should be removed (CrossEntropyLoss expects raw logits).
        out = out.softmax(dim=1)
        return out


