#! -*- coding: utf-8 -*-
import torch
import torch.nn as nn
from transformers import BertConfig, BertModel


class ClassifyModel(nn.Module):
    """BERT sequence classifier: pooled [CLS] representation -> linear head.

    Expected ``config`` keys:
        pretrain_model_path: path/name of the pretrained BERT checkpoint
        class_num: number of target classes
    """

    def __init__(self, config):
        super(ClassifyModel, self).__init__()
        self.pretrain_model_path = config['pretrain_model_path']
        # Load the checkpoint's config first so hidden_size matches the weights.
        self.config = BertConfig.from_pretrained(self.pretrain_model_path)
        self.bert = BertModel.from_pretrained(self.pretrain_model_path)
        self.hidden_size = self.config.hidden_size
        # NOTE(fix): the original version constructed an unused nn.LSTM here with
        # the misspelled keyword `birectional`, which raised TypeError at
        # construction time and made the class unusable. That dead LSTM and an
        # equally unused nn.Linear have been removed.
        self.class_num = config['class_num']
        # Classification head: pooled hidden state -> class logits.
        self.output = nn.Linear(self.hidden_size, self.class_num)

    def forward(self, input_ids, attention_mask=None, token_type_ids=None):
        """Return unnormalized class logits of shape (batch_size, class_num)."""
        outputs = self.bert(input_ids, attention_mask, token_type_ids)
        # outputs[1] is BERT's pooler output: a dense+tanh projection of the
        # [CLS] token's last-layer hidden state, shape (batch_size, hidden_size).
        out_pool = outputs[1]
        logit = self.output(out_pool)  # (batch_size, class_num)
        return logit


class ClassifyBertLSTM(nn.Module):
    """BERT + LSTM classifier.

    Runs the full BERT token sequence through a (bi)directional LSTM and
    classifies from the LSTM's final hidden state(s).

    Expected ``config`` keys:
        pretrain_model_path: path/name of the pretrained BERT checkpoint
        class_num: number of target classes
    """

    def __init__(self, config):
        super(ClassifyBertLSTM, self).__init__()
        self.pretrain_model_path = config['pretrain_model_path']
        # Pretrained config and weights loaded from the local checkpoint path.
        self.config = BertConfig.from_pretrained(self.pretrain_model_path)
        self.bert = BertModel.from_pretrained(self.pretrain_model_path)
        self.bert_hidden_size = 768
        self.lstm_hidden_size = 300
        self.bidirectional = True
        self.lstm = nn.LSTM(self.bert_hidden_size, self.lstm_hidden_size,
                            num_layers=1, batch_first=True,
                            bidirectional=self.bidirectional)
        # A bidirectional LSTM exposes forward + backward states concatenated,
        # so the projection's input width doubles in that case.
        in_features = self.lstm_hidden_size * 2 if self.bidirectional else self.lstm_hidden_size
        self.linear = nn.Linear(in_features, self.lstm_hidden_size)
        self.class_num = config['class_num']
        self.fc = nn.Linear(self.lstm_hidden_size, self.class_num)
        self.dropout = nn.Dropout(0.2)

    def forward(self, input_ids, attention_mask=None, token_type_ids=None):
        """Return unnormalized class logits of shape (batch_size, class_num)."""
        encoded = self.bert(input_ids, attention_mask, token_type_ids)
        # encoded[0]: last-layer token hidden states, (batch, seq_len, 768).
        token_states = encoded[0]
        _, (final_hidden, _) = self.lstm(token_states)
        if self.bidirectional:
            # final_hidden is (num_layers * 2, batch, hidden); its last two
            # entries are the top layer's forward and backward final states.
            summary = torch.cat([final_hidden[-2], final_hidden[-1]], dim=-1)
        else:
            summary = final_hidden[-1]
        # Project to lstm_hidden_size, regularize, then classify.
        features = self.dropout(self.linear(summary))
        return self.fc(features)
