"""
@Description :   模型构建
@Author      :   python_assignment_group
@Time        :   2022/10/30 07:24:15
"""

import torch.nn as nn
from transformers import BertModel

from configs import *
from tools.utils import *


class BertNet(nn.Module):
    """BERT-based text classifier: pretrained encoder plus a 3-layer MLP head."""

    def __init__(self):
        super().__init__()
        # Pretrained BERT encoder (hidden size 768).
        self.bert = BertModel.from_pretrained("bert-base-uncased")
        # Classification head: 768 -> 384 -> 192 -> number of labels.
        head_layers = [
            nn.Linear(768, 384),
            nn.ReLU(),
            nn.Dropout(),
            nn.Linear(384, 192),
            nn.ReLU(),
            nn.Dropout(),
            nn.Linear(192, train_configs[0]["labels_num"]),
        ]
        self.classifier = nn.Sequential(*head_layers)

    def forward(self, batch):
        """Forward pass.

        Expects ``batch[0]`` to be input ids and ``batch[1]`` the attention
        mask; returns raw classification logits.
        """
        input_ids = batch[0].to(device)
        attention_mask = batch[1].to(device)
        encoded = self.bert(
            input_ids,
            token_type_ids=None,
            attention_mask=attention_mask,
        )
        # encoded[1] is BERT's pooled sentence-level representation.
        pooled = encoded[1]
        return self.classifier(pooled)


class FastTextNet(nn.Module):
    """Bidirectional-LSTM classifier over pre-embedded token sequences."""

    def __init__(self):
        super().__init__()
        # 4-layer bidirectional LSTM: 100-dim inputs, 50-dim hidden per
        # direction (100 features per step after concatenating directions).
        # NOTE(review): batch_first keeps its default (False); batch[0] is
        # fed straight in, so confirm the tensor layout really is
        # (seq, batch, feature) — otherwise batch_first=True is needed.
        self.lstm = nn.LSTM(100, 50, 4, dropout=0.4, bidirectional=True)

        # MLP head: 9000 (= 90 steps * 100 features) -> 900 -> 90 -> labels,
        # with BatchNorm + ReLU between the linear layers.
        self.fc = nn.Sequential(
            nn.Linear(9000, 900),
            nn.BatchNorm1d(900),
            nn.ReLU(inplace=True),
            nn.Linear(900, 90),
            nn.BatchNorm1d(90),
            nn.ReLU(inplace=True),
            nn.Linear(90, train_configs[0]["labels_num"]),
        )

    def forward(self, batch):
        """Encode ``batch[0]`` with the LSTM, flatten, and classify."""
        inputs = batch[0].to(device).float()
        sequence_out, _ = self.lstm(inputs)
        flat = sequence_out.flatten(1)
        return self.fc(flat)


# 网络部分
class Word2VecCNNNet(nn.Module):
    def __init__(self):
        super().__init__()
        # 第一卷积层
        self.conv1 = torch.nn.Sequential(
            torch.nn.Conv2d(1, 3, 3),  # 输入通道为1, 输出通道为3, 卷积核大小为3x3, 将16x1x90x100 转化为 32x30x30
            torch.nn.ReLU(),
            # torch.nn.MaxPool2d() # 不池化
        )
        # 第二卷积层
        self.conv2 = torch.nn.Sequential(
            torch.nn.Conv2d(3, 6, 3),  # 输入通道为3, 输出通道为6, 卷积核大小为3x3, 将32x30x30 转化为 64x10x10
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2)  # 将64x10x10 转化为 64x5x5
        )
        # 连接层
        self.dense = torch.nn.Sequential(
            torch.nn.Linear(12384, 1200),
            torch.nn.ReLU(),
            torch.nn.Dropout(),
            torch.nn.Linear(1200, 120),
            torch.nn.ReLU(),
            torch.nn.Dropout(),
            torch.nn.Linear(120, train_configs[2]["labels_num"])
        )

    def forward(self, batch):
        b_input_ids = batch[0].to(device).float()
        b_input_ids = b_input_ids.unsqueeze(1)
        output = self.conv1(b_input_ids)
        output = self.conv2(output)
        output = output.view(output.size(0), -1)
        output = self.dense(output)
        return output

