import torch.nn as nn
import torch
import torch.nn.functional as F
from config import parsers
from transformers import BertModel

class TextCNN(nn.Module):
    """Kim-2014-style CNN text classifier over a trainable embedding layer.

    Args:
        vocab_dict: token -> index mapping; its length defines the vocab size.
            One extra embedding row is appended and used as the padding index.
        embedding_random: when False, initialize the embedding weights from
            ``embedding_matrix`` (e.g. word2vec) instead of randomly.
        embedding_matrix: pre-trained embedding tensor; expected shape
            (len(vocab_dict) + 1, hidden_size) — TODO confirm against caller.
    """

    def __init__(self, vocab_dict, embedding_random=True, embedding_matrix=None):
        super(TextCNN, self).__init__()
        # parsers() re-parses the arguments on every call; read it once.
        args = parsers()
        self.vocab_dict = vocab_dict
        self.embedding_matrix = embedding_matrix
        # Last (extra) row serves as the padding embedding.
        self.W = nn.Embedding(len(self.vocab_dict) + 1, args.hidden_size,
                              padding_idx=len(self.vocab_dict))
        if not embedding_random:
            if embedding_matrix is not None:
                # word2vec (pre-trained) initialization.
                self.W.weight = nn.Parameter(self.embedding_matrix)
            else:
                print('embedding_matrix is None')

        # self.W.weight.requires_grad = False  # uncomment to freeze the embeddings

        # TextCNN: one Conv2d per filter size, each kernel spanning the full
        # hidden dimension, so convolution slides only along the sequence axis.
        self.convs = nn.ModuleList(
            [nn.Conv2d(in_channels=1, out_channels=args.num_filters,
                       kernel_size=(k, args.hidden_size)) for k in args.filter_sizes]
        )
        # NOTE(review): never used in forward(); kept (it owns parameters) so
        # existing checkpoints' state_dict keys still match.
        self.batchNorm2d = nn.BatchNorm2d(64)
        self.dropout = nn.Dropout(args.dropout)
        self.fc = nn.Linear(args.num_filters * len(args.filter_sizes), args.class_num)
        self.sigmoid = nn.Sigmoid()  # unused in forward(); kept for compatibility
        self.tanh = nn.Tanh()        # unused in forward(); kept for compatibility

    def conv_pool(self, x, conv):
        """Convolution + ReLU + max-over-time pooling for one filter size.

        Args:
            x: [batch, 1, seq_len, hidden_size] embedded input.
            conv: one of the Conv2d modules from ``self.convs``.

        Returns:
            [batch, num_filters] pooled feature vector.
        """
        x = F.relu(conv(x))             # [batch, num_filters, seq_len - k + 1, 1]
        x = x.squeeze(3)                # [batch, num_filters, seq_len - k + 1]
        x = F.max_pool1d(x, x.size(2))  # [batch, num_filters, 1]
        return x.squeeze(2)             # [batch, num_filters]

    def forward(self, x):
        """Compute class logits.

        Args:
            x: sequence whose first element is a LongTensor of token ids,
               shape [batch, seq_len].

        Returns:
            Raw logits of shape [batch, class_num] (no activation applied).
        """
        input_ids = x[0]  # avoid shadowing the builtin ``input``
        embedded = self.W(input_ids)        # [batch, seq_len, hidden_size]
        embedded = embedded.unsqueeze(1)    # [batch, 1, seq_len, hidden_size]
        # Concatenate pooled features from every filter size.
        out = torch.cat([self.conv_pool(embedded, conv) for conv in self.convs],
                        1)  # [batch, num_filters * len(filter_sizes)]
        out = self.dropout(out)
        return self.fc(out)

class BertClassifier(nn.Module):
    """BERT encoder with a single linear classification head.

    Uses the pooled [CLS] representation from BERT, applies dropout, and
    projects to ``class_num`` logits.
    """

    def __init__(self):
        super(BertClassifier, self).__init__()
        # parsers() re-parses the arguments on every call; read it once.
        args = parsers()
        self.bert = BertModel.from_pretrained(args.bert_pred)
        # requires_grad is True by default; kept explicit to document that the
        # whole encoder is fine-tuned.
        for param in self.bert.parameters():
            param.requires_grad = True
        self.dropout = nn.Dropout(args.dropout)
        self.linear = nn.Linear(args.hidden_size, args.class_num)
        self.relu = nn.ReLU()        # unused in forward(); kept for compatibility
        self.sigmoid = nn.Sigmoid()  # unused in forward(); kept for compatibility

    def forward(self, x):
        """Compute class logits.

        Args:
            x: sequence of (input_ids, attention_mask, token_type_ids),
               each of shape [batch, max_len].

        Returns:
            Raw logits of shape [batch, class_num] (no activation — pair with
            a loss such as CrossEntropyLoss).
        """
        input_ids, attention_mask, token_type_ids = x[0], x[1], x[2]
        # return_dict=False yields (sequence_output, pooled_output); only the
        # pooled [CLS] vector is used here.
        _, pooled_output = self.bert(input_ids=input_ids,
                                     attention_mask=attention_mask,
                                     token_type_ids=token_type_ids,
                                     return_dict=False)
        dropout_output = self.dropout(pooled_output)
        return self.linear(dropout_output)

class BertTextModel_last_layer(nn.Module):
    """BERT encoder followed by a TextCNN head over the last hidden layer.

    The full token-level output of BERT's last layer is treated as a 2-D
    "image" ([seq_len, hidden_size]) and fed through parallel convolutions of
    different filter sizes, max-pooled over time, and classified linearly.
    """

    def __init__(self):
        super(BertTextModel_last_layer, self).__init__()
        # parsers() re-parses the arguments on every call; read it once.
        args = parsers()
        self.bert = BertModel.from_pretrained(args.bert_pred)
        # requires_grad is True by default; kept explicit to document that the
        # whole encoder is fine-tuned.
        for param in self.bert.parameters():
            param.requires_grad = True

        # TextCNN: one Conv2d per filter size, each kernel spanning the full
        # hidden dimension, so convolution slides only along the sequence axis.
        self.convs = nn.ModuleList(
            [nn.Conv2d(in_channels=1, out_channels=args.num_filters,
                       kernel_size=(k, args.hidden_size)) for k in args.filter_sizes]
        )
        # NOTE(review): never used in forward(); kept (it owns parameters) so
        # existing checkpoints' state_dict keys still match.
        self.batchNorm2d = nn.BatchNorm2d(64)
        self.dropout = nn.Dropout(args.dropout)
        self.fc = nn.Linear(args.num_filters * len(args.filter_sizes), args.class_num)
        self.sigmoid = nn.Sigmoid()  # unused in forward(); kept for compatibility
        self.tanh = nn.Tanh()        # unused in forward(); kept for compatibility

    def conv_pool(self, x, conv):
        """Convolution + ReLU + max-over-time pooling for one filter size.

        Args:
            x: [batch_size, 1, max_len, hidden_size] BERT token embeddings.
            conv: one of the Conv2d modules from ``self.convs``.

        Returns:
            [batch_size, num_filters] pooled feature vector.
        """
        x = F.relu(conv(x))             # [batch_size, num_filters, max_len - k + 1, 1]
        x = x.squeeze(3)                # [batch_size, num_filters, max_len - k + 1]
        x = F.max_pool1d(x, x.size(2))  # [batch_size, num_filters, 1]
        return x.squeeze(2)             # [batch_size, num_filters]

    def forward(self, x):
        """Compute class logits.

        Args:
            x: sequence of (input_ids, attention_mask, token_type_ids),
               each of shape [batch_size, max_len].

        Returns:
            Raw logits of shape [batch_size, class_num] (no activation).
        """
        input_ids, attention_mask, token_type_ids = x[0], x[1], x[2]
        hidden_out = self.bert(input_ids,
                               attention_mask=attention_mask,
                               token_type_ids=token_type_ids,
                               output_hidden_states=False)
        # Add a channel dimension for Conv2d: [batch_size, 1, max_len, hidden_size].
        out = hidden_out.last_hidden_state.unsqueeze(1)
        # Concatenate pooled features from every filter size.
        out = torch.cat([self.conv_pool(out, conv) for conv in self.convs],
                        1)  # [batch_size, num_filters * len(filter_sizes)]
        out = self.dropout(out)
        return self.fc(out)

