import torch
from torch import nn
from transformers import BertModel, BertTokenizer


class bert:
    """Loads a pretrained Chinese BERT (whole-word-masking ext) and exposes
    the pieces the soft-masked correction model needs: the embedding layer,
    the encoder stack, the tokenizer, the [MASK] token embedding, and the
    vocabulary size.
    """

    def __init__(self):
        # BUG FIX: was `form_pretrained` (typo) — raised AttributeError.
        self.bert = BertModel.from_pretrained('chinese-bert-wwm-ext')
        self.embedding = self.bert.embeddings
        self.bert_encoder = self.bert.encoder
        self.tokenizer = BertTokenizer.from_pretrained('chinese-bert-wwm-ext')
        # Embedding of a single [MASK] token; presumably shape
        # (1, 1, hidden_size) — used by softMaskedBert to blend with
        # input embeddings (it is read there via kwarg 'masked_e').
        self.mask_e = self.embedding(torch.tensor([[self.tokenizer.mask_token_id]], dtype=torch.long))
        # BUG FIX: trailing comma made this a 1-tuple (vocab_size,),
        # which would break nn.Linear(768, vocab_size) downstream.
        self.vocab_size = self.tokenizer.vocab_size
class biGruDetector(nn.Module):
    """Bi-directional GRU error detector.

    Maps token embeddings of shape (batch, seq_len, input_size) to a
    per-token error probability of shape (batch, seq_len, 1) via
    Sigmoid(Linear(BiGRU(x))).

    Args:
        input_size: feature dimension of each input token embedding.
        hidden_size: GRU hidden size (per direction).
        num_layer: number of stacked GRU layers (default 1).
    """

    def __init__(self, input_size, hidden_size, num_layer=1):
        super(biGruDetector, self).__init__()
        # BUG FIX: the nn.GRU keyword is `num_layers`, not `num_layer`;
        # the original raised TypeError on construction.
        self.rnn = nn.GRU(input_size, hidden_size, num_layers=num_layer,
                          bidirectional=True, batch_first=True)
        # Bidirectional GRU concatenates both directions -> 2 * hidden_size.
        self.linear = nn.Linear(hidden_size * 2, 1)

    def forward(self, x):
        """x: (batch, seq_len, input_size) -> (batch, seq_len, 1) in [0, 1]."""
        rnn_output, _ = self.rnn(x)
        return nn.Sigmoid()(self.linear(rnn_output))


class softMaskedBert(nn.Module):
    """Correction network of the Soft-Masked BERT model.

    Blends each input embedding with the [MASK] embedding according to the
    detector's error probability, runs the result through the BERT encoder,
    adds a residual connection to the input, and projects to vocabulary
    log-probabilities.

    Expected kwargs:
        vocab_size: size of the output vocabulary.
        masked_e: embedding of the [MASK] token, broadcastable to the
            input embeddings (presumably (1, 1, 768) — confirm at call site;
            NOTE(review): the `bert` wrapper class names this `mask_e`).
        bert_encoder: the pretrained BERT encoder stack.
    """

    def __init__(self, **kwargs):
        super(softMaskedBert, self).__init__()
        self.vocab_size = kwargs['vocab_size']
        self.mask_e = kwargs['masked_e']
        self.bert_encoder = kwargs['bert_encoder']
        # 768 is BERT-base hidden size; projects encoder output to vocab.
        self.linear = nn.Linear(768, self.vocab_size)
        self.log_softmax = nn.LogSoftmax(dim=-1)

    def forward(self, x, p, input_mask=None):
        """x: input embeddings (batch, seq, 768); p: per-token error
        probability (batch, seq, 1); returns log-probs (batch, seq, vocab).
        """
        # Soft masking: interpolate between the [MASK] embedding and the
        # original embedding, weighted by the detection probability p.
        soft_embedding = p * self.mask_e + (1 - p) * x
        # BUG FIX: was `self.bert.encoder(...)` but no `self.bert`
        # attribute exists — use the encoder stored in __init__.
        # NOTE(review): attention_mask is forwarded as-is; BERT encoders
        # typically expect an extended/additive mask — confirm at call site.
        bert_out = self.bert_encoder(hidden_states=soft_embedding, attention_mask=input_mask)
        # Residual connection from the input embeddings.
        h = bert_out[0] + x
        out = self.log_softmax(self.linear(h))
        return out

