import numpy as np
import torch
from torch import nn
from transformers import BertModel, BertTokenizer
import warnings
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
warnings.filterwarnings("ignore")


class Generator(nn.Module):
    """BERT-backed generator head.

    Maps BERT's pooled [CLS] representation to a log-probability
    distribution over the tokenizer's vocabulary.  The tokenizer is
    passed in alongside the model because it owns the full token
    vocabulary used to size the output layer and to build the
    index<->token lookup tables.
    """

    def __init__(self, bert, tokenizer):
        super(Generator, self).__init__()
        self.bert = bert
        self.tokenizer = tokenizer
        self.vocab = tokenizer.vocab
        # index -> token lookup table
        self.int_char = dict(enumerate(self.vocab))
        # token -> index lookup table (inverse of int_char)
        self.char_int = {token: idx for idx, token in self.int_char.items()}
        self.vocab_size = tokenizer.vocab_size
        # Projection head: 768 (BERT hidden size) -> 1024 -> vocab_size.
        self.fc1 = nn.Linear(768, 1024)
        self.fc2 = nn.Linear(1024, self.vocab_size)
        self.dropout = nn.Dropout(0.1)
        self.relu = nn.ReLU()
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, sent_id, mask):
        """Encode the batch with BERT and project to vocab log-probs.

        sent_id: (batch, seq_len) token ids; mask: matching attention mask.
        Returns a (batch, vocab_size) tensor of log-probabilities.
        """
        # return_dict=False yields (sequence_output, pooled_output);
        # only the pooled [CLS] vector is used here.
        _, pooled = self.bert(sent_id, attention_mask=mask, return_dict=False)
        hidden = self.dropout(self.relu(self.fc1(pooled)))
        return self.softmax(self.fc2(hidden))





if __name__ == '__main__':
    # Local checkpoint directory holding the Chinese BERT weights and vocab.
    model_name = '../bert-base-chinese'
    model_path = '../bert-base-chinese'
    bert_model = BertModel.from_pretrained(model_path)
    tokenizer = BertTokenizer.from_pretrained(model_name)
    model = Generator(bert_model, tokenizer)
    model.eval()  # disable dropout so the smoke test is deterministic
    text = "你无敌了[PAD],我没事,怎么撒s[SEP] ， 。 看得见我吗[PAD]"
    sent_id = tokenizer.encode(text,
                               add_special_tokens=True,  # prepend [CLS] / append [SEP]
                               truncation=True,
                               max_length=100  # cap the sequence length
                               )
    # BERT's [PAD] token id is 0, so `tok > 0` masks out exactly the
    # literal [PAD] tokens typed into the text above.
    att_mask = [int(tok > 0) for tok in sent_id]
    # Add a batch dimension -> (1, seq_len).
    sent_id = torch.tensor(sent_id).unsqueeze(0)
    att_mask = torch.tensor(att_mask).unsqueeze(0)
    print("sent_id.shape:", sent_id.shape)
    print("att_mask.shape:", att_mask.shape)
    print("vocab_size:", tokenizer.vocab_size)
    # Call the module (not .forward directly) so hooks run, and skip
    # autograd bookkeeping for inference.
    with torch.no_grad():
        print(model(sent_id, att_mask).shape)





