from transformers import pipeline, BertTokenizer, BertModel, BertForNextSentencePrediction, BertConfig
from transformers import AutoTokenizer, AutoModelForMaskedLM
import torch.nn as nn
from torch.nn.utils.rnn import pad_sequence
# https://blog.csdn.net/qq_42189083/article/details/89350890
from torchcrf import CRF
from torch.utils.data import TensorDataset, DataLoader
from torch.utils.data import dataloader
from torch.utils.data import Dataset, DataLoader
import torch
from transformers import AdamW, get_linear_schedule_with_warmup
import torch.nn.functional as F


class Mydata(Dataset):
    """Tiny in-memory toy dataset: four short Chinese phrases, all labelled 1."""

    def __init__(self):
        # Build the full (text, label) list once at construction time.
        self.data = self.load_data()

    def __getitem__(self, index):
        """Return the (text, label) pair stored at *index*."""
        return self.data[index]

    def __len__(self):
        """Total number of samples."""
        return len(self.data)

    def load_data(self):
        """Return the hard-coded sample list; every phrase gets label 1."""
        phrases = ['刚刚起床', '尝试一下', '刚刚起床', '尝试一下']
        return [(phrase, 1) for phrase in phrases]


class SentimentClassifier(nn.Module):
    """BERT-based sentence classifier for Chinese text.

    Encodes a batch of raw strings with bert-base-chinese and maps the
    pooled [CLS] representation through dropout to ``n_classes`` logits.
    """

    def __init__(self, n_classes):
        """Load tokenizer + encoder and build the classification head.

        NOTE: ``from_pretrained`` may download weights, so construction
        is slow and requires network/disk access.
        """
        super(SentimentClassifier, self).__init__()
        self.tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")
        print(self.tokenizer.vocab_size)  # 21128
        print(self.tokenizer.model_max_length)  # 512
        self.bert = BertModel.from_pretrained('bert-base-chinese')
        self.drop = nn.Dropout(p=0.3)
        self.out = nn.Linear(self.bert.config.hidden_size, n_classes)

    def forward(self, x):
        """Tokenize a batch of raw strings and return class logits.

        BUGFIX 1: ``padding=True, truncation=True`` so sentences of
        different lengths tokenize into one rectangular tensor (the
        original call failed on unequal-length batches) and over-long
        inputs are clipped to the model's 512-token limit.
        BUGFIX 2: return raw logits instead of softmax probabilities —
        the training loop uses ``nn.CrossEntropyLoss``, which applies
        log-softmax internally; feeding it softmax output double-
        normalizes and flattens the gradients. ``torch.max`` at the call
        site is unaffected, since softmax is monotonic.
        """
        encoded = self.tokenizer(x, return_tensors='pt',
                                 padding=True, truncation=True)
        output = self.bert(**encoded)
        pooler_output = output.pooler_output  # (batch, hidden_size)
        dropped = self.drop(pooler_output)
        logits = self.out(dropped)  # (batch, n_classes)
        return logits


text = "这个版本没写el,所以感觉有点粗糙,代码的扩展性不佳,然而写了dataparallel用多卡训练又因为transformer2.2.2与pytorch1.5.1冲突会报StopIteration的错误"

# Standalone encoder load, used only to inspect the config
# (SentimentClassifier loads its own copy of the weights).
model = BertModel.from_pretrained("bert-base-chinese")
print(model.config.hidden_size)  # 768 for bert-base-chinese

print("model ok")

loader = DataLoader(Mydata(), batch_size=2, shuffle=True)

model = SentimentClassifier(2)

EPOCHS = 10

optimizer = AdamW(model.parameters(), lr=2e-5, correct_bias=False)
# One scheduler step per optimizer step, over all epochs.
total_steps = len(loader) * EPOCHS

scheduler = get_linear_schedule_with_warmup(
    optimizer,
    num_warmup_steps=0,
    num_training_steps=total_steps
)

# Expects raw logits; it applies log-softmax internally.
loss_fn = nn.CrossEntropyLoss()

model.train()  # make sure dropout is active during training
# BUGFIX: the original looped over the loader only once even though
# total_steps was computed for EPOCHS passes — run the intended epochs.
for epoch in range(EPOCHS):
    for d in loader:
        x, y = d  # x: tuple of strings, y: LongTensor of labels
        outputs = model(x)
        _, preds = torch.max(outputs, dim=1)
        print(preds)
        loss = loss_fn(outputs, y)

        loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        optimizer.step()
        # BUGFIX: the warmup scheduler was created but never stepped, so
        # the learning rate never followed the linear warmup/decay plan.
        scheduler.step()
        optimizer.zero_grad()
