# encoding: utf-8

import torch
from transformers import BertTokenizer, BertConfig
from torch.utils import data
import numpy as np


class AlbertClassfier(torch.nn.Module):
    """Sentence classifier head on top of an (AL)BERT encoder.

    NOTE: the class name keeps the original "Classfier" spelling because
    ``torch.load('./model1.pt')`` unpickles the saved model against this
    exact class name — renaming it would break loading.
    """

    def __init__(self, bert_model, bert_config, num_class):
        super().__init__()
        self.bert_model = bert_model
        self.dropout = torch.nn.Dropout(0.4)
        self.fc1 = torch.nn.Linear(bert_config.hidden_size, bert_config.hidden_size)
        self.fc2 = torch.nn.Linear(bert_config.hidden_size, num_class)

    def forward(self, token_ids):
        # Pooled sentence vector: [batch_size, hidden_size]
        pooled = self.bert_model(token_ids)[1]
        hidden = self.fc1(self.dropout(pooled))
        logits = self.fc2(self.dropout(hidden))  # [batch_size, num_class]
        return logits


# Load the full pickled model onto CPU first, then move it to the best device.
# NOTE(review): torch.load unpickles arbitrary objects — only load checkpoints
# from a trusted source.
model = torch.load('./model1.pt', map_location='cpu')
# Consistency fix: always produce a torch.device (the original mixed a
# torch.device with a plain 'cpu' string).
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model.to(device)

# Sample Chinese text to classify; all whitespace (including the surrounding
# blank lines) is removed in get_train_test_data before tokenization.
text = '''


宝鸡市是关中平原城市群副中心城市，也是我国西部工业重镇、高端装备制造业基地，东岭、陕汽、法士特集团等多家国有企业分布于宝鸡市各县区，对地方经济发展发挥着重要作用。由于多数企业职工在市区居住，日常通勤主要以公路为主，但因天气等原因带来的道路封闭、通行困难、交通事故等情况时有发生，影响职工正常通勤。

'''

# NOTE(review): BertTokenizer/BertConfig (not the Albert* classes) are used
# here — presumably because this checkpoint ships a BERT-style vocab; verify
# against the voidful/albert_chinese_small model card.
pretrained = 'voidful/albert_chinese_small'
tokenizer = BertTokenizer.from_pretrained(pretrained)
config = BertConfig.from_pretrained(pretrained)


def get_train_test_data(string, max_length=100):
    """Tokenize a single text into one-sample data/label lists.

    All whitespace (spaces, newlines) is removed before encoding. The label
    is hard-coded to 1; this script only runs inference on a single text.

    :param string: raw input text.
    :param max_length: truncation length passed to ``tokenizer.encode``.
    :return: tuple ``([token_ids], [1])`` — parallel one-element lists.
    """
    # ''.join(string.split()) already removes leading/trailing whitespace,
    # so the original's extra .strip() was redundant.
    compact = ''.join(string.split())
    token_ids = tokenizer.encode(compact, max_length=max_length,
                                 truncation='longest_first')
    # Locals renamed: the original used `data`, shadowing the
    # torch.utils.data module imported at file level.
    return [token_ids], [1]


# Dataset wrapper consumed by torch.utils.data.DataLoader below.
class DataGen(data.Dataset):
    """Minimal Dataset over parallel lists of token-id sequences and labels."""

    def __init__(self, data, label):
        # Parameter names kept as-is so keyword callers still work.
        self.data = data
        self.label = label

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sample, target = self.data[index], self.label[index]
        return np.array(sample), np.array(target)


X_train, y_train = get_train_test_data(text)
train_dataset = DataGen(X_train, y_train)
train_dataloader = data.DataLoader(train_dataset, batch_size=1)

# BUG FIX: the model was never switched out of training mode, so Dropout(0.4)
# stayed active during inference and made predictions non-deterministic.
model.eval()

with torch.no_grad():
    for step, (token_ids, label) in enumerate(train_dataloader):
        token_ids = token_ids.to(device).long()
        label = label.to(device).long()
        out = model(token_ids)  # [batch_size, num_class] logits
        print(out)
        print(out.argmax(1))  # predicted class index per sample
        # count of correct predictions in this batch
        print((out.argmax(1) == label).sum().item())
