# Pretrained checkpoint: https://huggingface.co/clue/albert_chinese_tiny
# encoding: utf-8
# Text classification with ALBERT (Chinese).
# Based on: https://blog.csdn.net/u013230189/article/details/108836511

import pandas as pd
import torch
from transformers import BertTokenizer, BertModel, BertConfig, AlbertModel
import numpy as np
from torch.utils import data
from sklearn.model_selection import train_test_split



# Load the pretrained tokenizer, encoder and config.
# NOTE(review): BertTokenizer (not AlbertTokenizer) is used with an ALBERT
# checkpoint — presumably required by the clue/albert_chinese_* releases;
# confirm against the model card.
pretrained = 'clue/albert_chinese_tiny'  # tiny-size Chinese ALBERT checkpoint
tokenizer = BertTokenizer.from_pretrained(pretrained)
model = AlbertModel.from_pretrained(pretrained)
# Config is loaded via BertConfig; only `hidden_size` is read downstream.
config = BertConfig.from_pretrained(pretrained)



class AlbertClassfier(torch.nn.Module):
    """Sentence classifier head on top of a pretrained ALBERT encoder.

    Pipeline: pooled sentence vector -> dropout -> linear -> dropout -> linear.
    """

    def __init__(self, bert_model, bert_config, num_class):
        super(AlbertClassfier, self).__init__()
        hidden = bert_config.hidden_size
        self.bert_model = bert_model
        # One shared dropout module, applied twice in forward().
        self.dropout = torch.nn.Dropout(0.4)
        self.fc1 = torch.nn.Linear(hidden, hidden)
        self.fc2 = torch.nn.Linear(hidden, num_class)

    def forward(self, token_ids):
        """Map token ids [batch, seq_len] to logits [batch, num_class]."""
        # Index 1 of the encoder output is the pooled sentence vector
        # [batch_size, hidden_size].
        pooled = self.bert_model(token_ids)[1]
        hidden = self.fc1(self.dropout(pooled))
        logits = self.fc2(self.dropout(hidden))
        return logits


# Binary classifier (num_class=2) built on the pretrained encoder.
albertBertClassifier = AlbertClassfier(model, config, 2)
# FIX: the original produced torch.device in one branch and a plain str in
# the other; normalize so `device` is always a torch.device.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
albertBertClassifier = albertBertClassifier.to(device)


def get_train_test_data(pos_file_path, neg_file_path, max_length=100, test_size=0.2):
    """Load, tokenize and split the corpus.

    Each input file holds one raw-text sample per line; positives are
    labelled 1, negatives 0.

    Args:
        pos_file_path: path to the positive-sample file.
        neg_file_path: path to the negative-sample file.
        max_length: fixed token length (padded / truncated to this size).
        test_size: fraction of samples held out for the test split.

    Returns:
        ((X_train, y_train), (X_test, y_test)) — lists of id sequences and
        integer labels.
    """
    # Renamed from `data`/`label`: the old local `data` shadowed the
    # `torch.utils.data` module imported at file top.
    samples = []
    labels = []

    def _encode_file(path, label):
        # Shared loader for both classes (the original duplicated this loop).
        with open(path, 'r', encoding='utf-8') as f:
            for line in f:
                text = line.strip()
                if not text:
                    # FIX: skip blank lines — the original encoded them into
                    # spurious all-padding samples.
                    continue
                ids = tokenizer.encode(text, max_length=max_length,
                                       padding="max_length",
                                       truncation='longest_first')
                samples.append(ids)
                labels.append(label)

    # Positives first, then negatives (same order as the original).
    _encode_file(pos_file_path, 1)
    _encode_file(neg_file_path, 0)

    X_train, X_test, y_train, y_test = train_test_split(
        samples, labels, test_size=test_size, shuffle=True)
    return (X_train, y_train), (X_test, y_test)


# Raw corpora: one sample per line; pos.txt -> label 1, neg.txt -> label 0.
pos_file_path = "./pos.txt"
neg_file_path = "./neg.txt"
# Default split: max_length=100 tokens, 20% held out for testing.
(X_train, y_train), (X_test, y_test) = get_train_test_data(pos_file_path, neg_file_path)


# Dataset wrapper consumed by torch DataLoader below.
class DataGen(data.Dataset):
    """Minimal map-style Dataset over pre-encoded token ids and labels."""

    def __init__(self, data, label):
        # Store references only; per-item numpy conversion happens in
        # __getitem__ so the DataLoader can collate into tensors.
        self.data = data
        self.label = label

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sample, target = self.data[index], self.label[index]
        return np.array(sample), np.array(target)


# Wrap the splits in Dataset objects and batch them.
# NOTE(review): the train loader is not shuffled between epochs — confirm
# that is intentional.
train_dataset = DataGen(X_train, y_train)
test_dataset = DataGen(X_test, y_test)

BATCH_SIZE = 256
train_dataloader = data.DataLoader(train_dataset, batch_size=BATCH_SIZE)
test_dataloader = data.DataLoader(test_dataset, batch_size=BATCH_SIZE)

# Softmax-classification objective; SGD with momentum and weight decay.
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(albertBertClassifier.parameters(),
                            lr=0.01, momentum=0.9, weight_decay=1e-4)

print('======= prepair finished, start train ===========')
# Train/evaluate for a fixed number of epochs, printing metrics each epoch.
EPOCHS = 50
for epoch in range(EPOCHS):
    # ---- training pass ----
    loss_sum = 0.0
    accu = 0
    albertBertClassifier.train()
    for token_ids, label in train_dataloader:
        token_ids = token_ids.to(device).long()
        label = label.to(device).long()
        out = albertBertClassifier(token_ids)
        loss = criterion(out, label)
        optimizer.zero_grad()
        loss.backward()   # backpropagation
        optimizer.step()  # parameter update
        # FIX: .item() replaces the deprecated .cpu().data.numpy() idiom.
        loss_sum += loss.item()
        accu += (out.argmax(1) == label).sum().item()

    # ---- evaluation pass ----
    test_loss_sum = 0.0
    test_accu = 0
    albertBertClassifier.eval()
    with torch.no_grad():  # hoisted around the whole loop; no per-batch cost
        for token_ids, label in test_dataloader:
            token_ids = token_ids.to(device).long()
            label = label.to(device).long()
            out = albertBertClassifier(token_ids)
            loss = criterion(out, label)
            test_loss_sum += loss.item()
            test_accu += (out.argmax(1) == label).sum().item()

    # BUG FIX: CrossEntropyLoss returns the per-batch *mean*, so the summed
    # loss must be averaged over the number of batches. The original divided
    # by len(dataset), drastically under-reporting the loss. Accuracy is
    # correctly averaged per sample.
    print(f"epoch {epoch}, "
          f"train loss:{loss_sum / len(train_dataloader):f}, "
          f"train acc:{accu / len(train_dataset):f}, "
          f"test loss:{test_loss_sum / len(test_dataloader):f}, "
          f"test acc:{test_accu / len(test_dataset):f}")

# NOTE(review): torch.save on whole objects pickles class definitions, which
# breaks if the module layout changes; model.state_dict() plus
# tokenizer.save_pretrained() is the portable pattern. Kept as-is to
# preserve the existing output artifacts.
torch.save(albertBertClassifier, './model.pt')
torch.save(tokenizer, './tokenizer.pt')

