import argparse
import os
import time
import datetime
from tqdm import tqdm
import torch
from transformers import BertTokenizer, BertModel, BertConfig
import numpy as np
from torch.utils import data
from sklearn.model_selection import train_test_split


# dataloader
# dataloader
class DataGen(data.Dataset):
    """Minimal map-style dataset that pairs encoded samples with labels.

    ``data`` and ``label`` are parallel sequences; item ``i`` of one
    corresponds to item ``i`` of the other.
    """

    def __init__(self, data, label):
        self.data = data
        self.label = label

    def __len__(self):
        # Dataset size is defined by the sample list.
        return len(self.data)

    def __getitem__(self, index):
        # Copy into numpy arrays so the DataLoader can collate them
        # into tensors.
        sample = np.array(self.data[index])
        target = np.array(self.label[index])
        return sample, target


class Model(torch.nn.Module):
    """Sentence classifier: a BERT encoder followed by a two-layer MLP head.

    Attribute names (``bert_model``, ``fc1``, ``fc2``, ``dropout``) are kept
    stable because they determine the ``state_dict`` keys used for
    checkpoint save/load.
    """

    def __init__(self, bert_model, bert_config, num_class):
        super(Model, self).__init__()
        hidden = bert_config.hidden_size
        self.bert_model = bert_model
        self.dropout = torch.nn.Dropout(0.4)
        self.fc1 = torch.nn.Linear(hidden, hidden)
        self.fc2 = torch.nn.Linear(hidden, num_class)
        self.relu = torch.nn.ReLU()

    def forward(self, token_ids):
        # Index [1] of the BERT output is the pooled sentence vector,
        # shape [batch_size, hidden_size].
        pooled = self.bert_model(token_ids)[1]
        # Head: dropout -> fc1 -> relu -> dropout -> fc2 (same op order
        # and dropout-call count as the original implementation).
        hidden = self.relu(self.fc1(self.dropout(pooled)))
        logits = self.fc2(self.dropout(hidden))
        return logits  # [batch_size, num_class]


def do_train(args):
    """Fine-tune a Chinese RoBERTa sentence classifier.

    Reads labeled lines from ``./datas/{args.name}.txt`` (format:
    ``text__label`` per line), tokenizes them, trains a ``Model`` head on
    top of ``hfl/chinese-roberta-wwm-ext``, and saves a checkpoint per
    epoch under ``./roberta/``. The discovered label vocabulary is written
    to ``./{args.name}.txt``, one label per line, in index order.

    Args:
        args: parsed CLI namespace from ``setup_args`` (name, pretrained,
            epoch, batch_size, test_size, data_num, max_length).
    """
    print("开始读取数据")
    with open(f"./datas/{args.name}.txt", "r", encoding="utf-8") as f:
        datas = f.readlines()

    # Optional cap on the number of samples (useful for smoke tests).
    if args.data_num:
        datas = datas[:args.data_num]

    print("完成读取数据,数据长度为：", len(datas))

    datas = [x.strip() for x in datas]
    info_labels = []

    def get_label(x):
        """Map a label string to its index, registering new labels;
        an int argument maps back to the label string."""
        if isinstance(x, str):
            if x not in info_labels:
                info_labels.append(x)
            return info_labels.index(x)
        return info_labels[x]

    # Fix: race-free, idempotent directory creation (was exists()+mkdir()).
    os.makedirs('./roberta', exist_ok=True)
    print("开始加载预训练模型")
    # Load the pretrained encoder, tokenizer and config.
    pretrained = 'hfl/chinese-roberta-wwm-ext'
    tokenizer = BertTokenizer.from_pretrained(pretrained)
    bert_model = BertModel.from_pretrained(pretrained)
    config = BertConfig.from_pretrained(pretrained)
    print("完成加载预训练模型")

    def get_train_test_data(max_length=args.max_length, test_size=0.2):
        """Tokenize all samples and split into train/test sets."""
        texts = []
        labels = []

        for one in tqdm(datas):
            result = one.split("__")
            # Skip malformed lines that don't split into exactly text/label.
            if len(result) != 2:
                continue

            text, label = result
            try:
                label_index = get_label(label.strip())
                token_ids = tokenizer.encode(text.strip(), max_length=max_length, padding="max_length",
                                             truncation="longest_first")
                texts.append(token_ids)
                labels.append(label_index)
            except Exception as e:
                # Best-effort: log and skip samples the tokenizer rejects.
                print(e)
                continue
        X_train, X_test, y_train, y_test = train_test_split(texts, labels, test_size=test_size, random_state=0,
                                                            shuffle=True)
        return (X_train, y_train), (X_test, y_test)

    print("开始转换数据")
    (X_train, y_train), (X_test, y_test) = get_train_test_data(test_size=args.test_size)
    print("完成转换数据")
    # Persist the label vocabulary so inference can map indices back.
    with open(f"./{args.name}.txt", "w", encoding="utf-8") as f:
        for label in info_labels:
            f.write(f"{label}\n")

    batch_size = args.batch_size
    train_dataset = DataGen(X_train, y_train)
    test_dataset = DataGen(X_test, y_test)
    # Fix: shuffle the training data each epoch (was unshuffled file order).
    train_dataloader = data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    test_dataloader = data.DataLoader(test_dataset, batch_size=batch_size)

    # Task name, used in checkpoint filenames.
    data_type = args.name

    model = Model(bert_model, config, len(info_labels))
    if args.pretrained:
        # Resume from a previously saved checkpoint.
        print("加载权重：", args.pretrained)
        model.load_state_dict(
            torch.load(os.path.join("roberta", args.pretrained), map_location=lambda storage, loc: storage)
        )
    device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
    print("train device ==", device)
    model = model.to(device)

    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001, weight_decay=1e-4)

    for epoch in range(args.epoch):
        print(f"epoch = {epoch}, datetime = {datetime.datetime.now()}")
        start = time.time()
        loss_sum = 0.0
        accu = 0
        model.train()
        for token_ids, label in tqdm(train_dataloader):
            token_ids = token_ids.to(device).long()
            label = label.to(device).long()
            out = model(token_ids)
            loss = criterion(out, label)
            optimizer.zero_grad()
            loss.backward()  # backpropagate
            optimizer.step()  # update weights
            loss_sum += loss.cpu().data.numpy()
            accu += (out.argmax(1) == label).sum().cpu().data.numpy()

        # Evaluation pass: no gradients, dropout disabled.
        test_loss_sum = 0.0
        test_accu = 0
        model.eval()
        for token_ids, label in tqdm(test_dataloader):
            token_ids = token_ids.to(device).long()
            label = label.to(device).long()
            with torch.no_grad():
                out = model(token_ids)
                loss = criterion(out, label)
                test_loss_sum += loss.cpu().data.numpy()
                test_accu += (out.argmax(1) == label).sum().cpu().data.numpy()
        # NOTE(review): losses are sums of per-batch means divided by the
        # sample count, so the printed "loss" is scaled by 1/batch_size;
        # kept as-is to preserve the historical metric scale.
        print("epoch %d, train loss:%f, train acc:%f, test loss:%f, test acc:%f, use time:" % (
            epoch, loss_sum / len(train_dataset), accu / len(train_dataset), test_loss_sum / len(test_dataset),
            test_accu / len(test_dataset)), int(time.time() - start))
        torch.save(model.state_dict(), f'./roberta/{data_type}_model_{epoch}_{test_accu / len(test_dataset)}.pt')


def setup_args():
    """Build and parse the command-line arguments for training.

    Returns:
        argparse.Namespace with: name, pretrained, epoch, batch_size,
        test_size, data_num, max_length.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--name", type=str, help="name of the data")
    parser.add_argument("-p", "--pretrained", type=str, help="以前训练过的模型权重，加载继续训练")
    parser.add_argument("-e", "--epoch", default=25, type=int, help="训练迭代次数")
    parser.add_argument("-b", "--batch_size", default=128, type=int, help="训练批次大小")
    parser.add_argument("-t", "--test_size", default=0.1, type=float, help="测试集比例")
    parser.add_argument("-dn", "--data_num", type=int, help="训练的数据数量，可以用来测试batch size")
    # Fix: help text was copy-pasted from --data_num and described the wrong option.
    parser.add_argument("-ml", "--max_length", default=64, type=int, help="文本的最大token长度，超出部分会被截断")
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    # Script entry point: parse CLI options, then run the training loop.
    do_train(setup_args())
