# encoding: utf-8

import sys
import os
import datetime
import argparse
import pickle
from tqdm import tqdm
import torch
from torch.utils import data
from transformers import BertModel, BertConfig

from models.model import DataGen, ModelExpert


def do_train(args):
    """Fine-tune a BERT-based classifier on a pickled train/test split.

    Loads a pretrained Chinese RoBERTa backbone, wraps it in ModelExpert,
    trains for ``args.epoch`` epochs with Adam + cross-entropy, evaluates on
    the held-out split each epoch, and saves a checkpoint per epoch under
    ``./{args.name}/``.

    Args:
        args: parsed CLI namespace; uses ``name``, ``batch_size``, ``epoch``
            and (optionally) ``pretrained`` (checkpoint filename to resume from).

    Side effects: creates ``./{args.name}/`` with a label list text file and
    one ``.pt`` checkpoint per epoch; prints progress to stdout.
    """
    # Task name doubles as the output directory name.
    data_type = args.name
    os.makedirs(f'./{data_type}', exist_ok=True)

    print("开始加载预训练模型")
    # Pretrained backbone location differs between the Windows dev box and
    # the Linux training host.
    if sys.platform.startswith('win'):
        pretrained = 'D:/codes/nlp_about/pretrained_model/hfl_chinese-roberta-wwm-ext'
    else:
        pretrained = '/root/.../pretrained_models/hfl-chinese-roberta-wwm-ext'

    bert_model = BertModel.from_pretrained(pretrained)
    config = BertConfig.from_pretrained(pretrained)
    print("完成加载预训练模型")

    print("开始加载数据")
    # NOTE(review): pickle.load on a file you don't control is unsafe;
    # assumed trusted here since the pickle is produced by this project.
    with open(f"./datas/{args.name}.pkl", "rb") as f:
        data_item = pickle.load(f)
    X_train = data_item.get("xtrain")
    X_test = data_item.get("xtest")
    y_train = data_item.get("ytrain")
    y_test = data_item.get("ytest")
    labels = data_item.get("label")
    print("完成加载数据")

    # Persist the label vocabulary so inference can map indices back to names.
    with open(f"./{data_type}/{args.name}.txt", "w", encoding="utf-8") as f:
        for label_name in labels:
            f.write(f"{label_name}\n")

    batch_size = args.batch_size
    train_dataset = DataGen(X_train, y_train)
    test_dataset = DataGen(X_test, y_test)
    # Shuffle the training set each epoch; evaluation order doesn't matter.
    train_dataloader = data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    test_dataloader = data.DataLoader(test_dataset, batch_size=batch_size)

    model = ModelExpert(bert_model, config, len(labels))
    if args.pretrained:
        print("加载权重：", args.pretrained)
        # map_location keeps CPU-only hosts working with GPU-saved weights.
        # NOTE(review): checkpoints load from "roberta/" but save to
        # f"./{data_type}/" — confirm this asymmetry is intentional.
        model.load_state_dict(
            torch.load(os.path.join("roberta", args.pretrained), map_location=lambda storage, loc: storage)
        )
    device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
    print("using device:", device)
    model = model.to(device)

    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001, weight_decay=1e-4)

    for epoch in range(args.epoch):
        print(f"epoch = {epoch}, datetime = {datetime.datetime.now()}")
        loss_sum = 0.0
        accu = 0

        model.train()
        for token_ids, c1 in tqdm(train_dataloader):
            token_ids = token_ids.to(device).long()
            c1_label = c1.to(device).long()
            out = model(token_ids)
            loss = criterion(out, c1_label)
            optimizer.zero_grad()
            loss.backward()  # backpropagation
            optimizer.step()  # parameter update
            # .item() extracts the scalar without the deprecated .data access.
            loss_sum += loss.item()
            accu += (out.argmax(1) == c1_label).sum().item()

        test_loss_sum = 0.0
        test_accu = 0

        model.eval()
        for token_ids, c1 in tqdm(test_dataloader):
            token_ids = token_ids.to(device).long()
            c1_label = c1.to(device).long()
            with torch.no_grad():
                out = model(token_ids)
                loss = criterion(out, c1_label)
                test_loss_sum += loss.item()
                test_accu += (out.argmax(1) == c1_label).sum().item()

        # CrossEntropyLoss returns a per-batch mean, so average over the
        # number of batches (not the number of samples) to report loss.
        train_loss = loss_sum / max(len(train_dataloader), 1)
        test_loss = test_loss_sum / max(len(test_dataloader), 1)
        print(f"epoch:{epoch}, train acc:{accu / len(train_dataset)} train loss:{train_loss}, test loss:{test_loss}, test acc:{test_accu / len(test_dataset)}")

        # Checkpoint every epoch, embedding the test accuracy in the filename.
        torch.save(model.state_dict(), f'./{data_type}/{data_type}_model_{epoch}_{test_accu / len(test_dataset)}.pt')


def setup_args(argv=None):
    """Build and parse the command-line arguments for training.

    Args:
        argv: optional list of argument strings; when None (the default,
            preserving the original behavior), ``sys.argv[1:]`` is parsed.
            Passing a list makes the function testable/programmable.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--name", default="train", type=str, help="name of the data")
    parser.add_argument("-p", "--pretrained", type=str, help="以前训练过的模型权重，加载继续训练")
    parser.add_argument("-e", "--epoch", default=25, type=int, help="训练迭代次数")
    parser.add_argument("-b", "--batch_size", default=384, type=int, help="训练批次大小")
    parser.add_argument("-t", "--test_size", default=0.05, type=float, help="测试集比例")
    parser.add_argument("-dn", "--data_num", type=int, help="训练的数据数量，可以用来测试batch size")
    # Fixed copy-pasted help text (previously duplicated from --data_num).
    parser.add_argument("-ml", "--max_length", default=64, type=int, help="训练数据的最大文本长度")
    args = parser.parse_args(argv)
    return args


if __name__ == '__main__':
    # Script entry point: parse the CLI options and kick off training.
    do_train(setup_args())