import argparse
import os
import time
import datetime
import pickle
from tqdm import tqdm
import torch
from transformers import BertTokenizer, BertModel, BertConfig
import numpy as np
from torch.utils import data
from sklearn.model_selection import train_test_split

# from models.models import DataGen, ModelResnet15, ModelResnet51, ModelResnet101, Model, ModelLarge, ModelAuto
from models.models import DataGen, ModelResnet, _BaseModel


def freeze_model(model: _BaseModel):
    """Freeze the BERT encoder so only the classifier head trains.

    Every parameter whose qualified name contains "bert_model" gets
    ``requires_grad = False``; all other parameters are left untouched.
    Returns the same model instance for call chaining.
    """
    frozen_marker = "bert_model"
    for param_name, param in model.named_parameters():
        if param_name.find(frozen_marker) >= 0:
            param.requires_grad = False
    return model


def do_train(args):
    """Train a ModelResnet text classifier on top of a pretrained RoBERTa encoder.

    Expects a pickle at ``./datas/{args.name}.pkl`` with keys
    "xTrain"/"yTrain"/"xTest"/"yTest"/"labels" (produced upstream —
    presumably token-id arrays; confirm against the data-prep script).
    Saves one ``state_dict`` checkpoint per epoch into ``./{args.name}/``,
    with the test accuracy embedded in the filename.

    Args:
        args: argparse.Namespace from ``setup_args`` (uses name, batch_size,
            pretrained, epoch).
    """
    # Checkpoint output directory. makedirs(exist_ok=True) avoids the
    # check-then-create race of the original exists()/mkdir pair.
    os.makedirs(f"./{args.name}", exist_ok=True)

    pkl_path = f"./datas/{args.name}.pkl"
    with open(pkl_path, "rb") as f:
        pretrained_data = pickle.load(f)

    X_train = pretrained_data.get("xTrain")
    y_train = pretrained_data.get("yTrain")
    X_test = pretrained_data.get("xTest")
    y_test = pretrained_data.get("yTest")
    info_labels = pretrained_data.get("labels")

    batch_size = args.batch_size
    train_dataset = DataGen(X_train, y_train)
    test_dataset = DataGen(X_test, y_test)
    # Shuffle training batches each epoch: a fixed sample order biases SGD.
    # Evaluation order does not matter, so the test loader stays sequential.
    train_dataloader = data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    test_dataloader = data.DataLoader(test_dataset, batch_size=batch_size)

    # Task name, reused in the checkpoint filenames.
    data_type = args.name

    print("开始加载预训练模型")
    # Local copy of hfl/chinese-roberta-wwm-ext; switch to the hub id below
    # when the local path is unavailable.
    pretrained = 'D:/codes/nlp_about/pretrained_model/hfl_chinese-roberta-wwm-ext'
    # pretrained = 'hfl/chinese-roberta-wwm-ext'
    bert_model = BertModel.from_pretrained(pretrained)
    config = BertConfig.from_pretrained(pretrained)
    print("完成加载预训练模型")

    model = ModelResnet(bert_model, config, len(info_labels), res_layer_num=15)

    # model = freeze_model(model)

    if args.pretrained:
        # Resume from a previous run. map_location="cpu" lets CUDA-saved
        # weights load on CPU-only machines; .to(device) moves them back.
        print("加载权重：", args.pretrained)
        model.load_state_dict(
            torch.load(os.path.join("roberta", args.pretrained), map_location="cpu")
        )

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("train device ==", device)
    model = model.to(device)

    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001, weight_decay=1e-4)

    for epoch in range(args.epoch):
        print(f"epoch = {epoch}, datetime = {datetime.datetime.now()}")
        start = time.time()
        loss_sum = 0.0
        accu = 0
        model.train()
        for token_ids, label in tqdm(train_dataloader):
            token_ids = token_ids.to(device).long()
            label = label.to(device).long()
            out = model(token_ids)
            loss = criterion(out, label)
            optimizer.zero_grad()
            loss.backward()   # backpropagation
            optimizer.step()  # parameter update
            # .item() detaches the 0-dim loss tensor; the old
            # .cpu().data.numpy() pattern is deprecated.
            loss_sum += loss.item()
            accu += (out.argmax(1) == label).sum().item()

        test_loss_sum = 0.0
        test_accu = 0
        model.eval()
        for token_ids, label in tqdm(test_dataloader):
            token_ids = token_ids.to(device).long()
            label = label.to(device).long()
            with torch.no_grad():
                out = model(token_ids)
                loss = criterion(out, label)
                test_loss_sum += loss.item()
                test_accu += (out.argmax(1) == label).sum().item()
        # CrossEntropyLoss returns a per-batch *mean*, so the average loss
        # divides by the number of batches; the original divided by the
        # number of samples, understating the loss by ~batch_size.
        # Accuracy correctly divides correct-prediction counts by sample count.
        print("epoch %d, train loss:%f, train acc:%f, test loss:%f, test acc:%f, use time:" % (
            epoch, loss_sum / len(train_dataloader), accu / len(train_dataset),
            test_loss_sum / len(test_dataloader),
            test_accu / len(test_dataset)), int(time.time() - start))
        torch.save(model.state_dict(), f'./{args.name}/{data_type}_model_{epoch}_{test_accu / len(test_dataset)}.pt')


def setup_args():
    """Parse command-line options for the training script.

    Returns:
        argparse.Namespace with fields: name, pretrained, epoch,
        batch_size, test_size, data_num, max_length.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--name", default="staples_c6", type=str, help="name of the data")
    parser.add_argument("-p", "--pretrained", type=str, help="以前训练过的模型权重，加载继续训练")
    parser.add_argument("-e", "--epoch", default=25, type=int, help="训练迭代次数")
    parser.add_argument("-b", "--batch_size", default=128, type=int, help="训练批次大小")
    parser.add_argument("-t", "--test_size", default=0.1, type=float, help="测试集比例")
    parser.add_argument("-dn", "--data_num", type=int, help="训练的数据数量，可以用来测试batch size")
    # Fixed copy-pasted help text: --max_length is the maximum token sequence
    # length, not (as the original said) the number of training samples.
    parser.add_argument("-ml", "--max_length", default=64, type=int, help="分词后的最大序列长度")
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    # Script entry point: parse CLI options, then run training.
    cli_args = setup_args()
    do_train(cli_args)
