import argparse
import os

import time
import datetime
import pickle
from tqdm import tqdm
import torch
from transformers import BertTokenizer, BertModel, BertConfig
import numpy as np
from torch.utils import data
from sklearn.model_selection import train_test_split


def do_process(args):
    """Tokenize a labeled text dataset and pickle train/test splits.

    Reads ``./datas/{args.name}.txt`` where each line is ``text__label``,
    encodes the text with a pretrained RoBERTa-wwm tokenizer, splits into
    train/test sets, and writes a dict (xTrain/yTrain/xTest/yTest/labels)
    to ``./datas/{args.name}.pkl``.

    Args:
        args: parsed CLI namespace; uses ``name``, ``data_num``,
            ``max_length`` and ``test_size``.
    """
    print("开始读取数据")
    with open(f"./datas/{args.name}.txt", "r", encoding="utf-8") as f:
        datas = f.readlines()

    # Optional cap on the number of samples (useful for quick smoke tests).
    if args.data_num:
        datas = datas[:args.data_num]

    print("完成读取数据,数据长度为：", len(datas))

    datas = [x.strip() for x in datas]
    info_labels = []          # ordered list of distinct label strings
    label_to_index = {}       # label -> position in info_labels, O(1) lookup

    def get_label(x):
        # str: register (if new) and return its index; int: reverse lookup.
        if isinstance(x, str):
            if x not in label_to_index:
                label_to_index[x] = len(info_labels)
                info_labels.append(x)
            return label_to_index[x]
        else:
            return info_labels[x]

    # makedirs(exist_ok=True) is idempotent and avoids the exists/mkdir race.
    # NOTE(review): this directory is not used below — presumably the training
    # script writes checkpoints here; confirm before removing.
    os.makedirs('./roberta', exist_ok=True)
    print("开始加载预训练模型")
    # Load the pretrained tokenizer (hard-coded local path).
    pretrained = 'D:/codes/nlp_about/pretrained_model/hfl_chinese-roberta-wwm-ext'
    tokenizer = BertTokenizer.from_pretrained(pretrained)
    print("完成加载预训练模型")

    def get_train_test_data(max_length=args.max_length, test_size=0.2):
        # Encode every well-formed "text__label" line; skip malformed ones.
        texts = []
        labels = []

        for one in tqdm(datas):
            result = one.split("__")
            if len(result) != 2:
                continue

            text, label = result
            try:
                label_index = get_label(label.strip())
                encoded = tokenizer.encode(text.strip(), max_length=max_length, padding="max_length",
                                           truncation="longest_first")
                texts.append(encoded)
                labels.append(label_index)
            except Exception as e:
                # Best-effort: report and skip lines the tokenizer rejects.
                print(e)
                continue
        X_train, X_test, y_train, y_test = train_test_split(texts, labels, test_size=test_size, random_state=0,
                                                            shuffle=True)
        return (X_train, y_train), (X_test, y_test)

    print("开始转换数据")
    (X_train, y_train), (X_test, y_test) = get_train_test_data(test_size=args.test_size)
    print("完成转换数据")

    # Renamed from `data` to avoid shadowing the `torch.utils.data` import.
    dataset = dict()
    dataset['xTrain'] = X_train
    dataset['yTrain'] = y_train
    dataset['xTest'] = X_test
    dataset['yTest'] = y_test
    dataset['labels'] = info_labels

    pkl = f"./datas/{args.name}.pkl"

    with open(pkl, "wb") as f:
        pickle.dump(dataset, f)


def setup_args(argv=None):
    """Build the CLI argument parser and parse arguments.

    Args:
        argv: optional list of argument strings; ``None`` (the default)
            parses ``sys.argv[1:]``, preserving original behavior.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--name", type=str, help="name of the data")
    parser.add_argument("-p", "--pretrained", type=str, help="以前训练过的模型权重，加载继续训练")
    parser.add_argument("-e", "--epoch", default=25, type=int, help="训练迭代次数")
    parser.add_argument("-b", "--batch_size", default=128, type=int, help="训练批次大小")
    parser.add_argument("-t", "--test_size", default=0.1, type=float, help="测试集比例")
    parser.add_argument("-dn", "--data_num", type=int, help="训练的数据数量，可以用来测试batch size")
    # Help text was a copy-paste of --data_num's; now describes max_length.
    parser.add_argument("-ml", "--max_length", default=64, type=int, help="文本tokenize后的最大长度")
    args = parser.parse_args(argv)
    return args


if __name__ == '__main__':
    # Entry point: parse CLI options, then run the preprocessing pipeline.
    do_process(setup_args())
