# encoding: utf-8

import sys
import argparse
import os

from tqdm import tqdm
import torch
from transformers import BertTokenizer, BertModel, BertConfig

from sklearn.model_selection import train_test_split
import pickle


def process_text(args):
    """Read raw ``./<name>.txt`` lines, tokenize them with a pretrained BERT
    tokenizer, split into train/test sets, and pickle the result.

    Each input line is expected to look like ``<text>__<label>``; lines that
    do not split into exactly two parts are skipped.

    Args:
        args: parsed CLI namespace with ``name``, ``data_num``, ``max_length``
            and ``test_size`` attributes (see ``setup_args``).

    Side effects:
        Creates the ``./<name>/`` directory and writes ``./<name>.pkl``
        containing a dict with keys xtrain/ytrain/xtest/ytest/label.
    """
    print("开始读取数据")
    with open(f"./{args.name}.txt", "r", encoding="utf-8") as f:
        datas = f.readlines()

    # Optionally truncate the dataset for quick experiments (e.g. probing
    # a workable batch size).
    if args.data_num:
        datas = datas[:args.data_num]

    print("完成读取数据,数据长度为：", len(datas))

    datas = [x.strip() for x in datas]
    c1_labels = []

    def get_label(c1):
        # Map a label string to a stable integer index (first-seen order).
        # An int argument performs the reverse lookup (index -> label string).
        if isinstance(c1, str):
            if c1 not in c1_labels:
                c1_labels.append(c1)

            return c1_labels.index(c1)
        else:
            return c1_labels[c1]

    # Task / dataset name; also used as an output directory name.
    data_type = args.name

    # makedirs(exist_ok=True) avoids the check-then-create race of the
    # original os.path.exists + os.mkdir pair.
    os.makedirs(f'./{data_type}', exist_ok=True)
    print("开始加载预训练模型")
    # Load the pretrained model (only the tokenizer is needed here).
    if sys.platform.startswith('win'):
        # pretrained = '/hfl/chinese-roberta-wwm-ext'
        pretrained = 'D:/codes/nlp_about/pretrained_model/hfl_chinese-roberta-wwm-ext'
    else:
        pretrained = '/root/.../pretrained_models/hfl-chinese-roberta-wwm-ext'

    tokenizer = BertTokenizer.from_pretrained(pretrained)
    print("完成加载预训练模型")

    def get_train_test_data(max_length=args.max_length, test_size=0.1):
        # Tokenize every well-formed "<text>__<label>" line, then split.
        texts = []
        labels = []

        for one in tqdm(datas):
            result = one.split("__")
            if len(result) != 2:
                # Malformed line: skip it.
                continue

            text, c1 = result
            try:
                label_index = get_label(c1.strip())  # fixed typo: was `lebal_index`
                text = tokenizer.encode(text.strip(), max_length=max_length, padding="max_length",
                                        truncation="longest_first")
                texts.append(text)
                labels.append(label_index)
            except Exception as e:
                # Best-effort: log the tokenization failure and keep going.
                print(e)
                continue
        X_train, X_test, y_train, y_test = train_test_split(texts, labels, test_size=test_size, random_state=0,
                                                            shuffle=True)
        return (X_train, y_train), (X_test, y_test)

    print("开始转换数据")
    (X_train, y_train), (X_test, y_test) = get_train_test_data(test_size=args.test_size)
    print("完成转换数据")

    item = {
        "xtrain": X_train,
        "ytrain": y_train,
        "xtest": X_test,
        "ytest": y_test,
        "label": c1_labels,
    }

    # pickle.dump streams straight to the file object instead of building an
    # intermediate bytes blob with pickle.dumps.
    with open(f"./{args.name}.pkl", "wb") as f:
        pickle.dump(item, f)


def setup_args(argv=None):
    """Build and parse the command-line arguments.

    Args:
        argv: optional list of argument strings; ``None`` (the default)
            parses ``sys.argv[1:]`` as before, so existing callers are
            unaffected while tests can pass an explicit list.

    Returns:
        argparse.Namespace with ``name``, ``test_size``, ``data_num``
        and ``max_length`` attributes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--name", default="train", type=str, help="name of the data")
    parser.add_argument("-t", "--test_size", default=0.05, type=float, help="测试集比例")
    parser.add_argument("-dn", "--data_num", type=int, help="训练的数据数量，可以用来测试batch size")
    # Fixed copy-pasted help text: this flag controls the tokenized sequence
    # length, not the amount of training data.
    parser.add_argument("-ml", "--max_length", default=64, type=int, help="最大序列长度（tokenize截断/填充长度）")
    args = parser.parse_args(argv)
    return args


if __name__ == '__main__':
    # Script entry point: parse the CLI flags, then run preprocessing.
    process_text(setup_args())
