"""
预处理数据，将数据转化为id并保存
"""
from transformers import BertTokenizer
from multiprocessing import Pool
import argparse
import random
import os
import re

import logging
import numpy as np
import jsonlines

from gpt_2.encoder.encoder import Encoder, SmallEncoder
from gpt_2.utils.extract_structure import extract_structure_single, get_context, extract_outline


# Log to a file. NOTE: the original passed filemode="data.log", which is a
# misconfiguration -- `filemode` expects an open mode ("a"/"w"); the log-file
# path must go in `filename`. Without it, records went to stderr instead.
logging.basicConfig(format='%(asctime)s %(message)s', filename="data.log",
                    filemode="a", level=logging.DEBUG)

# Heading patterns for thesis outlines:
#   regular_1: chapter line,      e.g. "第3章 绪论"
#   regular_2: 2nd-level heading, e.g. "2.1 相关工作"  (rejects "2.1.3 ...")
#   regular_3: 3rd-level heading, e.g. "2.1.3 实验设置"
regular_1 = re.compile(r"^第\d{1,2}章.*$")
regular_2 = re.compile(r"^\d{1,2}\.\d{1,2}[^\d.].+$")
regular_3 = re.compile(r"^\d{1,2}\.\d{1,2}\.\d{1,2}[^\d.].+$")

# Prefix-only variants used with .sub("") to strip the numbering from a heading.
regular_index_1 = re.compile(r"^第\d{1,2}章")
regular_index_2 = re.compile(r"^\d{1,2}\.\d{1,2}")


def encode_sequence_data(file, save_name):
    """Tokenize one text file, line by line, into fixed-length id rows.

    Each stripped line is encoded as [101] + bpe_ids + [102] (BERT-style
    CLS/SEP markers), padded with 0 to exactly 1024 ids, and the result is
    saved as a 2-D numpy array at ``save_name``.

    file: path of the UTF-8 text file to tokenize (one sample per line).
    save_name: destination path for ``np.save``.
    """
    # Each worker process builds its own encoder instance.
    my_encoder1 = Encoder('data/old_encoder.json', 'data/old_wiki.bpe')
    with open(file, "r", encoding="utf-8") as f:
        lines = f.readlines()
    tokenized_ids = []
    for line in lines:
        line = line.strip()
        try:
            ids = [101] + my_encoder1.encode(line) + [102]
        except Exception:
            # Skip lines the BPE encoder cannot handle instead of aborting
            # the whole file. (The original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit.)
            continue
        if len(ids) > 1024:
            # Over-long lines previously slipped through with a no-op
            # negative pad, producing ragged rows and an object-dtype array.
            # Truncate, keeping the [102] end marker in the last slot.
            ids = ids[:1023] + [102]
        else:
            ids = ids + [0] * (1024 - len(ids))
        tokenized_ids.append(ids)

    np.save(save_name, np.array(tokenized_ids))
    logging.debug("%s序列化完成, %d个数据" % (save_name, len(tokenized_ids)))


def encode_outline(file, save_file):
    """Build training rows for the skeleton-augmented outline model.

    One row per (1st-level, 2nd-level) outline pair, laid out as:
    (pad){0,32} + sp0 + title + sp1 + (keyword + sp2)* + sp3 + structure +
        sp5 + 1st_outline + sp4 + structure + sp6 + 2nd_outline + endoftext

    file: input jsonl path (one paper record per line).
    save_file: output numpy file path; rows are padded to length 128.
    """

    def add_paper_item(_list, _item):
        # Build the shared prefix once per paper: sp0 + title [+ sp1 + keywords].
        title, keywords, outlines_1st, outlines_2nd = extract_outline(_item)
        single_outline = [my_encoder.encoder["<special_token_0>"]]
        single_outline.extend(my_encoder.encode(title))
        if len(keywords) > 1:
            single_outline.append(my_encoder.encoder["<special_token_1>"])
            first_flag = 0
            # NOTE(review): the first keyword is deliberately skipped here --
            # presumably it duplicates the title; confirm against extract_outline.
            for keyword in keywords:
                if first_flag == 0:
                    first_flag += 1
                    continue
                single_outline.extend(my_encoder.encode(keyword))
                single_outline.append(my_encoder.encoder["<special_token_2>"])
        single_outline.append(my_encoder.encoder["<special_token_3>"])

        for i in range(len(outlines_1st)):
            for j in range(len(outlines_2nd[i])):
                # Random left-padding (0..32 pads) as a positional augmentation.
                random_num = random.randint(0, 32)
                pad_list = [my_encoder.encoder["<pad>"]] * random_num

                temp_list = pad_list + single_outline

                _context = get_context(title, keywords)
                # Strip the "第N章" / "N.N" numbering before encoding.
                _outline_1 = regular_index_1.sub("", outlines_1st[i])
                _outline_2 = regular_index_2.sub("", outlines_2nd[i][j])
                _structure_1 = extract_structure_single(_outline_1, _context)
                _structure_2 = extract_structure_single(_outline_2, _context)

                temp_list.extend(my_encoder.encode(_structure_1))
                temp_list.append(my_encoder.encoder["<special_token_5>"])
                temp_list.extend(my_encoder.encode(_outline_1))
                temp_list.append(my_encoder.encoder["<special_token_4>"])
                temp_list.extend(my_encoder.encode(_structure_2))
                temp_list.append(my_encoder.encoder["<special_token_6>"])
                temp_list.extend(my_encoder.encode(_outline_2))
                temp_list.append(my_encoder.encoder["<|endoftext|>"])

                if len(temp_list) < 128:
                    temp_list.extend([my_encoder.encoder["<pad>"] for _ in range(128 - len(temp_list))])
                    # BUGFIX: append exactly once. The original appended here
                    # AND unconditionally below, duplicating every short sample.
                    _list.append(temp_list)
                else:
                    # Over-length samples are reported and dropped; the original
                    # appended them unpadded, producing a ragged array.
                    print("过长--------------------------------------------")
                    print(title)
                    print(keywords)
                    print(_outline_1)
                    print(_outline_2)
                    print("-------------------------------------------------")

    train_data = []
    with open(file, 'r+', encoding="utf-8") as f:
        counter = 0
        for item in jsonlines.Reader(f):
            counter += 1
            if counter % 1000 == 0:
                print(counter)
            add_paper_item(train_data, item)

    train_data = np.array(train_data)
    np.save(save_file, train_data)


def encode_gpt2_data(data_path, save_path, flag, vocab_file):
    """Fan out tokenization of every file in ``data_path`` across 25 workers.

    flag == 0 -> encode_sequence_data (continuous text, 1024-id rows)
    flag == 1 -> encode_outline (title + 1st/2nd outlines with skeleton)

    Output files keep their input names, written under ``save_path``.
    """
    if not os.path.exists(save_path):
        # makedirs: also create missing parent directories (mkdir would fail).
        os.makedirs(save_path)
    with open(vocab_file, "r", encoding="utf-8") as f:
        # NOTE(review): `vocab` is never used below -- the read is kept only
        # so a missing vocab file still fails fast; confirm before removing.
        vocab = f.read().split("\n")
    files = os.listdir(data_path)
    logging.debug("总共%d个文件" % len(files))
    pool = Pool(25)
    for file_name in files:
        abs_file_path = os.path.join(data_path, file_name)
        abs_save_path = os.path.join(save_path, file_name)
        # error_callback: apply_async otherwise swallows worker exceptions
        # silently, making failed shards look like missing output.
        if flag == 0:
            pool.apply_async(encode_sequence_data, (abs_file_path, abs_save_path,),
                             error_callback=logging.error)
        elif flag == 1:
            pool.apply_async(encode_outline, (abs_file_path, abs_save_path,),
                             error_callback=logging.error)
    pool.close()
    pool.join()


if __name__ == "__main__":
    # CLI: choose an encoder backend, then tokenize a directory of files.
    parser = argparse.ArgumentParser()
    parser.add_argument('--encoder_json', default='data/old_encoder.json', type=str, help='encoder')
    parser.add_argument('--bpe', default='data/old_wiki.bpe', type=str, help='bpe')
    parser.add_argument("--data_path", default="data/one_piece", type=str, help="data_path")
    parser.add_argument("--save_path", default="tokenized/one_piece", type=str, help="save_path")
    parser.add_argument("--flag", default=0, type=int, help="0 连续的数据 1 标题和一级、二级提纲带骨架")
    parser.add_argument("--encoder_type", default="bpe", type=str, help="encode的方式，可以选择bpe或者bert_tokenizer")
    parser.add_argument("--vocab", default="data/vocab.txt", type=str)
    parser.add_argument('--small_vocab', default="data/commom_chinese.txt", type=str)
    args = parser.parse_args()
    # `my_encoder` must be a module-level global: encode_outline reads it
    # from the worker processes (inherited via fork).
    if args.encoder_type == "bpe":
        my_encoder = Encoder(args.encoder_json, args.bpe)
    elif args.encoder_type == "char":
        my_encoder = SmallEncoder(args.small_vocab)
    elif args.encoder_type == "bert":
        my_encoder = BertTokenizer.from_pretrained("bert-base-chinese")
        my_encoder.max_len = 1e12
    else:
        # BUGFIX: an unknown encoder_type previously left `my_encoder`
        # undefined and crashed later with a confusing NameError.
        parser.error("unknown --encoder_type %r (expected bpe, char or bert)" % args.encoder_type)
    encode_gpt2_data(args.data_path, args.save_path, args.flag, args.vocab)

"""
运行示例 
nohup python -u encode_file.py \
--encoder_type bpe \
--data_path /data/corpus/writer/outline/splited \
--save_path /data/corpus/writer/outline/npy_1st_2st_with_structure_single \
--flag 1 > log1.txt & 
"""

