# Build the token (vocabulary) table for the couplet dataset.

# import jieba
import pandas as pd
from tqdm import tqdm
import numpy as np

from multiprocessing import Process, Manager, freeze_support


def gen_text_cut_by_jieba(one_data_in, return_voc):
    """Collect the unique character-level tokens occurring in `one_data_in`.

    Parameters
    ----------
    one_data_in : iterable of str
        Text samples to tokenize (character-level; the jieba word cut is
        currently disabled, see the commented-out call).
    return_voc : list-like (e.g. multiprocessing.Manager().list())
        Shared output container; the resulting vocabulary list is appended.
    """
    # Accumulate into a set: the original list(set(...)) re-dedup on every
    # iteration was accidentally quadratic; set.update is linear per sample.
    vocab = set()
    for text in tqdm(one_data_in):
        # character tokens; switch to jieba.cut(text) for word-level tokens
        vocab.update(text)
    # Order is arbitrary (as before); the caller dedupes and sorts anyway.
    return_voc.append(list(vocab))


def gen_data_pre(keep_len=18):
    """Join each couplet's first/second line into one string, keep only the
    fixed-length samples, and pickle them to ``src_data.pandas_pickle``.

    Parameters
    ----------
    keep_len : int, default 18
        Only samples whose joined length equals this value are retained
        (generalizes the previously hard-coded 18 so other lengths can be
        prepared without editing the function).
    """
    with open("fixed_couplets_in.txt", encoding="utf-8") as f:
        in_data = f.readlines()
    with open("fixed_couplets_out.txt", encoding="utf-8") as f:
        out_data = f.readlines()
    # Drop whitespace separators inside each half, then concatenate the pair.
    src_data = ["".join(i.strip().split() + j.strip().split())
                for i, j in tqdm(zip(in_data, out_data))]
    src_data = pd.DataFrame(src_data)
    src_data["data_len"] = src_data[0].str.len()
    # Keep only fixed-length samples so downstream batching is uniform.
    src_data = src_data.loc[src_data["data_len"] == keep_len, 0].values.tolist()
    pd.to_pickle({"data": src_data}, "src_data.pandas_pickle")


def gen_voc_to_pandas_pickle():
    """Build the vocabulary from ``src_data.pandas_pickle`` with worker
    processes and pickle it, prepending ``<pad>`` and appending ``<eos>``.
    """
    src_data = pd.read_pickle("src_data.pandas_pickle")["data"]
    work_num = 8
    # Guard against small datasets: len(src_data) < work_num would make the
    # step 0 and range() raise ValueError.
    works_num_steps = max(1, len(src_data) // work_num)
    voc_id_list = Manager().list()  # shared across worker processes
    p_list = []
    for i in range(0, len(src_data), works_num_steps):
        one_data = src_data[i:i + works_num_steps]
        p = Process(target=gen_text_cut_by_jieba, args=(one_data, voc_id_list))
        p.start()
        p_list.append(p)
    for p in p_list:
        p.join()
    # Merge the per-worker vocabularies, dedupe, sort, add special tokens.
    voc_id_list = ["<pad>"] + sorted(set(np.hstack(voc_id_list))) + ["<eos>"]
    pd.to_pickle({"data": voc_id_list}, "voc_data.pandas_pickle")


def gen_text_cut_by_jieba_to_token(src_data_s, one_voc, out_dir, start_index, end_index):
    """Convert each text sample to its list of token ids and pickle the batch.

    Parameters
    ----------
    src_data_s : iterable of str
        Text samples (character-level; the jieba cut is disabled).
    one_voc : sequence of str
        Vocabulary; a token's id is its index within this sequence.
    out_dir : str
        Directory the output pickle is written into (must already exist).
    start_index, end_index : int
        Chunk bounds, used only to name the output file.
    """
    # Hoist a token->id dict out of the loop: a dict lookup per character
    # replaces the original per-sample DataFrame merge (same ids, far less
    # overhead, and no longer shadows the `one_voc` parameter in the loop).
    voc_to_id = {voc: idx for idx, voc in enumerate(one_voc)}
    total_list = []
    for src_data_one in tqdm(src_data_s):
        # Out-of-vocabulary tokens map to NaN, mirroring the left merge.
        total_list.append([voc_to_id.get(ch, float("nan")) for ch in src_data_one])
        # NOTE: if the result gets too large, flush to disk every n samples.
    pd.to_pickle({"data": total_list}, "{}/{}{}.pandas_pickle".format(out_dir, start_index, end_index))


def gen_src_data_to_token():
    """Tokenize src_data in parallel chunks; each worker writes its own
    token-id pickle into ``train_data_dir``.
    """
    src_data = pd.read_pickle("src_data.pandas_pickle")["data"]
    voc_id = pd.read_pickle("voc_data.pandas_pickle")["data"]
    work_num = 8
    # Guard against small datasets: len(src_data) < work_num would make the
    # step 0 and range() raise ValueError.
    works_num_steps = max(1, len(src_data) // work_num)

    p_list = []
    for i in range(0, len(src_data), works_num_steps):
        j = i + works_num_steps
        p = Process(target=gen_text_cut_by_jieba_to_token,
                    args=(src_data[i:j], voc_id, "train_data_dir", i, j))
        p.start()
        p_list.append(p)
    for p in p_list:
        p.join()



if __name__ == '__main__':
    # Required on Windows when the script is frozen into an executable,
    # since workers are spawned via multiprocessing.
    freeze_support()
    # Data preprocessing: build fixed-length joined couplet strings.
    gen_data_pre()
    # Generate the token (vocabulary) table.
    gen_voc_to_pandas_pickle()
    # Convert src_data strings into token-id lists.
    gen_src_data_to_token()
