# Vocabulary collection and checking of vocabulary continuity
import time
# from pandarallel import pandarallel
# pandarallel.initialize(progress_bar=True)

import jieba
# Generate the token table

# import jieba
import pandas as pd
from tqdm import tqdm
import numpy as np

from multiprocessing import Process, Manager, freeze_support


def gen_text_cut_by_jieba(one_data_in, return_voc):
    """Collect the unique characters across all strings in *one_data_in*
    and append the resulting vocabulary list to *return_voc*.

    Despite the name, jieba word segmentation is disabled (see the
    commented-out line); each string is split into single characters.

    Args:
        one_data_in: iterable of strings to scan.
        return_voc: list-like sink (e.g. a ``multiprocessing.Manager().list()``);
            the whole vocabulary list is appended as one element.
    """
    # Accumulate into a set once instead of re-deduplicating a list on
    # every iteration — the original ``voc_id = list(set(voc_id))`` made
    # the loop quadratic in total vocabulary size.
    voc_set = set()
    for text in tqdm(one_data_in):
        # voc_set.update(jieba.cut(text))  # word-level variant, disabled
        voc_set.update(text)
    return_voc.append(list(voc_set))


def gen_data_pre(target_len=18):
    """Build the training corpus and pickle it to ``src_data.pandas_pickle``.

    Reads ``fixed_couplets_in.txt`` / ``fixed_couplets_out.txt``, joins each
    input/output couplet pair into one whitespace-free string, and keeps only
    strings of exactly *target_len* characters.

    Args:
        target_len: required character length of kept samples (default 18,
            matching the original hard-coded value, so existing callers are
            unaffected).
    """
    with open("fixed_couplets_in.txt", encoding="utf-8") as f:
        in_data = f.readlines()
    with open("fixed_couplets_out.txt", encoding="utf-8") as f:
        out_data = f.readlines()
    # Concatenate the "in" and "out" halves of each couplet with all
    # whitespace removed.
    src_data = ["".join(i.strip().split() + j.strip().split())
                for i, j in tqdm(zip(in_data, out_data))]
    src_data = pd.DataFrame(src_data)
    src_data["data_len"] = src_data[0].str.len()
    # Keep only fixed-length samples so downstream batching needs no padding.
    src_data = src_data.loc[src_data["data_len"] == target_len, 0].values.tolist()
    pd.to_pickle({"data": src_data}, "src_data.pandas_pickle")


def gen_voc_to_pandas_pickle():
    """Build the character vocabulary from ``src_data.pandas_pickle`` using
    multiple worker processes and pickle it to ``voc_data.pandas_pickle``.

    The final vocabulary is sorted and bracketed by the special tokens
    ``<pad>`` (index 0) and ``<eos>`` (last index).
    """
    src_data = pd.read_pickle("src_data.pandas_pickle")["data"]
    work_num = 8
    # max(1, ...) guards against a zero step: with fewer samples than
    # workers, len(src_data) // work_num == 0 and range() would raise
    # ``ValueError: range() arg 3 must not be zero``.
    works_num_steps = max(1, len(src_data) // work_num)
    voc_id_list = Manager().list()  # shared sink for worker results
    p_list = []
    for i in range(0, len(src_data), works_num_steps):
        j = i + works_num_steps
        one_data = src_data[i:j]
        p = Process(target=gen_text_cut_by_jieba, args=(one_data, voc_id_list))
        p.start()
        p_list.append(p)
    for p in p_list:
        p.join()
    # Merge the per-worker vocabularies, de-duplicate, sort, add specials.
    voc_id_list = ["<pad>"] + sorted(set(np.hstack(voc_id_list))) + ["<eos>"]
    pd.to_pickle({"data": voc_id_list}, "voc_data.pandas_pickle")


def gen_text_cut_by_jieba_to_token(src_data_s, one_voc, out_dir, start_index, end_index):
    """Convert each string in *src_data_s* to a list of vocabulary ids and
    pickle the result to ``{out_dir}/{start_index}{end_index}.pandas_pickle``.

    Args:
        src_data_s: iterable of strings to tokenize (character level; the
            jieba word-level variant is disabled).
        one_voc: ordered vocabulary list; a token's id is its index here.
        out_dir: output directory for the pickle file.
        start_index, end_index: shard identifiers used only in the filename.

    Note: characters missing from *one_voc* are mapped to NaN by the
    left merge.
    """
    voc_df = pd.DataFrame({"voc": one_voc})
    voc_df["voc_id"] = voc_df.index.values
    total_list = []
    for src_data_one in tqdm(src_data_s):
        # tokens = pd.DataFrame({"voc": list(jieba.cut(src_data_one))})  # word-level, disabled
        # Use a fresh name per sample — the original rebound (shadowed) the
        # ``one_voc`` parameter inside the loop.
        tokens = pd.DataFrame({"voc": list(src_data_one)})
        # Restrict the lookup table to tokens actually present before the
        # merge (cheap pre-filter), then map token -> id.
        present = voc_df[voc_df["voc"].isin(tokens["voc"])]
        tokens = pd.merge(tokens, present, on="voc", how="left")
        total_list.append(tokens["voc_id"].values.tolist())
        # NOTE: for very large inputs this could checkpoint every n samples.
    pd.to_pickle({"data": total_list}, "{}/{}{}.pandas_pickle".format(out_dir, start_index, end_index))


def gen_src_data_to_token():
    """Merge the lines of ``poetries.txt`` into the accumulated vocabulary
    file ``max_voc_pandas.csv``, de-duplicated on the ``voc`` column.

    Side effect: reads and then overwrites ``max_voc_pandas.csv`` in place,
    and prints the resulting table shape.
    """
    # (A large block of commented-out experimental code for merging a
    # novel-derived vocabulary was removed here; see version control.)
    src_data = pd.read_csv("max_voc_pandas.csv")
    with open("poetries.txt", "r", encoding="utf-8") as f:
        data = f.readlines()
    # Append the stripped poetry lines after the existing vocabulary entries.
    src_data = src_data["voc"].values.tolist() + [i.strip() for i in tqdm(data)]
    src_data = pd.DataFrame({"voc": src_data})
    src_data = src_data.drop_duplicates("voc")
    print(src_data.shape)
    src_data.to_csv("max_voc_pandas.csv", index=False, encoding="utf-8")


def check_by_check_text(one_max_csv, one_need_check, return_list):
    """Append to *return_list* the rows of *one_max_csv* whose ``voc`` value
    starts with the string *one_need_check* (prefix match via slicing)."""
    prefix_len = len(one_need_check)
    is_prefix = one_max_csv["voc"].str[:prefix_len] == one_need_check
    return_list.append(one_max_csv[is_prefix])


def use_max_voc_data():
    """Demo: load the vocabulary CSV, bucket entries by their first
    character, then time a substring lookup for the last jieba token of a
    sample sentence. Prints elapsed times for rough profiling."""
    # --- one-off initialisation (could be cached across calls) ---
    vocab = pd.read_csv("max_voc_pandas.csv")
    vocab = vocab[vocab["voc"].str.len() >= 2]  # drop single characters
    vocab["head"] = vocab["voc"].str[:1]
    # Pre-split the table into per-first-character buckets so a lookup
    # only scans one group instead of the whole vocabulary.
    buckets = dict(iter(vocab.groupby(["head"], as_index=False)))

    sample = "明天下午去哪"
    t0 = time.time()
    tokens = list(jieba.lcut(sample))
    print(time.time() - t0)
    last_token = tokens[-1]

    print(time.time() - t0)
    candidates = buckets[last_token[0]]
    candidates = candidates[candidates["voc"].str.contains(last_token)]
    print(time.time() - t0)

if __name__ == '__main__':
    # freeze_support()
    # Data preprocessing
    # gen_data_pre()
    # Build a vocabulary from arbitrary txt corpora
    # gen_voc_to_pandas_pickle()
    # Merge vocabularies
    # gen_src_data_to_token()
    # Simple usage demo
    use_max_voc_data()
