import os
import time
from glob import glob
from multiprocessing import Process, Manager
from multiprocessing import freeze_support

import pandas as pd
import psutil
from tqdm import tqdm

from check_data import gen_hash
from config import config


# 等待方法
# Throttle helper: blocks the caller while system memory is scarce.
def wait_process():
    """Block while available system memory is below 10%.

    Polls every 10 seconds and returns once availability climbs above 20%
    (the 10%/20% gap is hysteresis to avoid rapid stop/start cycling).

    Bug fix: the original computed ``available_memory_percent`` once before
    the loop and never refreshed it, so a low-memory condition caused an
    infinite sleep loop.
    """
    available_memory_percent = 100 - psutil.virtual_memory().percent
    if available_memory_percent < 10:
        while True:
            time.sleep(10)
            # Re-read memory usage each cycle; the pre-loop value is stale.
            available_memory_percent = 100 - psutil.virtual_memory().percent
            if available_memory_percent > 20:
                break


def process_data1(signal_dict, data_seq, data_list):
    """Worker loop: process ``data_list`` in batches via ``get_data1``.

    Reads ``batch_size`` and ``sample_id`` from the shared ``signal_dict``
    and appends each batch's resulting frames to the shared ``data_seq``.
    """
    batch_size = signal_dict.get("batch_size")

    for start in tqdm(range(0, len(data_list), batch_size)):
        # Back off while system memory is scarce.
        wait_process()
        batch = data_list[start:start + batch_size]
        # Read/align this batch and publish results to the shared list.
        data_seq += get_data1(batch, signal_dict["sample_id"])


# 数据处理
# Multi-resolution data processing: each file contributes one window made of
# segments sampled at descending power-of-two strides.
def get_data1(data_path, data_id):
    """Build per-file character-count DataFrames from strided text windows.

    For each file in ``data_path``, the first window of ``data_len``
    characters is split into ``data_id + 1`` segments; segment ``k`` is
    subsampled with stride ``2**(data_id - k)``. Segments that would be
    shorter than 32 characters are replaced by a 32-char placeholder, and a
    run of placeholders at the front is collapsed into a single hash of the
    covered text.

    Args:
        data_path: iterable of text file paths.
        data_id: number of extra half-length segments (0 = single segment).

    Returns:
        list of DataFrames with columns ``voc`` (unique char), ``path`` and
        ``count`` (occurrences of the char in the window).

    Fixes vs. original: the ``if len_id == 0`` branch was unreachable
    (``len_id`` was incremented to >= 1 first) and is removed — the general
    slice is identical for the first segment since ``sum([]) == 0``; the
    unused ``i_id`` loop variable is dropped; the list mislabeled
    ``data_dict`` is renamed.
    """
    seq_len = config["seq_len"]
    data_len = seq_len + (seq_len // 2) * data_id
    # Strides: descending powers of two, e.g. data_id=2 -> [4, 2, 1].
    stride_list = [2 ** i for i in range(data_id, -1, -1)]
    # Segment lengths: one full seq_len segment, then half-length segments.
    sample_len_list = [seq_len] + [seq_len // 2] * data_id
    frames = []
    for path in data_path:
        with open(path, "r", encoding="utf-8") as f:
            text = "".join(f.read().split())
        # Skip files too short to fill (most of) one window.
        if len(text) <= data_len - 256:
            continue

        for i in range(0, len(text), data_len):
            window = text[i:i + data_len]
            pieces = []
            piece_lens = []
            for seg, stride in enumerate(stride_list, start=1):
                if sample_len_list[seg - 1] // stride <= 32:
                    # Segment would be too short after striding: placeholder.
                    piece = "3" * 32
                else:
                    lo = sum(sample_len_list[:seg - 1])
                    hi = sum(sample_len_list[:seg])
                    piece = window[lo:hi][::stride]
                piece_lens.append(len(piece))
                pieces.append(piece)

            if 32 in piece_lens:
                # Collapse the leading placeholder segments into one hash of
                # the raw text they covered.
                n_placeholder = piece_lens.count(32)
                th = gen_hash(window[:sum(sample_len_list[:n_placeholder])])
                pieces = [th] + pieces[n_placeholder:]

            window = "".join(pieces)

            # Pad to exactly seq_len characters.
            if seq_len != len(window):
                window += "。" * (seq_len - len(window))
            one_df = pd.DataFrame({"voc": list(window), "path": path}).drop_duplicates("voc")
            two_df = pd.DataFrame({"voc": list(window), "count": path}).groupby("voc").count()
            one_df = pd.merge(one_df, two_df, on="voc", how="left")
            frames.append(one_df)
            # NOTE: only the first window of each file is used.
            break
    return frames


def process_data(signal_dict, data_seq, data_list):
    """Worker loop: process ``data_list`` in batches via ``get_data``.

    Reads ``batch_size`` from the shared ``signal_dict`` and appends each
    batch's resulting frames to the shared ``data_seq``.
    """
    batch_size = signal_dict.get("batch_size")

    for start in tqdm(range(0, len(data_list), batch_size)):
        # Back off while system memory is scarce.
        wait_process()
        # Read/align this batch and publish results to the shared list.
        batch = data_list[start:start + batch_size]
        data_seq += get_data(batch)


# 数据处理

# Simple per-file character statistics (first seq_len characters only).
def get_data(data_path):
    """Build one character-count DataFrame per file.

    Each file's whitespace-stripped text is truncated to ``config["seq_len"]``
    characters; the result rows hold the unique characters (``voc``), the
    source ``path`` and each character's occurrence ``count``.
    """
    frames = []
    for path in data_path:
        with open(path, 'r', encoding='utf-8') as handle:
            text = "".join(handle.read().split())[:config["seq_len"]]
        chars = list(text)
        uniq_df = pd.DataFrame({"voc": chars, "path": path}).drop_duplicates("voc")
        count_df = pd.DataFrame({"voc": chars, "count": path}).groupby("voc").count()
        frames.append(pd.merge(uniq_df, count_df, on="voc", how="left"))
    return frames


# 控制程序
def main_data(sample_id):
    path_list = glob(config["data_set_path"])
    for i in tqdm(glob(config["lora_data_group_path"] + "*")):
        os.remove(i)

    num_works = 10
    batch_size = 128

    # 参数设置
    signal_dict = Manager().dict()
    data_seq = Manager().list()

    signal_dict["batch_size"] = batch_size
    signal_dict["sample_id"] = sample_id

    # 开启异步处理数据
    p_list = []
    for i in range(0, len(path_list), len(path_list) // num_works):
        j = i + len(path_list) // num_works
        if sample_id == 0:

            p = Process(target=process_data, args=(signal_dict, data_seq, path_list[i:j]))
        else:
            p = Process(target=process_data1, args=(signal_dict, data_seq, path_list[i:j]))
        p.start()
        p_list.append(p)
    # 训练结束回收进程
    for p in p_list:
        p.join()
    total_dict = pd.concat(data_seq)
    for i in tqdm(total_dict.groupby("voc")):
        pd.to_pickle(i[1][["path", "count"]].values.tolist(),
                     "{}lora_data_group_{}_.pandas_pickle".format(config["lora_data_group_path"], gen_hash(i[0])))



if __name__ == '__main__':
    # Debug entry points for running a single pipeline in-process:
    # get_data(glob(config["data_set_path"]))
    # get_data1(glob(config["data_set_path"]),1)

    # Required on Windows when the script is frozen into an executable,
    # because worker processes re-import this module on spawn.
    freeze_support()
    main_data(0)
