import time
from glob import glob
from multiprocessing import Process, Manager
from multiprocessing import freeze_support

import numpy as np
import pandas as pd

from cvfo_gpt import CvFo
import torch
import psutil
from tqdm import tqdm
from config import config
from check_data import gen_hash


# Wait helper: throttle workers while system memory is scarce.
def wait_process():
    """Block while available system memory is low.

    If less than 10% of memory is currently free, poll every 10 seconds
    until more than 20% becomes free again (the 10%/20% gap acts as
    hysteresis so workers don't resume and stall in rapid succession).
    """
    available_memory_percent = 100 - psutil.virtual_memory().percent
    if available_memory_percent < 10:
        while True:
            time.sleep(10)
            # Re-sample memory on every iteration. The original read the
            # stale pre-loop value here, so once this branch was entered
            # the condition below could never become true and the worker
            # hung forever.
            available_memory_percent = 100 - psutil.virtual_memory().percent
            if available_memory_percent > 20:
                break


def gen_basic(data_id):
    """Build the segment-halving schedule used by sample_text.

    Returns a pair ``(offsets, matrix)``: for ``data_id == 0`` the
    degenerate ``([seq_len], [seq_len])``; otherwise ``matrix`` holds one
    row per stage (each stage halves every prior segment width and opens a
    fresh ``seq_len // 2`` segment) and ``offsets`` are the cumulative
    end positions derived from its upper triangle.
    """
    seq_len = config["seq_len"]
    if data_id == 0:
        return [seq_len], [seq_len]
    widths = [seq_len]
    stages = [list(widths)]
    for _ in tqdm(range(data_id)):
        # Halve every existing segment, then append a new half-width one.
        widths = [w // 2 for w in widths]
        widths.append(seq_len // 2)
        stages.append(list(widths))
    # Ragged rows are zero-padded so the schedule becomes rectangular.
    matrix = pd.DataFrame(stages).fillna(0).astype("int").values
    # Upper-triangular row sums give each stage's span; cumsum -> offsets.
    offsets = np.cumsum(np.sum(np.triu(matrix), -1))
    return offsets, matrix


def sample_text(one_data, data_id):
    """Subsample raw text down to roughly one context window at level `data_id`.

    Level 0 is a plain prefix slice of ``seq_len`` characters. Higher levels
    appear to stride earlier segments more aggressively (powers of two), and
    once segment widths bottom out at 64 the oldest prefix is replaced by its
    hash instead of being strided — TODO confirm intent against callers.
    """
    if data_id == 0:
        return one_data[:config["seq_len"]]
    # basic_len: cumulative segment end offsets; basic: per-stage widths.
    basic_len, basic = gen_basic(data_id)
    # Locate the first stage whose leading segment width has reached 64.
    if 64 not in basic[:, 0].tolist():

        row = basic.shape[0]
    else:
        row = basic[:, 0].tolist().index(64)

    if row + 1 >= data_id:
        # No segment has collapsed yet: stride segment j by 2**(data_id - j),
        # so earlier (older) segments are sampled more sparsely.
        two_data = one_data[:basic_len[0]][::2 ** data_id]
        for j in range(1, data_id + 1):
            two_data += one_data[basic_len[j - 1]:basic_len[j]][::2 ** (data_id - j)]

    else:
        # Leading segments bottomed out at width 64: collapse the whole
        # prefix into its hash, then stride only the remaining segments.
        end = basic[-1]
        end_index = end.tolist().index(64)
        hash_text = one_data[:basic_len[end_index - 1]]
        if hash_text == "":
            hash_text = ""
        else:
            hash_text = gen_hash(hash_text)
        two_data_list = []
        for one in range(end_index - 1, basic_len.size - 1):
            # Stop once the raw text no longer reaches this segment.
            if len(one_data) <= basic_len[one]:
                break
            two_data = one_data[basic_len[one]:basic_len[one + 1]]
            # Stride presumably chosen to shrink the segment toward the
            # target width end[one + 1] — TODO confirm the // 2 factor.
            two_data = two_data[::config["seq_len"] // end[one + 1] // 2]
            two_data_list.append(two_data)
        two_data = hash_text + "".join(two_data_list)
    return two_data


def process_data1(signal_dict, data_seq, data_list, one_name):
    """Worker: encode files at the configured sampling level and feed the queue.

    Reads file paths in batches, encodes them via get_data1, and appends
    batch-sized slices of id-rows to the shared `data_seq` list. Sets
    ``signal_dict[one_name]`` to 1 when all input has been consumed.
    """
    batch_size = signal_dict.get("batch_size")

    for start in tqdm(range(0, len(data_list), batch_size)):
        # Back off while system memory is scarce.
        wait_process()
        stop = start + batch_size
        # Read and encode one batch of files.
        encoded = get_data1(data_list[start:stop], signal_dict["voc_dict"], signal_dict["sample_id"],
                            signal_dict["lora_name"])
        for lo in range(0, len(encoded), batch_size):
            hi = lo + batch_size
            # NOTE(review): counts hi - lo even when the final slice is
            # shorter than batch_size — confirm the tally is only cosmetic.
            signal_dict["data_total_count"] += hi - lo
            data_seq.append(encoded[lo:hi])
    # Flag this worker as finished so the trainer's drain loop can exit.
    signal_dict[one_name] = 1


# Data processing (with level-based subsampling)
def get_data1(data_path, voc_dict, data_id, name):
    """Encode files into padded token-id rows after subsampling.

    Each file is whitespace-stripped, length-checked for level `data_id`,
    subsampled via sample_text, then scanned character by character; every
    character whose hash equals `name` emits one row: ids seen so far, the
    next-character label id, then "。"-padding out to seq_len.
    """
    seq_len = config["seq_len"]
    fallback = voc_dict["。"]
    pad_ids = [fallback] * seq_len
    # Raw length required to populate every segment at this level.
    required_len = seq_len + (seq_len // 2) * data_id

    rows = []
    for path in data_path:
        with open(path, "r", encoding="utf-8") as handle:
            raw = "".join(handle.read().split())
        # Skip files too short for this sampling level.
        if len(raw) <= required_len - seq_len // 2:
            continue
        sampled = sample_text(raw, data_id)
        prefix = []
        for pos, char in enumerate(sampled[:-1]):
            prefix.append(voc_dict.get(char, fallback))
            if gen_hash(char) == name:
                # NOTE(review): the label is looked up in the RAW text at
                # pos + 1 while pos indexes the SAMPLED text — confirm this
                # cross-indexing is intended (get_data uses the same text
                # for both).
                rows.append(
                    np.array(prefix +
                             [voc_dict.get(raw[pos + 1], fallback)] +
                             pad_ids[len(prefix) + 1:]).copy().tolist())
    return rows


def process_data(signal_dict, data_seq, data_list, one_name):
    """Worker: encode files in batches and push id-rows onto the shared queue.

    Level-0 counterpart of process_data1 (no subsampling). Sets
    ``signal_dict[one_name]`` to 1 once every path has been processed.
    """
    batch_size = signal_dict.get("batch_size")

    for start in range(0, len(data_list), batch_size):
        # Back off while system memory is scarce.
        wait_process()

        stop = start + batch_size
        # Read and encode one batch of files.
        encoded = get_data(data_list[start:stop], signal_dict["voc_dict"], signal_dict["lora_name"])
        for lo in range(0, len(encoded), batch_size):
            hi = lo + batch_size
            # NOTE(review): counts hi - lo even when the last slice is
            # shorter than batch_size — confirm the tally is only cosmetic.
            signal_dict["data_total_count"] += hi - lo
            data_seq.append(encoded[lo:hi])
    # Mark this worker done so the training loop can detect completion.
    signal_dict[one_name] = 1


# Data processing
def get_data(data_path, voc_dict, name):
    """Encode text files into fixed-length token-id rows.

    Each file is whitespace-stripped and clipped to seq_len characters.
    For every character whose hash equals `name`, one row is emitted: the
    ids accumulated so far, the next-character label id, then "。"-padding
    out to seq_len.
    """
    rows = []
    fallback = voc_dict["。"]
    pad_ids = [fallback] * config["seq_len"]
    for path in data_path:
        with open(path, "r", encoding="utf-8") as handle:
            # Strip all whitespace and clip to one context window.
            text = "".join(handle.read().split())[:config["seq_len"]]
            prefix = []
            for pos, char in enumerate(text[:-1]):
                prefix.append(voc_dict.get(char, fallback))
                if gen_hash(char) == name:
                    rows.append(
                        prefix + [voc_dict.get(text[pos + 1], fallback)] + pad_ids[len(prefix) + 1:])
    return rows


# 训练程序
def train_data(bar_train, one_batch_train, opt_train, loss_f_train, net_train):
    input_data = torch.Tensor(one_batch_train).int()
    label_o = input_data[:, 1:]
    input_o = input_data[:, :-1]
    out = net_train(input_o)
    loss = loss_f_train(out.reshape([-1, out.shape[-1]]), label_o.reshape([-1]).long())
    acc = sum(sum((torch.argmax(out, -1) == label_o).int())) / (out.shape[0] * out.shape[1])
    opt_train.zero_grad()
    loss.backward()
    opt_train.step()
    bar_train.set_description("loss_{:.5f}___acc___{:.5f}".format(loss.item(), acc.item()))
    bar_train.update(len(one_batch_train))


# Training controller
def main_data(leval):
    """Fine-tune one LoRA adapter per data group at sampling level `leval`.

    For each group file: builds the base model with frozen weights and an
    Adam optimizer over only the LoRA parameters, spawns worker processes
    that encode text into token-id batches on a shared Manager list, drains
    that list in the main process to run optimization steps, then saves the
    LoRA weights.
    """
    voc_id = pd.read_pickle(config["voc_data_path"])
    net = CvFo(len(voc_id) + 1, config["hidden_dim"], config["row_layers"],
               config["heads"],
               config["group_num"])
    # Load the pretrained base model; only net.lora parameters are trained.
    net.load_state_dict(torch.load(config["basic_model_path"]))
    loss_func = torch.nn.CrossEntropyLoss()
    opt = torch.optim.Adam(lr=0.0001, params=net.lora.parameters())

    lora_group_path = glob(config["lora_data_group_path"] + "*")
    for one_group in lora_group_path:
        print("lora_{}".format(leval))
        path_list = pd.read_pickle(one_group)
        # Group hash extracted from the filename's third-from-last "_" field
        # — assumes a fixed naming scheme; TODO confirm against the producer.
        hash_name = one_group.split("_")[-3]

        num_works = config["num_works"]

        epochs = config["epochs"]
        # Shared state between the trainer and the worker processes.
        signal_dict = Manager().dict()
        data_seq = Manager().list()

        signal_dict["lora_name"] = hash_name

        signal_dict["batch_size"] = config["batch_size"]
        signal_dict["epochs"] = epochs
        signal_dict["data_total_count"] = 0
        signal_dict["data_totals"] = len(path_list) * epochs
        signal_dict["voc_dict"] = {k: v for v, k in enumerate(voc_id)}
        signal_dict["sample_id"] = leval
        signal_dict["son_process"] = 0

        # Pre-shuffle the data for every epoch up front.
        total_path_list = []
        other_count = 0
        for i in range(epochs):
            np.random.shuffle(path_list)

            # Column 0 is presumably the file path, column 1 a per-file
            # count used for the progress-bar total — TODO confirm schema.
            total_path_list += np.array(path_list)[:, 0].copy().tolist()
            other_count += np.array(path_list)[:, 1].astype("int").sum()
        path_list = total_path_list

        # Launch asynchronous data-producing workers.

        p_list = []
        if num_works > len(path_list):
            num_works = 1
        signale_flag = 0
        signale_flag_name = []
        # NOTE(review): when len(path_list) is not divisible by num_works
        # this range spawns num_works + 1 processes, while the drain loop
        # below only waits for num_works completion flags — confirm.
        for i in range(0, len(path_list), len(path_list) // num_works):
            j = i + len(path_list) // num_works

            signal_dict["single_{}".format(signale_flag)] = 0
            signale_flag_name.append("single_{}".format(signale_flag))
            if leval == 0:

                p = Process(target=process_data,
                            args=(signal_dict, data_seq, path_list[i:j], "single_{}".format(signale_flag)))
            else:

                p = Process(target=process_data1,
                            args=(signal_dict, data_seq, path_list[i:j], "single_{}".format(signale_flag)))
            signale_flag += 1
            p_list.append(p)
            p.start()

        # Training loop: drain batches produced by the workers.
        # NOTE(review): other_count was already accumulated over all epochs
        # above, so epochs * other_count may over-state the bar total.
        bar = tqdm(total=epochs * other_count)
        total_count = 1
        while True:
            # Exit once every worker reported done and the queue is empty.
            if sum([signal_dict[i] for i in signale_flag_name]) >= num_works and len(data_seq) == 0:
                break
            if len(data_seq) > 0:
                one_batch = data_seq.pop(0)
            else:
                one_batch = []
            if len(one_batch) > 0:
                train_data(bar, one_batch, opt, loss_func, net)

                total_count += 1

        # Training finished: reap the worker processes.
        for p in p_list:
            p.join()
        net.save_lora(config["lora_model_dir"] + "/lora_" + "{}_{}_".format(hash_name, leval) + ".torch")


def train_lora_main(leval):
    """Entry point: run the LoRA training pipeline at sampling level `leval`."""
    return main_data(leval)


if __name__ == '__main__':
    # Required so multiprocessing works in frozen (e.g. PyInstaller) Windows builds.
    freeze_support()
    # NOTE(review): sampling level is hard-coded to 1 here — confirm intended.
    train_lora_main(1)
