import time
from glob import glob
from multiprocessing import Process, Manager
from multiprocessing import freeze_support

import numpy as np
import pandas as pd

from cvfo_gpt import CvFo
import torch
import psutil
from tqdm import tqdm
from config import config


# Wait helper: blocks while available system memory is low.
def wait_process():
    """Block the caller while system memory pressure is high.

    If available memory drops below 10%, poll every 10 seconds until it
    recovers above 20% (the hysteresis gap avoids rapid flip-flopping).

    BUG FIX: the original sampled ``available_memory_percent`` once before
    the loop and never refreshed it, so once entered the loop never exited.
    The free percentage is now re-read on every iteration.
    """
    if 100 - psutil.virtual_memory().percent < 10:
        while 100 - psutil.virtual_memory().percent <= 20:
            time.sleep(10)


def process_data(signal_dict, data_seq, data_list, one_name):
    """Feeder-process loop: tokenize file paths in batches and push them
    onto the shared queue.

    Args:
        signal_dict: Manager dict shared with the trainer (reads
            "batch_size" / "voc_dict"; writes "data_total_count" and the
            per-worker completion flag).
        data_seq: Manager list used as the batch queue consumed by the trainer.
        data_list: file paths this worker is responsible for.
        one_name: key in signal_dict set to 1 when this worker finishes.
    """
    batch_size = signal_dict.get("batch_size")

    for start in range(0, len(data_list), batch_size):
        # Throttle while system memory is low.
        wait_process()
        batch_paths = data_list[start:start + batch_size]
        # Read / tokenize the batch of files.
        one_data = get_data(batch_paths, signal_dict["voc_dict"])
        # BUG FIX: the original added (end - start), which overcounts when the
        # final batch is shorter than batch_size; count the actual items.
        # NOTE(review): += on a Manager dict is read-modify-write and racy
        # across workers — the counter is best-effort only.
        signal_dict["data_total_count"] += len(batch_paths)
        data_seq.append(one_data)
    # Mark this worker done (original wrote ``= +1`` — unary plus, same value).
    signal_dict[one_name] = 1
    # 检测结束


# Data processing: read files and convert their text to token-id sequences.
def get_data(data_path, voc_token):
    """Convert each file into ONE token-id sequence of length ``seq_len``.

    For every path: read the file, strip all whitespace, take the first
    ``seq_len`` characters, right-pad with "。" if short, and map each
    character to its vocabulary id (unknown chars map to the "。" id).
    Empty files produce no sequence (matching the original behavior).

    NOTE(review): the original looped over all seq_len-sized windows but
    ``break``-ed after the first iteration, so only the first window per
    file was ever used — and the progress-bar accounting in main_data
    relies on exactly one sequence per file. The dead loop is removed here
    without changing that behavior.

    Args:
        data_path: iterable of file paths to read.
        voc_token: dict mapping character -> token id; must contain "。".

    Returns:
        list of token-id lists, one per non-empty file.
    """
    seq_len = config["seq_len"]  # hoisted: invariant across files
    pad_id = voc_token["。"]
    data_list = []
    for path in data_path:
        with open(path, "r", encoding="utf-8") as f:
            text = "".join(f.read().split())
        if not text:
            continue  # original produced nothing for empty files
        chunk = text[:seq_len]
        if len(chunk) != seq_len:
            chunk += "。" * (seq_len - len(chunk))
        data_list.append([voc_token.get(ch, pad_id) for ch in chunk])
    return data_list


# Training step: one forward/backward/optimize pass per batch.
def train_data(bar_train, one_batch_train, opt_train, loss_f_train, net_train):
    """Run one next-token-prediction optimization step on a batch.

    The batch is shifted by one position: ``input = batch[:, :-1]`` predicts
    ``label = batch[:, 1:]``. Updates the tqdm bar description with the
    current loss/accuracy and advances it by the batch size.

    Args:
        bar_train: progress bar exposing set_description() and update().
        one_batch_train: list of equal-length token-id lists.
        opt_train: optimizer over net_train's parameters.
        loss_f_train: CrossEntropyLoss-style criterion (logits, class ids).
        net_train: model mapping (batch, seq) int ids -> (batch, seq, vocab) logits.
    """
    # Build the tensor as long directly (the legacy torch.Tensor(...).int()
    # constructor forced an extra .long() cast inside the loss call).
    input_data = torch.tensor(one_batch_train, dtype=torch.long)
    label_o = input_data[:, 1:]   # next-token targets
    input_o = input_data[:, :-1]
    out = net_train(input_o)
    loss = loss_f_train(out.reshape(-1, out.shape[-1]), label_o.reshape(-1))
    # Token-level accuracy; clearer than the original nested sum(sum(...)).
    acc = (torch.argmax(out, -1) == label_o).float().mean()
    opt_train.zero_grad()
    loss.backward()
    opt_train.step()
    bar_train.set_description("loss_{}___acc___{}".format(loss.item(), acc.item()))
    bar_train.update(len(one_batch_train))


# Control program: orchestrates the feeder processes and the training loop.
def main_data():
    """Orchestrate training: spawn feeder processes that tokenize files into
    a shared queue while this process consumes batches and trains the model.

    Fixes over the original:
      * bar total / checkpoint interval: path_list is already expanded to
        len(original) * epochs before the loop, but the original multiplied
        by epochs AGAIN, inflating both by a factor of epochs.
      * exit condition compared the number of finished workers against
        num_works, but an uneven split creates MORE chunks than num_works,
        so training could stop before all feeders finished.
      * chunk size is clamped to >= 1 (num_works > len(path_list) would
        make the range step 0 and raise), and the checkpoint interval is
        clamped to >= 1 (small runs would raise ZeroDivisionError).
    """
    voc_id = pd.read_pickle(config["voc_data_path"])
    net = CvFo(len(voc_id) + 1, config["hidden_dim"], config["row_layers"],
               config["heads"],
               config["group_num"])
    loss_func = torch.nn.CrossEntropyLoss()
    opt = torch.optim.Adam(lr=0.0001, params=net.parameters())
    path_list = glob(config["data_set_path"])

    num_works = config["num_works"]
    epochs = config["epochs"]

    # Shared state between the feeder processes and this (training) process.
    signal_dict = Manager().dict()
    data_seq = Manager().list()

    signal_dict["stop"] = False
    signal_dict["batch_size"] = config["batch_size"]
    signal_dict["epochs"] = epochs
    signal_dict["data_total_count"] = 0
    signal_dict["data_totals"] = len(path_list) * epochs
    signal_dict["voc_dict"] = {k: v for v, k in enumerate(voc_id)}

    # Pre-shuffle the file order for every epoch up front; afterwards
    # path_list holds len(original) * epochs entries.
    total_path_list = []
    for _ in range(epochs):
        np.random.shuffle(path_list)
        total_path_list += list(path_list)
    path_list = total_path_list

    # Launch one feeder process per chunk of paths.
    chunk = max(1, len(path_list) // num_works)
    p_list = []
    signale_flag_name = []
    for flag, i in enumerate(range(0, len(path_list), chunk)):
        name = "single_{}".format(flag)
        signal_dict[name] = 0
        signale_flag_name.append(name)
        p = Process(target=process_data,
                    args=(signal_dict, data_seq, path_list[i:i + chunk], name))
        p_list.append(p)
        p.start()

    # Training loop. path_list already includes the epochs expansion.
    bar = tqdm(total=len(path_list))
    # Checkpoint roughly 10 times over the whole run; interval never 0.
    save_every = max(1, len(path_list) // config["batch_size"] // 10)
    total_count = 1
    while True:
        # Stop only when EVERY feeder chunk reported done and the queue is empty.
        done = sum(signal_dict[n] for n in signale_flag_name)
        if done == len(signale_flag_name) and len(data_seq) == 0:
            break
        if len(data_seq) > 0:
            one_batch = data_seq.pop(0)
        else:
            one_batch = []
            time.sleep(0.01)  # avoid a hot busy-wait while feeders catch up
        if len(one_batch) > 0:
            train_data(bar, one_batch, opt, loss_func, net)
            if total_count % save_every == 0:
                torch.save(net.state_dict(), "model_" + str(bar.n) + ".torch")
            total_count += 1

    # Reap feeder processes and save the final model.
    for p in p_list:
        p.join()
    torch.save(net.state_dict(), "basic_model.torch")


if __name__ == '__main__':
    # Required on Windows when the script is frozen into an executable,
    # since the feeder children are started via multiprocessing.Process.
    freeze_support()

    main_data()
