import os.path
from multiprocessing import Process, Manager, freeze_support

import numpy as np
import pandas as pd

from net import QKS

from glob import glob
# import jieba
import warnings
import paddle
from tqdm import tqdm

# Silence all library warnings so they don't interleave with the tqdm progress bar output.
warnings.filterwarnings("ignore")


def son_process_of_train_net(epoch, bar, one_data, max_len, loss_avg, net, loss_f, opt, steps):
    """Run a single optimisation step on one mini-batch.

    Every 1000 recorded losses the model weights and the loss history are
    checkpointed to disk.  ``max_len`` is accepted for interface
    compatibility but is not used in this function.

    :param epoch: current epoch index (shown in the progress-bar text).
    :param bar: tqdm progress bar whose description is updated per step.
    :param one_data: mini-batch of token-id sequences (list of rows).
    :param max_len: unused; kept so existing callers keep working.
    :param loss_avg: shared, growing list of per-step loss values.
    :param net: the model being trained.
    :param loss_f: loss function (cross-entropy over next-token logits).
    :param opt: optimiser driving ``net``'s parameters.
    :param steps: step counter used only for the progress-bar text.
    """
    # Checkpoint periodically (checked before this step's loss is appended).
    if (len(loss_avg) + 1) % 1000 == 0:
        paddle.save(net.state_dict(), "long_attention_model")
        paddle.save({"data": loss_avg}, "loss_avg")

    batch = np.array(one_data)
    # Next-token objective: targets are the inputs shifted left by one token.
    inputs = paddle.to_tensor(batch[:, :-1])
    targets = paddle.to_tensor(batch[:, 1:])

    logits, _ = net(inputs.astype("int64"), 0)
    loss = loss_f(logits, targets.astype("int64"))

    # Token-level accuracy over the whole (flattened) batch.
    predicted = paddle.argmax(logits, -1).reshape([-1])
    acc = np.mean((predicted == targets.reshape([-1])).numpy())

    loss_avg.append(loss.item())
    bar.set_description(
        desc="{}{}{}{}{:.5f}".format(epoch, "____", "steps___{}____".format(steps),
                                     "loss___{:.5f}___".format(np.mean(loss_avg)), acc))

    opt.clear_grad()
    loss.backward()
    opt.step()


def read_one_pickle(read_path, data_voc):
    """Worker-process task: load every pickle file listed in ``read_path``.

    Each file is expected to be a pandas pickle holding a dict with a
    ``"data"`` list; all lists are concatenated and the combined result is
    appended (as one element) to the shared ``data_voc`` list.

    :param read_path: iterable of pickle file paths handled by this worker.
    :param data_voc: shared list (multiprocessing ``Manager().list()``)
        collecting one merged list per worker.
    """
    collected = []
    for file_path in tqdm(read_path):
        collected.extend(pd.read_pickle(file_path)["data"])
    data_voc.append(collected)


def load_data_to_mem(paths_list_r, voc_id, works_num):
    """Read all training pickles in parallel and write a padded cache file.

    The paths are shuffled (in place!), split across ``works_num`` worker
    processes, merged back together, padded to a rectangular table with the
    last vocabulary id, and saved to
    ``"src_data_token_and_padding.pandas_pickle"``.

    :param paths_list_r: list of pickle file paths; shuffled in place.
    :param voc_id: vocabulary container; ``len(voc_id) - 1`` is used as the
        padding token id.
    :param works_num: number of worker processes to spawn.
    """
    resv_data = Manager().list()

    np.random.shuffle(paths_list_r)
    p_list = []
    # Partition the paths into exactly `works_num` (possibly empty) chunks.
    # The previous step-based slicing raised ValueError (range step of 0)
    # whenever len(paths_list_r) < works_num, and spawned an extra process
    # when the division was uneven; array_split avoids both problems.
    for chunk in np.array_split(np.asarray(paths_list_r, dtype=object), works_num):
        p = Process(target=read_one_pickle, args=(chunk.tolist(), resv_data))
        p.start()
        p_list.append(p)

    # Wait for all workers to finish before merging their results.
    for p in p_list:
        p.join()

    new_data = []
    for part in resv_data:
        new_data += part
    # Ragged rows are padded with the last vocabulary id
    # (assumed to be the padding token — TODO confirm against voc_data build).
    padded = pd.DataFrame(new_data).fillna(len(voc_id) - 1)
    pd.to_pickle({"data": padded}, "src_data_token_and_padding.pandas_pickle")


def train_net_multi_process(paths_list_r):
    """Train the QKS model on the token data found under ``paths_list_r``.

    Builds (or reuses) the padded data cache, then runs the mini-batch
    training loop, finally saving model weights and loss history to disk.

    :param paths_list_r: list of raw training pickle file paths; only used
        when the cache file does not exist yet.
    """
    voc_id = pd.read_pickle("voc_data.pandas_pickle")["data"]

    net = QKS(len(voc_id), 256, 8)
    # To resume from a checkpoint, uncomment:
    # net.load_dict(paddle.load("long_attention_model"))
    print("加载成功")
    opt = paddle.optimizer.Adam(parameters=net.parameters(), learning_rate=0.0003)
    loss_f = paddle.nn.CrossEntropyLoss()

    max_len = 256
    works_num = 8
    batch_size = 750
    epochs = 54 * 10 * 60

    # Build the cache once if missing; either way the data comes from the
    # same file (the previous if/else duplicated the identical read).
    cache_path = "src_data_token_and_padding.pandas_pickle"
    if not os.path.exists(cache_path):
        load_data_to_mem(paths_list_r, voc_id, works_num)
    data_sets = pd.read_pickle(cache_path)["data"].values.tolist()

    bar = tqdm(range(epochs))
    loss_avg = []
    for epoch in bar:
        np.random.shuffle(data_sets)
        for start in range(0, len(data_sets), batch_size):
            end = start + batch_size
            son_process_of_train_net(epoch, bar, data_sets[start:end], max_len,
                                     loss_avg, net, loss_f, opt, end)
    # Final checkpoint after all epochs complete.
    paddle.save(net.state_dict(), "long_attention_model")
    paddle.save({"data": loss_avg}, "loss_avg")


if __name__ == '__main__':
    # Required on Windows when the script is frozen into an executable;
    # harmless elsewhere (no-op on fork-based platforms).
    freeze_support()
    # Train on every file under train_data_dir/.
    train_net_multi_process(glob("train_data_dir/*"))
