#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# author    : Xiangwei Wang
# email     : wangxw-cn@qq.com
# datetime  : 2021/5/11 22:03

"""
"""

from sklearn.ensemble import RandomForestRegressor
from MLP.MLPtrain import read_data

import torch.multiprocessing as mp


# Worker entry point: each child process trains and predicts ONE target
# dimension of the data — index 0/1/2 = reposts / comments / likes.
def rfc_pred(pid, type, q):
    """Train a random-forest regressor on one label dimension and write predictions.

    Pulls one ``(features, labels, features_pred, result_file)`` tuple from the
    queue, fits on ``labels[type]``, predicts for ``features_pred``, and writes
    the rounded predictions tab-separated to ``result_file[type]``.

    Args:
        pid: worker id, used only in log messages.
        type: label index (0/1/2) selecting which target list to fit.
            NOTE(review): shadows the builtin ``type``; name kept so the
            positional call site stays compatible.
        q: multiprocessing queue holding the data tuple for this worker.
    """
    features, labels, features_pred, result_file = q.get()

    # Train with a random-forest regressor. n_jobs=1 on purpose: larger values
    # copy the training data per job and blow past 32 GB of RAM (author's note).
    rfc = RandomForestRegressor(n_jobs=1)
    print(f"pid: {pid}, 开始训练: {type}")
    rfc.fit(features, labels[type])

    # Evaluate on the prediction set.
    print(f"pid: {pid}, 训练完成, 开始评估")
    result = rfc.predict(features_pred)
    print(f"pid: {pid}, 开始写入文件")

    # Build the output in one pass instead of quadratic += concatenation.
    # Each value is followed by a tab, matching the original output exactly
    # (including the trailing tab after the last value).
    res_str = "".join(f"{round(pred)}\t" for pred in result)
    with open(result_file[type], "w", encoding='utf-8') as f:
        f.write(res_str)

if __name__ == "__main__":

    # Output paths, one file per label dimension (0=reposts, 1=comments, 2=likes).
    result_file = ["../../Data/predict/result_0.txt", "../../Data/predict/result_1.txt", "../../Data/predict/result_2.txt"]

    print("开始读数据: train_data")
    train_data = read_data("../../Data/feature_sum.json")
    print("开始读数据: predict_data")
    predict_data = read_data("../../Data/predict/feature_sum.json")

    print("读取完成")

    # Split training samples into a feature matrix and three parallel label
    # lists in a single pass (sample[0] = feature vector, sample[1] = targets).
    features = []
    labels = [[], [], []]
    for sample in train_data:
        features.append(sample[0])
        for dim, bucket in enumerate(labels):
            bucket.append(sample[1][dim])

    # Prediction set only needs the feature vectors.
    features_pred = [sample[0] for sample in predict_data]

    # Fan out three worker processes, one per label dimension, and hand each
    # its data through a managed queue.
    # NOTE(review): every put() pickles the full dataset once per worker, so
    # the data is duplicated three times in transit — acceptable here, but a
    # memory concern for larger datasets.
    processes = []
    n_worker = 3
    data_queue = mp.Manager().Queue()

    for w in range(n_worker):
        # One copy of the shared data per worker; the worker indexes into
        # labels/result_file with its own `w`.
        data_queue.put((features, labels, features_pred, result_file))
        p = mp.Process(
            target=rfc_pred,
            args=(w, w, data_queue)
        )
        p.start()
        processes.append(p)

    # Wait for all workers to finish before exiting.
    for p in processes:
        p.join()

    print("完成")

