"""
利用进程池执行异步调用多进程任务
"""

import json
import os
import time
from collections import Counter
from multiprocessing import Process, Pool

import jieba

# Silence jieba's startup/config messages (only INFO and above are emitted)
jieba.setLogLevel(jieba.logging.INFO)


def Map(path):
    """
    Read one document, tokenize it with jieba and count word frequencies.

    :param path: path of the document file to analyze
    :return: Counter mapping each token to its occurrence count in the file
    """
    # Load the whole document as UTF-8 text.
    with open(path, "r", encoding='utf8') as doc:
        text = doc.read()
    # Tokenize and tally in one pass.
    freq = Counter(jieba.lcut(text))
    print(f"Stat of {path} has done by process {os.getpid()}.")
    return freq

def Reduce(q):
    """
    Merge per-document word-frequency dicts into one library-wide tally
    and persist it to ``result.json``.

    :param q: iterable of word-frequency mappings (dict/Counter); a ``None``
              entry acts as an end-of-stream sentinel. The result is also
              written if the iterable is simply exhausted, so a missing
              sentinel no longer silently discards all work.
    :return: None (side effect: writes ``result.json`` in the working dir)
    """
    doc_lib = dict()  # accumulated word -> total count across all documents

    for d in q:
        if d is None:
            # Sentinel: every Map result has been delivered.
            break
        # Fold this document's counts into the library-wide totals.
        for word, count in d.items():
            doc_lib[word] = doc_lib.get(word, 0) + count

    print("Stat Complete.")
    with open("result.json", "w", encoding='utf8') as f:
        # ensure_ascii=False keeps Chinese tokens human-readable in the file
        # instead of \uXXXX escapes.
        json.dump(doc_lib, f, ensure_ascii=False)

if __name__ == '__main__':
    # Directory holding the numbered document files.
    PATH = r"C:\Users\lenovo\Resource"
    # Number of worker processes for the Map phase.
    Map_num = 4

    # Start timing.
    a = time.time()

    async_results = []
    # BUG FIX: the original created Pool() with the default size, so the
    # configured Map_num was silently ignored; pass it explicitly.
    p = Pool(Map_num)
    # Submit one asynchronous Map task per document file.
    for i in range(131604, 131624):  # full corpus runs up to 220316
        res = p.apply_async(Map, args=(f"{PATH}\\{i}.txt",))
        async_results.append(res)
    p.close()
    p.join()
    # Gather all per-document stats and append the None sentinel for Reduce.
    result = [res.get() for res in async_results] + [None]

    # Run the Reduce phase in a dedicated process.
    r = Process(target=Reduce, args=(result,))
    r.start()
    r.join()

    # Stop timing and report the elapsed wall-clock time.
    b = time.time()
    print(f"Time costs {b - a}s.")
