import json
import os
import time
from collections import Counter
from multiprocessing import Process, Queue

import jieba

# Suppress jieba's startup/configuration logging output
jieba.setLogLevel(jieba.logging.INFO)


def Map(p: Queue, q: Queue):
    """
    Map worker: pull document paths from a queue, tokenize each file with
    jieba, and push the per-document word-frequency Counter to the result
    queue.

    :param p: queue of document path strings; a None entry is the
              shutdown sentinel
    :param q: queue receiving per-document word-frequency results
    :return: None
    """
    while True:
        # Fetch the next document path from the input queue.
        path = p.get()
        if path is None:
            # Sentinel received: all documents are done.  Forward the
            # sentinel so Reduce can count finished workers, then exit.
            q.put(None)
            return
        # Valid path: read the file, tokenize, and enqueue the tally.
        with open(path, "r", encoding='utf8') as doc_file:
            text = doc_file.read()
        q.put(Counter(jieba.lcut(text)))
        print(f"Stat of {path} has done by process {os.getpid()}.")


def Reduce(q: Queue, m_num):
    """
    Aggregate the per-document word-frequency results produced by all Map
    workers and write the combined corpus statistics to result.json.

    :param q: queue of per-document word-frequency mappings (dict/Counter);
              each Map worker sends a final None sentinel when it finishes
    :param m_num: total number of Map processes, used to detect when every
                  worker has finished
    :return: None
    """
    doc_lib = dict()  # aggregated corpus-wide word frequencies
    count = 0  # number of Map workers that have finished

    while True:
        d = q.get()  # next per-document result (or a None sentinel)
        if d is None:
            count += 1  # one more Map worker has finished

            # All Map workers done: persist the aggregate and exit.
            if count == m_num:
                print("Stat Complete.")
                with open("result.json", "w", encoding='utf8') as f:
                    # ensure_ascii=False keeps non-ASCII (e.g. Chinese)
                    # words human-readable in the UTF-8 file instead of
                    # emitting \uXXXX escapes.
                    json.dump(doc_lib, f, ensure_ascii=False)
                return
        else:
            # Merge this document's counts into the corpus totals.
            for word, freq in d.items():
                doc_lib[word] = doc_lib.get(word, 0) + freq


if __name__ == '__main__':

    # Directory containing the documents to analyze.
    PATH = r"C:\Users\lenovo\Resource"
    # Number of Map worker processes.
    Map_num = 4

    # Start timing.
    a = time.time()

    # Queues: one feeding document paths to the workers, one collecting
    # per-document word-frequency results.
    in_q, res_q = Queue(), Queue()
    # Enqueue the document paths (file IDs 131604..131623; the full
    # corpus apparently runs to 220316 — see original range comment).
    for i in range(131604, 131624):
        in_q.put(rf"{PATH}\{i}.txt")
    # One None sentinel per Map worker so each can shut down cleanly.
    for i in range(Map_num):
        in_q.put(None)

    # Create and start the Map worker processes.
    Mappers = []
    for i in range(Map_num):
        m = Process(target=Map, args=(in_q, res_q))
        Mappers.append(m)
    for m in Mappers:
        m.start()

    # Reduce process: aggregates all results and writes them to disk.
    r = Process(target=Reduce, args=(res_q, Map_num))
    r.start()
    r.join()  # Reduce finishes only after every Map worker sent its sentinel

    # Fix: also join the Map workers so no child process is left
    # un-reaped when the main process exits.  By the time Reduce has
    # seen all Map_num sentinels, every worker is already exiting, so
    # these joins return promptly.
    for m in Mappers:
        m.join()

    # Stop timing and report the elapsed wall-clock time.
    b = time.time()
    print(f"Time costs {b - a}s.")
