'''
陈超依 19377189
现代程序设计第十一次作业
'''
import glob
import os
import time
from collections import Counter
from multiprocessing import JoinableQueue, Pool, Process

import jieba


def dat_helper(dat_path: str, out_path: str, dat_name: str,
               length: int):
    """Split the tagged lines of a .dat file into fixed-size text files.

    Scans *dat_path* line by line, keeps only the lines carrying the tag
    *dat_name* (lines with the related ``<dat_name>title`` tag are
    metadata and skipped), strips the surrounding ``<tag>...</tag>``
    markup, and writes the extracted text into files named
    ``processed_dat_<k>.txt`` under *out_path*, at most *length* lines
    per file.

    Args:
        dat_path: path of the source .dat file.
        out_path: directory that receives the split .txt files.
        dat_name: tag name whose content is extracted (e.g. 'content').
        length: maximum number of lines written per output file.
    """
    open_tag = '<' + dat_name + '>'
    close_tag = '</' + dat_name + '>'
    file_number = 1
    line_number = 0
    # Context manager / try-finally guarantee both handles are closed even
    # if an error occurs mid-way (the original leaked them on exception).
    with open(dat_path, 'r', errors='ignore') as data:
        dat_output = open('{}/processed_dat_{}.txt'.format(out_path, file_number),
                          'w', encoding='utf-8')
        try:
            for line in data:
                # Keep only data lines; '<dat_name>title' lines are skipped.
                if dat_name not in line or (dat_name + 'title') in line:
                    continue
                if line_number >= length:
                    # Current chunk is full -> rotate to the next file.
                    dat_output.close()
                    file_number += 1
                    line_number = 0
                    dat_output = open('{}/processed_dat_{}.txt'.format(out_path, file_number),
                                      'w', encoding='utf-8')
                # Strip the leading <tag> and trailing </tag> markup.
                dat_output.write(line.split(open_tag)[-1].split(close_tag)[0])
                dat_output.write('\n')
                line_number += 1
        finally:
            dat_output.close()

def Map(path: str, min_count: int = 20):
    """Tokenise one text file with jieba and return its word frequencies.

    Words occurring fewer than *min_count* times are dropped here, in the
    worker, so the later Reduce merge handles much smaller dictionaries.

    Args:
        path: path of the UTF-8 text file to analyse.
        min_count: minimum frequency a word needs to be kept
            (default 20, matching the original hard-coded threshold).

    Returns:
        dict mapping word -> occurrence count, frequent words only.
    """
    print("开始读取文件{} pid:{}\n".format(path.split('\\')[-1], os.getpid()))
    # 'with' closes the handle promptly; the original leaked it in every
    # worker process.
    with open(path, mode='r', encoding='utf-8') as f:
        data = f.read()
    # Counter performs the counting loop in C instead of a manual dict.
    counts = Counter(jieba.lcut(data))
    # Pre-filter rare words (count < min_count) before returning.
    result = {word: cnt for word, cnt in counts.items() if cnt >= min_count}
    print("文件{}已读取完成 pid:{}\n".format(path.split('\\')[-1], os.getpid()))
    return result


def Map_callback(Count : dict):
    # Pool callback: forwards one Map result onto the shared queue that the
    # Reduce process consumes. Pool callbacks run in the parent process, so
    # this put() happens on the same queue object the main script created.
    # NOTE(review): relies on the module-level `Map_return` queue assigned
    # under the __main__ guard before the pool is started.
    Map_return.put(Count)

class Reduce(Process):
    """Consumer process that merges per-file word counts into one table.

    Pulls *length* partial dictionaries from *Map_return*, sums them into
    a single frequency table and writes it to *out_path* as
    ``word : count`` lines.
    """

    def __init__(self, Map_return : JoinableQueue, out_path : str, length : int):
        super().__init__()
        self.Map_return = Map_return  # queue fed by the Map callbacks
        self.out_path = out_path      # destination file of the merged table
        self.length = length          # number of partial results expected

    def run(self):
        result = { }
        for i in range(self.length):
            if self.Map_return.empty():
                # Producers may just be slow: wait once, then give up.
                print("出现队列空的情况\n")
                time.sleep(20)
                if self.Map_return.empty():
                    print("\ntimeout\n")
                    raise TimeoutError
            data = self.Map_return.get()
            # BUG FIX: the original used result.update(data), which
            # OVERWROTE counts coming from other files; frequencies of a
            # word seen in several files must be summed, not replaced.
            for word, cnt in data.items():
                result[word] = result.get(word, 0) + cnt
            self.Map_return.task_done()
            print("完成一次合并\n")
        # Every item has been task_done()'d above, so this returns at once;
        # it also unblocks the parent's Map_return.join().
        self.Map_return.join()
        # 'with' guarantees the result file is flushed and closed.
        with open(self.out_path, 'w', encoding='utf-8') as f_out:
            for word, cnt in result.items():
                f_out.write("{} : {} \n".format(word, cnt))

if __name__=='__main__':
    # Pre-processing step: split the downloaded .dat into small files under
    # the 'processed' folder (already done once, hence kept disabled).
    '''dat_helper("E:/Py_Programs/week11/news_sohusite_xml.dat",
        "E:/Py_Programs/week11/processed", "content", 1000)'''
    # Then run the keyword analysis on each small file with a process pool.
    Max_Process = 8  # size of the Map worker pool

    print("主程序pid : {}".format(os.getpid()))
    start_time = time.time()
    path_list = glob.glob(r"E:/Py_Programs/week11/processed/*.txt")  # paths of the split files
    global Map_return  # NOTE(review): 'global' at module level is a no-op
    Map_return = JoinableQueue()  # carries each Map result to the Reduce process
    Map_pool = Pool(Max_Process)
    for path in path_list:
        # callback runs in this (parent) process and feeds Map_return
        Map_pool.apply_async(Map, args=(path,), callback=Map_callback)
    Map_pool.close()  # no more tasks will be submitted
    Reduce_process = Reduce(Map_return, "E:/Py_Programs/week11/result.txt", len(path_list))
    Reduce_process.start()
    Map_pool.join()    # wait until every Map task has finished
    Map_return.join()  # wait until Reduce has task_done()'d every result
    end_time = time.time()
    print("程序运行完成 运行时间：{}  所选择进程数：{}\n".format(end_time-start_time, Max_Process))