import csv
import os
import xml.etree.ElementTree as ET
from collections import Counter, defaultdict
from math import log

import jieba

dir_path = "hupu/news/new_xml"
txt_path = "hupu/news/new_txt"
# One-time conversion of xml articles into txt files (kept for reference):
# for path in os.listdir(dir_path):
#     file_path = dir_path + '/' + path
#     tree = ET.parse(file_path)
#     root = tree.getroot()
#     title = root.find('title').text
#     datetime = root.find('datetime').text
#     body = root.find('body').text if root.find('body').text else ''
#     content = datetime + title + body
#     with open('hupu/news/new_txt/' + path.replace('xml', 'txt'), 'w', encoding='utf-8') as file:
#         file.write(content)

# Load every converted txt file into memory.
# Layout per file (see conversion above): the first 19 characters are the
# datetime stamp ("YYYY-MM-DD HH:MM:SS"), the rest is title + body.
# data maps doc_id (filename without ".txt") -> [datetime, text].
data = {}
for path in os.listdir(txt_path):
    file_path = os.path.join(txt_path, path)
    # Bug fix: the files were written as UTF-8, so read them as UTF-8
    # explicitly instead of relying on the platform default encoding.
    with open(file_path, 'r', encoding='utf-8') as f:
        content = f.read()
    data[path[:-4]] = [content[:19], content[19:]]

# Lazily-loaded stopword set; filled on the first call to pre_process so the
# stopword file is read and parsed once instead of once per document.
_stopwords = None

def pre_process(content):
    """Tokenize *content* with jieba (precise mode) and drop stopwords.

    Returns the surviving tokens as a list, in original order.
    """
    global _stopwords
    if _stopwords is None:
        with open("hupu/news/stopwords.txt", "r", encoding="utf-8") as f:
            _stopwords = {line.strip() for line in f}
    words = jieba.cut(content, cut_all=False)
    return [word for word in words if word not in _stopwords]

def mapper(content, file_path):
    """Map phase: emit one (token, file_path) pair per surviving token.

    Tokens come from pre_process, so stopwords are already filtered out.
    """
    for token in pre_process(content):
        yield token, file_path

def reducer(c, file_paths):
    """Reduce phase for one term.

    Given term *c* and the list of file paths it was emitted from (one entry
    per occurrence), return two pairs:
      (c, deduplicated doc list)  -- the posting list (set order, unsorted)
      (c, raw occurrence list)    -- kept so term frequency per doc survives
    """
    unique_docs = list(set(file_paths))
    raw_occurrences = file_paths
    return (c, unique_docs), (c, raw_occurrences)

def Map_Reduce(data):
    """Build the inverted index over *data*.

    data maps doc_id -> [datetime, text]; only the text (content[1]) is
    indexed.  Returns two dicts:
      result:       term -> deduplicated list of doc_ids (posting list)
      result_count: term -> raw occurrence list (one doc_id per occurrence,
                    so result_count[t].count(d) is the term frequency)
    """
    # defaultdict removes the manual "key already present?" branching.
    count_dict = defaultdict(list)
    for file_path, content in data.items():
        for token, doc in mapper(content[1], file_path):
            count_dict[token].append(doc)

    result = {}
    result_count = {}
    for c, paths in count_dict.items():
        # Call reducer exactly once per term; the original called it twice,
        # redoing the set-based deduplication for no benefit.
        (_, unique_docs), (_, raw_occurrences) = reducer(c, paths)
        result[c] = unique_docs
        result_count[c] = raw_occurrences

    return result, result_count

result, result_count = Map_Reduce(data)

# Tokenize each document exactly once and tally its term frequencies.
# The original re-ran jieba over a document for EVERY word appearing in it
# (O(vocabulary x doc length) tokenizations) and scanned the occurrence list
# with list.count per (word, doc) pair; this precomputation makes the output
# loop pure dictionary lookups while producing byte-identical rows.
doc_term_freq = {}   # doc_id -> Counter of processed tokens
doc_length = {}      # doc_id -> number of processed tokens
for doc_id, (_, body) in data.items():
    tokens = pre_process(body)
    doc_term_freq[doc_id] = Counter(tokens)
    doc_length[doc_id] = len(tokens)

# Each CSV row is (word, number of docs containing it, packed postings).
# A posting is "doc_id \t datetime \t term_freq \t doc_len" terminated by '\'.
csv_path = "hupu/news/new_posting_list.csv"
csv_data = []
for word, doc_ids in result.items():
    postings = []
    for doc_id in doc_ids:
        postings.append(
            doc_id + '\t' + data[doc_id][0] + '\t'
            + str(doc_term_freq[doc_id][word]) + '\t'
            + str(doc_length[doc_id]) + '\\'
        )
    csv_data.append((word, len(doc_ids), "".join(postings)))

with open(csv_path, 'w', newline='', encoding='utf-8') as csvfile:
    csv_writer = csv.writer(csvfile)
    csv_writer.writerows(csv_data)
