import pandas as pd  
import numpy as np  
import os  
import json  
from elasticsearch import Elasticsearch  

# Load the pre-segmented dataset: one row per crawled page, keyed by 'url'.
# Fields (title/description/content/editor) hold space-separated tokens.
df_data = pd.read_csv("../data/segmented.csv", encoding='utf-8-sig', index_col='url')
print(f"数据读取完成，共 {len(df_data)} 条记录。")

# Directory for the inverted-index / TF-IDF JSON artifacts.
# makedirs(exist_ok=True) avoids the check-then-create race of
# os.path.exists() followed by os.mkdir().
os.makedirs("./jsons", exist_ok=True)
print("目录创建完成。")

# Load the stop-word list. Stored as a set: the tokenize loops below test
# membership per token, and a list would make that O(len(stop words)) each time.
with open("stopwords.txt", 'r', encoding='utf-8') as f:
    stop_word_list = set(f.read().splitlines())
print("停用词表读取完成，共 {} 个停用词。".format(len(stop_word_list)))

 
def calculate_term_frequency(df_data=df_data, title_only=False):
    """Build a per-document term-frequency index.

    Iterates over every row of *df_data* and counts occurrences of each
    whitespace-separated token, skipping stop words.

    Args:
        df_data: DataFrame indexed by 'url' with 'title', 'description',
            'content' and 'editor' columns of pre-segmented text
            (tokens separated by single spaces). Defaults to the
            module-level DataFrame loaded at import time.
        title_only: when True, only the 'title' field is counted;
            otherwise all four text fields contribute.

    Returns:
        dict mapping url -> {word: raw count}.
    """
    # Local set: O(1) membership per token regardless of the global's type.
    stop_words = set(stop_word_list)

    def _count_tokens(counts, text):
        # str() guards against NaN/float cells that pandas produces for
        # missing values — the original crashed on a NaN title because
        # only the other three fields were wrapped in str().
        for word in str(text).split(" "):
            if word not in stop_words:
                counts[word] = counts.get(word, 0) + 1

    term_frequency_index = {}
    for index, (url, record_info) in enumerate(df_data.iterrows()):
        counts = term_frequency_index[url] = {}
        _count_tokens(counts, record_info.title)
        if not title_only:
            _count_tokens(counts, record_info.description)
            _count_tokens(counts, record_info.content)
            _count_tokens(counts, record_info.editor)
        # progress log
        print(f"处理第 {index + 1} 条记录：{url}")
    return term_frequency_index

# Build both index variants: full text (title + description + content +
# editor) and a title-only index for title-weighted search.
term_frequency_index = calculate_term_frequency()  
term_frequency_index_title_only = calculate_term_frequency(df_data, True)  
print("TF计算完成。")  


def create_inverted_index(index):
    """Invert a {url: {word: freq}} index into {word: {url: freq}}."""
    inverted_index = {}
    print("开始创建倒排索引...")
    total_documents = len(index)
    for position, (doc_url, term_counts) in enumerate(index.items(), start=1):
        for term, count in term_counts.items():
            # setdefault collapses the "create posting dict on first sight"
            # branch into a single expression.
            inverted_index.setdefault(term, {})[doc_url] = count
        print(f"处理进度：已处理 {position} / {total_documents} 个文档，当前文档：{doc_url}")
    print("倒排索引创建全部完成。")
    return inverted_index

# Inverted indexes (word -> {url: frequency}) for both index variants.
inverted_index = create_inverted_index( term_frequency_index)  
inverted_title_index = create_inverted_index(term_frequency_index_title_only)  

# TF table: per-document raw counts (no length normalization is applied).
def calculate_tf(term_frequency_index):
    """Return a per-document shallow copy of the term-frequency index."""
    print("开始计算TF...")
    tf_results = {}
    for doc_url, term_counts in term_frequency_index.items():
        # dict() copies the inner mapping without touching the source index.
        tf_results[doc_url] = dict(term_counts)
        print(f"TF计算完成：{doc_url}")
    print("TF计算全部完成。")
    return tf_results

# Materialize TF tables for both index variants.
tf_results = calculate_tf(term_frequency_index)  
tf_results_title_only = calculate_tf(term_frequency_index_title_only)  

# IDF per word: log(N / document frequency), N = number of documents.
def calculate_idf(term_frequency_index):
    """Compute the inverse document frequency of every word in the index."""
    print("开始计算IDF...")
    doc_count = len(term_frequency_index)
    # First pass: document frequency (how many docs contain each word).
    document_frequency = {}
    for doc_url, term_counts in term_frequency_index.items():
        for term in term_counts:
            document_frequency[term] = document_frequency.get(term, 0) + 1
        print(f"IDF计算完成：{doc_url}")
    # Second pass: convert counts to IDF weights (guard kept for df == 0).
    idf_results = {
        term: np.log(doc_count / df) if df > 0 else 0
        for term, df in document_frequency.items()
    }
    print("IDF计算全部完成。")
    return idf_results

# IDF tables for both index variants.
idf_results = calculate_idf(term_frequency_index)  
idf_results_title_only = calculate_idf(term_frequency_index_title_only)  

# TF-IDF = raw term frequency * IDF weight.
def calculate_tf_idf(term_frequency_index, idf_results):
    """Weight each document's term counts by the supplied IDF table."""
    print("开始计算TF-IDF...")
    tf_idf_results = {}
    for doc_url, term_counts in term_frequency_index.items():
        weighted = {}
        for term, count in term_counts.items():
            # KeyError here would mean idf_results was built from a
            # different index than term_frequency_index.
            weighted[term] = count * idf_results[term]
        tf_idf_results[doc_url] = weighted
        print(f"TF-IDF计算完成：{doc_url}")
    print("TF-IDF计算全部完成。")
    return tf_idf_results

# TF-IDF tables for both index variants.
tf_idf_results = calculate_tf_idf(term_frequency_index, idf_results)  
tf_idf_results_title_only = calculate_tf_idf(term_frequency_index_title_only, idf_results_title_only)  

# Persist the TF-IDF / TF / IDF tables. A (path, payload) table replaces
# six copies of the identical open/json.dump stanza; write order and file
# names are unchanged.
print("开始保存TF-IDF值...")
for _path, _payload in (
    ('./jsons/TF_IDF.json', tf_idf_results),
    ('./jsons/TF_IDF_Title.json', tf_idf_results_title_only),
    ('./jsons/TF.json', tf_results),
    ('./jsons/TF_Title.json', tf_results_title_only),
    ('./jsons/IDF.json', idf_results),
    ('./jsons/IDF_Title.json', idf_results_title_only),
):
    with open(_path, 'w', encoding='utf-8') as f:
        # ensure_ascii=False keeps the CJK tokens human-readable in the JSON.
        json.dump(_payload, f, ensure_ascii=False)
print("TF-IDF值保存完成。")

# Persist the inverted indexes as JSON for the front end.
for _path, _payload in (
    ('./jsons/invert_index.json', inverted_index),
    ('./jsons/invert_index_title.json', inverted_title_index),
):
    with open(_path, 'w', encoding='utf-8') as f:
        json.dump(_payload, f, ensure_ascii=False)
print("倒排索引保存完成。")

# Corpus-wide total frequency of every non-stop word.
def calculate_all_tf(term_frequency_index):
    """Sum per-document counts into one corpus-wide frequency table."""
    print("开始计算所有非停用词的总词频...")
    totals = {}
    for doc_url, term_counts in term_frequency_index.items():
        for term, count in term_counts.items():
            if term in totals:
                totals[term] += count
            else:
                totals[term] = count
        print(f"词频计算完成：{doc_url}")
    print("词频计算全部完成。")
    return totals

# Compute the corpus-wide word frequencies for both index variants.
all_tf_results = calculate_all_tf(term_frequency_index)  
all_tf_results_title_only = calculate_all_tf(term_frequency_index_title_only)  

# Persist the corpus-wide word-frequency tables.
with open('./jsons/final_TF.json', 'w', encoding='utf-8') as f:  
    json.dump(all_tf_results, f, ensure_ascii=False)  

with open('./jsons/final_TF_title.json', 'w', encoding='utf-8') as f:  
    json.dump(all_tf_results_title_only, f, ensure_ascii=False)  

print("词频保存完成。")  

