# Input file paths
import json
from concurrent.futures import ProcessPoolExecutor

import jellyfish
import jieba
from rapidfuzz import process, fuzz
from tqdm import tqdm

# Prepare the candidate documents: one title per line.
# NOTE: this runs at import time, so every ProcessPoolExecutor worker
# re-reads the file and gets its own copy of `documents` when it
# re-imports this module.
path2 = r"C:\Users\Administrator\Documents\WXWork\1688853051339318\Cache\File\2024-10\wanfang"
with open(path2, "r", encoding="utf-8") as f:
    # Iterate the file object directly; readlines() builds a redundant list.
    documents = [line.strip() for line in f]


# Tokenizer
def segment(text):
    """Tokenize *text* with jieba and return the tokens as a list."""
    return [token for token in jieba.cut(text)]


# Search
def search(query):
    """Return the top-20 fuzzy matches for *query* among `documents`.

    The query is segmented with jieba and space-joined before scoring
    with WRatio; each result is a (document, score, index) tuple as
    produced by rapidfuzz.process.extract.
    """
    # Segment the query string first.
    tokens = segment(query)
    segmented_query = " ".join(tokens)
    # Adjust the limit / downstream threshold as needed.
    matches = process.extract(segmented_query, documents, scorer=fuzz.WRatio, limit=20)
    return matches


# Process a single search query.
def process_query(search_query):
    """Match *search_query* against the documents and keep strong hits.

    Returns a list of dicts with keys "cnki" (the query), "wanfang"
    (the matched document), "sim" (partial_ratio of raw query vs doc)
    and "score" (the WRatio score from the initial search), for every
    candidate whose similarity is >= 90.
    """
    output = []
    # search() yields (document, score, index); the index is unused here.
    for doc, score, _ in search(search_query):
        # Re-score against the raw (unsegmented) query; WRatio ranked the
        # candidates, partial_ratio acts as the acceptance threshold.
        similarity = fuzz.partial_ratio(search_query, doc)
        if similarity >= 90:
            output.append(
                {"cnki": search_query, "wanfang": doc, "sim": similarity, "score": score}
            )
    return output


if __name__ == "__main__":
    # Queries to match, one per line.
    path1 = r"C:\Users\Administrator\Documents\WXWork\1688853051339318\Cache\File\2024-10\cnki"
    with open(path1, "r", encoding="utf-8") as f:
        # Iterate the file object directly; readlines() builds a redundant list.
        queries = [line.strip() for line in f]

    # Fan the CPU-bound fuzzy matching out across worker processes.
    results_all = []
    with ProcessPoolExecutor(max_workers=15) as executor:
        for result in tqdm(executor.map(process_query, queries), total=len(queries)):
            results_all.extend(result)

    # Write one JSON object per line.
    # NOTE(review): the file is opened in append mode, so re-running the
    # script adds duplicate lines -- confirm this is intended.
    with open(r"E:\jupyterlab\data\会议\result_20.txt", 'a', encoding="utf-8") as f:
        f.writelines(json.dumps(result, ensure_ascii=False) + "\n" for result in results_all)
