import argparse
import functools
import json
import logging
import os
import pickle
import re
import time

import openpyxl
import tqdm
import xlrd
import xlwt
import zhconv
from elasticsearch import Elasticsearch

from match import *
from qwen3_emb import QwenEmbeddingModel

parser = argparse.ArgumentParser()
parser.add_argument("--threshold", type=float, default=4)  # similarity cutoff for keeping a match (tunable)
parser.add_argument("--topk", type=int, default=5)  # max candidates kept per query text (tunable)
parser.add_argument("--use_es", action="store_true")  # checkpoint partial results under ./es_pkl instead of ./pkl
parser.add_argument("--use_qwen", action="store_true")  # add a Qwen-embedding bonus on top of the lexical score
parser.add_argument("--create_es_index", action="store_true")  # (re)build ES indices while reading reference books

args = parser.parse_args()

# Load the Qwen embedding model only when --use_qwen is given.
qwen_model = None
if args.use_qwen:
    qwen_model = QwenEmbeddingModel("/home/liwei/Qwen3-Embedding-0.6B")


def build_es_index():
    """Connect to the local Elasticsearch instance and return the client.

    Despite the name, no index is created here; indices are managed
    separately by create_index().
    """
    print("building es index")
    client = Elasticsearch("http://localhost:9200")
    return client

def create_index(index_name, client=None):
    """Drop and recreate an Elasticsearch index, returning the client used.

    Args:
        index_name: name of the index to (re)create; any existing index with
            this name is deleted first.
        client: Elasticsearch client to use. Defaults to the module-level
            ``es`` created in ``__main__`` (the original implementation
            silently relied on that global; the parameter makes the
            dependency explicit while staying backward compatible).

    Returns:
        The Elasticsearch client the index was created on.
    """
    if client is None:
        client = es  # module-level client created in __main__
    if client.indices.exists(index=index_name):
        client.indices.delete(index=index_name)
    client.indices.create(index=index_name)
    return client

import time
import logging


# logging.basicConfig(level=logging.INFO)


def log(func):
    """Decorator that logs a call (arguments) and its completion (result).

    Args:
        func: the function to wrap.

    Returns:
        A wrapper that forwards all arguments to ``func`` and returns its
        result unchanged, emitting logging.info before and after the call.
    """
    @functools.wraps(func)  # BUG FIX: preserve __name__/__doc__ of the wrapped function
    def wrapper(*args, **kwargs):
        logging.info(f"正在调用函数 {func.__name__}，参数为 {args}，{kwargs}")
        result = func(*args, **kwargs)
        logging.info(f"函数 {func.__name__} 执行完成，返回结果为 {result}")
        return result

    return wrapper


def timeit(func):
    """Decorator that prints the wall-clock execution time of each call.

    Args:
        func: the function to wrap.

    Returns:
        A wrapper that times ``func`` and returns its result unchanged.
    """
    @functools.wraps(func)  # BUG FIX: keep metadata so stacked decorators/introspection work
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        execution_time = time.time() - start_time
        print(f"函数 {func.__name__} 的执行时间为: {execution_time} 秒")
        return result

    return wrapper
def count_docs_by_count_api(es, index_name):
    """Return the document count of *index_name* via the ES _count API.

    Prints an error message and returns None when the request fails.
    """
    try:
        return es.count(index=index_name)["count"]
    except Exception as e:
        print(f"获取文档数失败（_count API）：{e}")
        return None

def convert_qwen_score(score):
    """Binarize a Qwen similarity score into a 0.0/1.0 bonus.

    The original docstring claimed the score was "converted to a range
    possibly greater than 1", but the code thresholds it: scores above 0.5
    contribute a flat +1.0 bonus to the lexical similarity, anything else
    contributes nothing.

    Args:
        score: raw similarity score from the Qwen embedding model.

    Returns:
        1.0 if score > 0.5, otherwise 0.0.
    """
    return 1.0 if score > 0.5 else 0.0

@timeit
def read_reference(name, es=None, content_col=0, index_name=None):
    """Load a reference workbook and build per-character document frequencies.

    Args:
        name: workbook path; .xlsx is read with openpyxl, anything else with
            xlrd (legacy .xls).
        es: optional Elasticsearch client; rows are indexed into it when
            --create_es_index is set.
        content_col: column holding the reference text (xlsx branch only).
        index_name: ES index to write documents into.

    Returns:
        (data, IDF): ``data`` maps text -> full row record; ``IDF`` maps each
        character to doc_num / (DF + 1), an IDF-like weight used by lcs_skip
        (computed for the xlsx branch only).
    """
    print("processing reference", name)
    data = {}
    IDF = {}
    DF = {}
    doc_num = 0
    if name.endswith("xlsx"):
        # .xlsx must be read with openpyxl (xlrd only handles legacy .xls)
        df = openpyxl.load_workbook(name)
        table = df.worksheets[0]  # first worksheet
        col_names = []
        for line_num, row in enumerate(table.rows):
            if line_num == 0:
                # first row holds the column names
                col_names = [col.value for col in row]
                continue
            record = [col.value for col in row]
            # only the main-text column is used for matching
            text = record[content_col]
            if text is None:
                # BUG FIX: skip empty cells BEFORE the zhconv conversion; the
                # original converted first and crashed on None rows of the
                # traditional-Chinese volume.
                continue
            if "羅近溪先生" in name:
                # this volume is stored in traditional characters; normalize to simplified
                text = zhconv.convert(text, "zh-cn")
            # per-character document frequency
            for c in set(text):
                DF[c] = DF.get(c, 0) + 1
            doc_num += 1
            data[text] = record
            if es and args.create_es_index:
                es.index(index=index_name, id=line_num, body={"key": text})
    else:
        df = xlrd.open_workbook(name)
        table = df.sheets()[0]
        # NOTE(review): this branch hard-codes column 4 and builds no DF, so
        # the returned IDF is empty for .xls inputs — confirm intent.
        for line_num in range(1, table.nrows):
            record = table.row_values(line_num)
            text = record[4]
            data[text] = record
    for c in DF:
        IDF[c] = doc_num / (DF[c] + 1)
    return data, IDF


def match_ref(text, ref_data, IDF, es=None, topk=args.topk, index_name=None):
    """Find reference texts similar to *text*.

    Args:
        text: the target text to match.
        ref_data: text -> record mapping from read_reference (scanned only
            when es is None).
        IDF: per-character weights used by lcs_skip.
        es: optional Elasticsearch client; when given, candidates come from a
            full-text search instead of a full scan of ref_data.
        topk: maximum number of candidates considered/returned.
        index_name: ES index queried when es is given.

    Returns:
        List of (similarity_score, candidate_text) tuples whose score exceeds
        args.threshold, in both ES and non-ES mode.
    """
    all_candidates = []
    if es is not None:
        # Fast path: let ES retrieve the topk lexically-similar rows, then rescore.
        result = es.search(index=index_name, query={"match": {"key": text}}, size=topk)
        cand_texts = [hit["_source"]["key"] for hit in result["hits"]["hits"]]
        if args.use_qwen:
            # one batched similarity call covers all candidates
            qwen_scores = qwen_model.similarity(text, cand_texts)[0]
        for i, cand_text in enumerate(cand_texts):
            sim_score = lcs_skip(cand_text, text, IDF)
            if args.use_qwen:
                sim_score += convert_qwen_score(qwen_scores[i])
            if sim_score > args.threshold:
                all_candidates.append((sim_score, cand_text))
    else:
        # Slow path: scan every reference text.
        text_set = set(text)
        scored = []
        for k_text in ref_data:
            # coarse filter: require more than `threshold` shared characters
            # before running the expensive LCS scoring
            if len(set(k_text) & text_set) > args.threshold:
                sim_score = lcs_skip(k_text, text, IDF)
                if sim_score < args.threshold:
                    continue
                scored.append((sim_score, k_text))
        # BUG FIX (two defects in the original):
        # 1. the incremental top-k kept comparing against an unsorted tail and
        #    could drop better candidates; sorting all hits once is correct.
        # 2. this branch returned bare strings while the ES branch returned
        #    (score, text) tuples — the caller in read_target_book unpacks
        #    pairs, so non-ES mode crashed. Return tuples in both branches.
        scored.sort(key=lambda cand: cand[0], reverse=True)
        all_candidates = [cand for cand in scored[:topk] if cand[0] > args.threshold]
    return all_candidates


@timeit
def read_target_book(name, ref_data, es=None, content_col=0, IDF=None, index_name=None):
    """Match every row of a target book against one reference book.

    Args:
        name: target workbook path (.xlsx via openpyxl, otherwise xlrd).
        ref_data: text -> record mapping returned by read_reference.
        es: optional Elasticsearch client forwarded to match_ref.
        content_col: column index of the text to match.
        IDF: per-character weights from read_reference.
        index_name: ES index queried by match_ref.

    Returns:
        List of output rows: target row + matched reference record +
        "；"-joined common spans.
    """
    data = []
    if name.endswith("xlsx"):
        df = openpyxl.load_workbook(name)
        table = df.worksheets[0]
        # NOTE(review): openpyxl row access is 1-based, so table[1] is the
        # header row (processed as data here) and range(1, max_row) skips the
        # last data row — confirm whether that is intended.
        for line_num in tqdm.tqdm(range(1, table.max_row)):
            row = table[line_num]
            item = [col.value for col in row]
            if item[content_col] is None:
                continue
            if type(item[content_col]) != str:
                print(item[content_col])
                continue
            # strip {{...}} annotations from the body text (currently disabled)
            #match_text = re.sub(re.compile("\\{\\{.*\\}\\}"), "", item[content_col])
            match_text = item[content_col]
            if "羅近溪先生" in name:
                match_text = zhconv.convert(match_text, "zh-cn")
            # compare the target text against the reference book
            all_candidates = match_ref(match_text, ref_data, IDF, es=es, index_name=index_name)
            # NOTE(review): this unpacking expects (score, text) pairs, which
            # match_ref only returns in ES mode; its non-ES branch returns
            # bare strings and would fail to unpack here.
            for sim_score, key in all_candidates:
                if sim_score >= min(args.threshold, len(key)):
                    sim_span = get_lcs_naive(key, item[content_col])
                    data.append(item + ref_data[key] + ["；".join(sim_span)])  # build the excel output row
            # checkpoint every 100 rows so partial results survive a crash
            if line_num % 100 == 0 and line_num > 0:
                pkl_book_name = name.split("/")[-1].split(".")[0]
                if args.use_es:
                    pickle.dump(data,
                                open("./es_pkl/" + pkl_book_name + "_" + str(line_num) + ".pkl", "wb"))  # guard against data loss
                else:
                    pickle.dump(data,
                                open("./pkl/" + pkl_book_name + "_" + str(line_num) + ".pkl", "wb"))  # guard against data loss

    else:
        df = xlrd.open_workbook(name)
        table = df.sheets()[0]
        # NOTE(review): this branch looks broken — match_ref requires an IDF
        # argument and returns a list, not a 3-tuple, so this call raises at
        # runtime. Only the .xlsx path appears to be exercised by __main__.
        for line_num in range(table.nrows):
            item = table.row_values(line_num)
            best_match, max_score, best_key = match_ref(item[content_col], ref_data)
            if max_score > args.threshold:
                data.append(item + best_match)  # build the excel output row
    return data


def write_reference_book(name, data):
    """Write *data* (a list of rows) to an .xls workbook at *name* via xlwt."""
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet('sheet0')
    for row_idx, row in enumerate(data):
        for col_idx, cell in enumerate(row):
            sheet.write(row_idx, col_idx, cell)
    workbook.save(name)


if __name__ == '__main__':
    # Directory holding the books to cross-compare (renamed from `dir` to
    # avoid shadowing the builtin).
    data_dir = "2025-9-9三书互"
    ref_data = {}
    IDFs = {}
    es = build_es_index()
    # Pass 1: load every book as reference material (optionally indexing into ES).
    for book_name in os.listdir(data_dir):
        if args.create_es_index:
            create_index(book_name[:5])
        tem_ref_data, IDFs[book_name] = read_reference(
            os.path.join(data_dir, book_name), es=es, content_col=0, index_name=book_name[:5])
        count = count_docs_by_count_api(es, book_name[:5])
        print("index", book_name[:5], "has", count, "docs")
        ref_data[book_name] = tem_ref_data
    # Pass 2: match every book against every other book and write a report.
    for book in os.listdir(data_dir):
        print("processing target", book)
        for ref_book in os.listdir(data_dir):
            if ref_book == book:
                continue
            print("comparing", ref_book, book)
            assert es is not None
            data = read_target_book(os.path.join(data_dir, book), ref_data[ref_book], es=es, content_col=0,
                                    IDF=IDFs[ref_book], index_name=ref_book[:5])
            write_dir = os.path.join("./edited/", data_dir, "threshold_" + str(int(args.threshold)))
            # BUG FIX: os.mkdir raises FileNotFoundError when the parent dirs
            # (./edited/<data_dir>) do not exist yet; makedirs creates the
            # whole chain and exist_ok avoids the check-then-create race.
            os.makedirs(write_dir, exist_ok=True)
            write_reference_book(os.path.join(write_dir, book.split(".")[0] + "_对_" + ref_book.split(".")[0] + ".xls"), data)
