import argparse
import functools
import json
import os
import pickle
import re

import openpyxl
import tqdm
import xlrd
import xlwt
from elasticsearch import Elasticsearch

from match import lcs_skip

# Command-line options; every function below reads them via the module-level
# `args` namespace.
parser = argparse.ArgumentParser()
parser.add_argument("--threshold", type=float, default=4)  # similarity cut-off; tunable
parser.add_argument("--topk", type=int, default=5)  # max candidates kept per text; tunable
parser.add_argument("--use_es", action="store_true")  # retrieve candidates via Elasticsearch
parser.add_argument("--create_es_index", action="store_true")  # drop & rebuild the ES index first
args = parser.parse_args()


def build_es_index():
    """Connect to the local Elasticsearch node.

    When --create_es_index is set, the "zhuziyulei" index is dropped (if
    present) and recreated so indexing starts from a clean slate.

    Returns:
        Elasticsearch: client bound to http://localhost:9200.
    """
    print("building es index")
    client = Elasticsearch("http://localhost:9200", timeout=3600)
    if args.create_es_index:
        # Rebuild requested: remove any stale index before creating anew.
        if client.indices.exists(index="zhuziyulei"):
            client.indices.delete(index="zhuziyulei")
        client.indices.create(index="zhuziyulei")
    return client


import time
import logging


# logging.basicConfig(level=logging.INFO)


def log(func):
    """Decorator that logs entry (with arguments) and exit (with result)
    of *func* at INFO level.

    Uses functools.wraps so the wrapped function keeps its __name__,
    __doc__ and signature metadata (the original decorator clobbered them).
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        logging.info(f"正在调用函数 {func.__name__}，参数为 {args}，{kwargs}")
        result = func(*args, **kwargs)
        logging.info(f"函数 {func.__name__} 执行完成，返回结果为 {result}")
        return result

    return wrapper


def timeit(func):
    """Decorator that prints the wall-clock execution time of *func*.

    Uses functools.wraps so the wrapped function keeps its __name__ and
    other metadata (the original decorator clobbered them).
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        execution_time = time.time() - start_time
        print(f"函数 {func.__name__} 的执行时间为: {execution_time} 秒")
        return result

    return wrapper


@timeit
def read_reference(name, content_col=2):
    """Load the reference corpus (朱子语类) from an Excel workbook.

    Args:
        name: path to the workbook; .xlsx is read with openpyxl, anything
            else with xlrd.
        content_col: 0-based index of the column holding the main text.

    Returns:
        tuple (data, es, IDF):
            data: dict mapping main text -> full row record.
            es:   Elasticsearch client when --use_es is set, else None.
            IDF:  dict mapping character -> smoothed inverse document
                  frequency (doc_num / (DF + 1)).
    """
    print("processing yulei")
    data = {}
    IDF = {}
    DF = {}  # per-character document frequency
    doc_num = 0
    es = None
    if args.use_es:
        es = build_es_index()

    def _add_record(line_num, record):
        # Shared per-row bookkeeping: DF counts, data dict, optional ES doc.
        nonlocal doc_num
        text = record[content_col]
        if not text or not isinstance(text, str):
            # Empty / missing / non-text main cell: nothing to match against.
            return
        for c in set(text):
            DF[c] = DF.get(c, 0) + 1
        doc_num += 1
        data[text] = record
        if args.use_es:
            es.index(index="zhuziyulei", id=line_num, body={"key": text})

    if name.endswith("xlsx"):
        # .xlsx must be read with openpyxl
        table = openpyxl.load_workbook(name).worksheets[0]
        for line_num, row in enumerate(table.rows):
            if line_num == 0:
                continue  # header row holds column names, not data
            _add_record(line_num, [col.value for col in row])
    else:
        # Legacy .xls read with xlrd. The original branch hard-coded
        # column 4 (ignoring `content_col` and the config) and skipped all
        # DF/ES bookkeeping, which left IDF empty for .xls sources.
        table = xlrd.open_workbook(name).sheets()[0]
        for line_num in range(1, table.nrows):
            _add_record(line_num, table.row_values(line_num))

    for c in DF:
        # Smoothed IDF; +1 avoids division issues for ubiquitous characters.
        IDF[c] = doc_num / (DF[c] + 1)
    return data, es, IDF


def match_ref(text, ref_data, es=None, topk=args.topk):
    """Find the reference texts most similar to `text`.

    Args:
        text: target text to match.
        ref_data: dict mapping reference text -> full record, as returned
            by read_reference.
        es: optional Elasticsearch client used for fast candidate retrieval.
        topk: maximum number of candidates to return.

    Returns:
        list of (similarity_score, reference_text) tuples with scores above
        args.threshold. The original non-ES branch returned bare strings
        while the ES branch returned tuples; the caller unpacks tuples, so
        both branches now return tuples consistently.
    """
    candidates = []
    if es is not None:
        # Let Elasticsearch pre-select `topk` likely candidates, then
        # re-score them with the exact LCS-based similarity.
        result = es.search(index="zhuziyulei", query={"match": {"key": text}}, size=topk)
        for hit in result["hits"]["hits"]:
            cand_text = hit["_source"]["key"]
            sim_score = lcs_skip(cand_text, text)
            if sim_score > args.threshold:
                candidates.append((sim_score, cand_text))
    else:
        text_set = set(text)
        for k_text in ref_data:
            # Cheap pre-filter: require enough shared characters before the
            # (expensive) LCS computation.
            if len(set(k_text).intersection(text_set)) <= args.threshold:
                continue
            sim_score = lcs_skip(k_text, text)
            # Small bonus when the target explicitly mentions Zhu Xi
            # (朱子 / 朱氏).
            if "朱子" in text or "朱氏" in text:
                sim_score += 2
            if sim_score > args.threshold:
                candidates.append((sim_score, k_text))
        # Keep only the best `topk`. The original bookkeeping could admit
        # topk+1 entries (`<= topk`) and compared new scores against an
        # unsorted tail element, so it did not track the true top-k.
        candidates.sort(reverse=True, key=lambda x: x[0])
        candidates = candidates[:topk]
    return candidates


@timeit
def read_target_book(name, ref_data, es=None, content_col=2, IDF=None):
    """Scan a target book and collect rows that match the reference corpus.

    Args:
        name: path to the target workbook (.xlsx via openpyxl, else xlrd).
        ref_data: dict mapping reference text -> record (from read_reference).
        es: optional Elasticsearch client for candidate retrieval.
        content_col: 0-based column index of the main text.
        IDF: character IDF table (currently unused; kept for interface
            compatibility).

    Returns:
        list of rows: target record + matched reference record (+ matched
        spans for .xlsx input).
    """
    data = []
    if name.endswith("xlsx"):
        df = openpyxl.load_workbook(name)
        table = df.worksheets[0]
        # openpyxl rows are 1-indexed with row 1 as the header, so data rows
        # run 2..max_row inclusive. The original `range(1, max_row)` both
        # processed the header row and dropped the last data row.
        for line_num in tqdm.tqdm(range(2, table.max_row + 1)):
            item = [col.value for col in table[line_num]]
            text = item[content_col]
            if text is None:
                continue
            if not isinstance(text, str):
                print(text)
                continue
            # Strip editorial {{...}} markup before matching.
            match_text = re.sub(r"\{\{.*\}\}", "", text)
            all_candidates = match_ref(match_text, ref_data, es=es)
            for sim_score, key in all_candidates:
                if sim_score >= min(args.threshold, len(key)):
                    # NOTE(review): get_lcs_naive is not defined or imported
                    # in this file — presumably it lives in the `match`
                    # module alongside lcs_skip; confirm and import it.
                    sim_span = get_lcs_naive(key, text)
                    data.append(item + ref_data[key] + ["；".join(sim_span)])
            if line_num % 100 == 0:
                _checkpoint(data, name, line_num)
    else:
        df = xlrd.open_workbook(name)
        table = df.sheets()[0]
        for line_num in range(1, table.nrows):  # row 0 is the header
            item = table.row_values(line_num)
            # The original unpacked three values from match_ref's list
            # return, which raised ValueError; iterate the (score, text)
            # candidates instead, mirroring the .xlsx branch.
            for sim_score, key in match_ref(item[content_col], ref_data):
                if sim_score > args.threshold:
                    data.append(item + ref_data[key])
    return data


def _checkpoint(data, name, line_num):
    """Pickle partial results periodically so a crash doesn't lose everything."""
    pkl_book_name = name.split("/")[-1].split(".")[0]
    out_dir = "./es_pkl" if args.use_es else "./pkl"
    # Original assumed the directory existed and leaked the file handle.
    os.makedirs(out_dir, exist_ok=True)
    with open(f"{out_dir}/{pkl_book_name}_{line_num}.pkl", "wb") as fh:
        pickle.dump(data, fh)


def write_reference_book(name, data):
    """Write `data` (a list of rows) to an .xls workbook saved at `name`."""
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet('sheet0')
    for row_idx, row in enumerate(data):
        for col_idx, cell in enumerate(row):
            sheet.write(row_idx, col_idx, cell)
    workbook.save(name)




if __name__ == '__main__':
    # config.json maps "source" to the reference workbook and "target" to
    # the list of books to scan against it.
    with open("config.json", encoding="utf-8") as fh:  # with: the original leaked the handle
        config = json.load(fh)
    target_files = config["target"]
    source_file = config["source"]
    ref_data, es, IDF = read_reference(
        os.path.join(source_file["dir"], source_file["name"]),
        content_col=source_file["content_col"],
    )
    for book in target_files:
        book_name = book["name"]
        print("processing", book_name)
        data = read_target_book(
            os.path.join(book["dir"], book_name),
            ref_data,
            es=es,
            content_col=book["content_col"],
            IDF=IDF,
        )
        write_dir = os.path.join("./edited/", "threshold_" + str(int(args.threshold)))
        # makedirs also creates ./edited/ when missing; the original
        # os.mkdir raised FileNotFoundError if the parent did not exist.
        os.makedirs(write_dir, exist_ok=True)
        write_reference_book(os.path.join(write_dir, book_name), data)