# -*- coding:utf-8 -*-
from gensim.models import Word2Vec
import os
import json
import warnings
import yaml
import time
from utils import extract_chapter_text, extract_text_by_page, extract_chapter_structure, classify_text_with_similarity, \
    entity_extraction, merge_dicts,entity_extraction_by_llm

warnings.filterwarnings('ignore')


def config(config_path):
    """Load run parameters from a YAML configuration file.

    Args:
        config_path: path to the YAML file. Expected keys: input_folder,
            output_folder, schemas, word2vec_corpus, useLLM, api_key,
            checkpoint.

    Returns:
        Tuple (input_folder, output_folder, schemas_path,
        word2vec_corpus_path, useLLM, api_key, checkpoint).

    Raises:
        KeyError: if any expected key is missing from the file.
    """
    # Explicit encoding: without it, open() uses the platform default
    # (e.g. GBK on Chinese-locale Windows) and a UTF-8 config breaks.
    # Also renamed the local dict so it no longer shadows this function.
    with open(config_path, "r", encoding="utf-8") as file:
        cfg = yaml.safe_load(file)
    return (
        cfg["input_folder"],
        cfg["output_folder"],
        cfg["schemas"],
        cfg["word2vec_corpus"],
        cfg["useLLM"],
        cfg["api_key"],
        cfg["checkpoint"],
    )


# Train the Word2Vec model used for chapter classification.
def train_word2vec(word2vec_corpus_path):
    """Train a Word2Vec model from a JSON corpus file.

    The corpus JSON must contain a 'sentence' mapping (key -> token list)
    and a 'categories_and_keywords' entry whose token lists are folded
    into the vocabulary after the initial training pass.

    Returns:
        (model, categories_and_keywords) tuple.
    """
    with open(word2vec_corpus_path, 'r', encoding='utf-8') as corpus_file:
        corpus = json.load(corpus_file)

    categories_and_keywords = corpus['categories_and_keywords']
    # Flatten {key: [w1, w2, ...], ...} into [[w1, w2, ...], ...],
    # keeping only the values that actually are token lists.
    sentences = [tokens for tokens in corpus['sentence'].values()
                 if isinstance(tokens, list)]

    model = Word2Vec(sentences, vector_size=100, window=5, min_count=1, workers=4)
    # Extend the vocabulary with the category keyword lists, then
    # fine-tune on them so category words have useful vectors.
    model.build_vocab(categories_and_keywords, update=True)
    model.train(categories_and_keywords,
                total_examples=len(categories_and_keywords), epochs=10)
    return model, categories_and_keywords


def extract(pdf_path, schemas_path, model, categories_and_keywords, useLLM, api_key, check_point):
    """Extract entities from a single PDF into a schema-shaped dict.

    Args:
        pdf_path: path of the PDF to process.
        schemas_path: JSON file mapping category -> list of entity types.
        model: trained Word2Vec model used to classify chapter titles.
        categories_and_keywords: category keyword lists for classification.
        useLLM: when truthy, additionally refine each category's result
            with an LLM pass.
        api_key: API key forwarded to the LLM extractor.
        check_point: path to a fine-tuned model checkpoint, if any.

    Returns:
        dict of {category: {entity_type: [values, ...]}}.
    """
    # Use the fine-tuned checkpoint when it exists on disk.
    # BUG FIX: the original tested the global `output_path` set in
    # __main__ — always False at this point (already-processed files are
    # skipped before extract is called) and a NameError for any other
    # caller. The flag is presumably meant to reflect checkpoint
    # availability — confirm against entity_extraction's contract.
    best_model = os.path.exists(check_point)

    # Load the schemas so every PDF yields the same JSON layout.
    with open(schemas_path, 'r', encoding='utf-8') as file:
        schemas = json.load(file)

    # Turn {category: [type1, type2, ...]} into
    # {category: {type1: set(), ...}}; sets deduplicate merged answers.
    entity_res = {}
    for category, entity_types in schemas.items():
        entity_res[category] = {}
        if isinstance(entity_types, list):
            for entity_type in entity_types:
                entity_res[category][entity_type] = set()

    chapter_structure = extract_chapter_structure(pdf_path)  # level-1 title -> level-2 title -> pages
    if chapter_structure is not None:
        text_by_page = extract_text_by_page(pdf_path)
        chapter_text = extract_chapter_text(text_by_page, chapter_structure)  # level-1 -> level-2 -> text
        # Texts are long, so each level-2 section is fed to the extractor
        # separately instead of concatenating a whole category at once.
        for chapter_l1, sections in chapter_text.items():
            predicted_category = classify_text_with_similarity(chapter_l1, categories_and_keywords, model)
            if predicted_category is not None:
                schema = schemas[predicted_category]  # invariant across sections; hoisted out of the loop
                for chapter_l2, text in sections.items():
                    res = entity_extraction(text, schema, best_model, check_point)
                    # Every section of a category shares the same entity
                    # types, so the answers are merged per category.
                    entity_res[predicted_category] = merge_dicts(entity_res[predicted_category], res)
                    if useLLM:
                        entity_res[predicted_category] = entity_extraction_by_llm(entity_res[predicted_category], text, schema, api_key)

    # Convert the sets to lists so the result is JSON-serialisable.
    for l1, per_type in entity_res.items():
        for l2, values in per_type.items():
            entity_res[l1][l2] = list(values)

    return entity_res


if __name__ == '__main__':
    # Load run parameters.
    input_folder, output_folder, schemas_path, word2vec_corpus_path, useLLM, api_key, check_point = config("config.yaml")
    # Train the Word2Vec classifier once and reuse it for every PDF.
    model, categories_and_keywords = train_word2vec(word2vec_corpus_path)
    # Robustness: make sure the output directory exists before writing.
    os.makedirs(output_folder, exist_ok=True)
    count = 0
    pdf_files = [f for f in os.listdir(input_folder) if f.endswith('.pdf')]
    s = time.time()
    for pdf_file in pdf_files:
        pdf_path = os.path.join(input_folder, pdf_file)  # full input path
        # BUG FIX: pdf_file.split('.')[0] truncates names that contain
        # extra dots (e.g. "report.v2.pdf" -> "report"); splitext drops
        # only the final ".pdf" extension.
        stem = os.path.splitext(pdf_file)[0]
        output_path = os.path.join(output_folder, stem + ('_with_llm.json' if useLLM else '.json'))
        # A result file with the same name means this PDF was already
        # processed; skip it.
        if os.path.exists(output_path):
            continue
        print(f"开始处理{pdf_file}-----------------------------------\n")
        start = time.time()
        res = extract(pdf_path, schemas_path, model, categories_and_keywords, useLLM, api_key, check_point)

        # Write each PDF's result to its own JSON file.
        with open(output_path, 'w', encoding='utf-8') as file:
            json.dump(res, file, ensure_ascii=False, indent=2)
        end = time.time()
        count += 1
        duration = end - start
        print("第{}个文档处理结束！耗时：{:.2f} 秒".format(count, duration))
    e = time.time()
    d = e - s
    print("共处理{}个文档，耗时：{:.2f} 秒".format(count, d))

    # TODO: knowledge fusion via OpenAI?
