# -*- coding:utf-8 -*-
import time
import os
import os
import fitz  # PyMuPDF
import re
from sklearn.metrics.pairwise import cosine_similarity
import jieba
from paddlenlp import Taskflow
import openai
# A proxy is currently required to reach the OpenAI API — fill in your own proxy address here.
os.environ["http_proxy"] = "http://localhost:7890"
os.environ["https_proxy"] = "http://localhost:7890"

# 读取pdf目录结构
# Read the PDF's table of contents and build a nested chapter structure.
def extract_chapter_structure(pdf_path):
    """Extract the two-level chapter structure from a PDF's table of contents.

    Returns a dict mapping a cleaned level-1 chapter title (see
    ``preprocess_text``) to a dict of level-2 section titles, each with
    1-based ``start_page`` / ``end_page`` values. Returns ``None`` when the
    PDF carries no TOC.
    """
    doc = fitz.open(pdf_path)
    try:
        toc = doc.get_toc()  # list of [level, title, page] entries
        if len(toc) == 0:
            print("未提取到目录信息")
            return None

        chapter_structure = {}
        current_chapter_l1 = None
        current_chapter_l2 = None

        def _close_current_section(next_start_page):
            # Extend the currently open level-2 section up to the page just
            # before the next TOC entry, if that pushes its end forward.
            if current_chapter_l1 is None or current_chapter_l2 is None:
                return
            section = chapter_structure[current_chapter_l1][current_chapter_l2]
            if section["end_page"] < next_start_page - 1:
                section["end_page"] = next_start_page - 1

        for item in toc:
            level, title, page = item[0], item[1], item[2]
            if level == 1:
                # Bug fix: the previous chapter's last section used to keep
                # end_page == start_page; close it at the new chapter boundary.
                _close_current_section(page)
                current_chapter_l1 = preprocess_text(title)  # cleaned level-1 title is the key
                current_chapter_l2 = None
                chapter_structure[current_chapter_l1] = {}
            elif level == 2:
                if current_chapter_l1 is None:
                    # Malformed TOC: level-2 entry before any level-1 entry.
                    # The original code raised KeyError here; skip instead.
                    continue
                _close_current_section(page)
                current_chapter_l2 = title
                chapter_structure[current_chapter_l1][current_chapter_l2] = {
                    "start_page": page,
                    "end_page": page,
                }

        # Bug fix: the very last section of the document has no following TOC
        # entry, so extend it to the last page of the document.
        if current_chapter_l1 is not None and current_chapter_l2 is not None:
            section = chapter_structure[current_chapter_l1][current_chapter_l2]
            if section["end_page"] < doc.page_count:
                section["end_page"] = doc.page_count
        return chapter_structure
    finally:
        # Bug fix: the original leaked the document handle on the early return.
        doc.close()


# 根据页码读取内容 返回内容下标为页码 值为当页的文本
# Read page contents keyed by page number: result[page_no] is that page's text.
def extract_text_by_page(pdf_path):
    """Return a dict mapping 1-based page numbers to each page's raw text."""
    document = fitz.open(pdf_path)
    pages = {index + 1: document[index].get_text() for index in range(document.page_count)}
    document.close()
    return pages


# 根据目录和页码匹配每个章节的内容 返回内容键为章节名称，值为该章节的内容
# Match each section's text by page range: result keys are chapter/section names,
# values are the cleaned text of that section.
def extract_chapter_text(text_by_page, chapter_structure):
    """Assemble every section's text from its page span.

    ``text_by_page`` maps page number -> page text; ``chapter_structure`` is
    the nested dict produced by ``extract_chapter_structure``. Whitespace runs
    (spaces, newlines, ...) are collapsed to single spaces.
    """
    chapter_text = {}
    for chapter, sections in chapter_structure.items():
        chapter_text[chapter] = {}
        for section, span in sections.items():
            # Concatenate every page in [start_page, end_page]; missing pages contribute "".
            raw = "".join(
                text_by_page.get(page, "")
                for page in range(span["start_page"], span["end_page"] + 1)
            )
            chapter_text[chapter][section] = re.sub(r'\s+', ' ', raw).strip()
    return chapter_text


def preprocess_text(text):
    """Remove digits and punctuation, keeping word characters and whitespace."""
    without_digits = re.sub(r'\d+', '', text)
    return re.sub(r'[^\w\s]', '', without_digits)


# 计算词语相似度
# Compute word-pair similarity from embedding vectors.
def get_word_similarity(word1, word2, model):
    """Cosine similarity between two words' embeddings; 0.0 if either is out of vocabulary."""
    try:
        left = model.wv[word1].reshape(1, -1)
        right = model.wv[word2].reshape(1, -1)
    except KeyError:
        # Word missing from the embedding vocabulary — treat as unrelated.
        return 0.0
    return cosine_similarity(left, right)[0][0]


# 根据词语语义相似度匹配要抽取的类别（第一级类别）
def classify_text_with_similarity(text, categories, model):
    similarity_scores = {category: 0.0 for category in categories}

    # 中文分词
    words = jieba.cut(text)
    words = ' '.join(words)
    for word in words.split():
        for category in categories:
            for keyword in categories[category]:
                similarity_scores[category] += get_word_similarity(word, keyword, model)

    # 若分数全是0 则丢弃这段文本（考虑成本问题
    if all(v == 0 for v in similarity_scores.values()):
        return None
    else:
        # Choose the category with the highest accumulated similarity score 若是全0的话默认为‘学校基本情况’
        predicted_category = max(similarity_scores, key=similarity_scores.get)
        return predicted_category


def entity_extraction(sentence, schema, best_model, check_point):
    """Run PaddleNLP UIE information extraction over ``sentence``.

    When ``best_model`` is truthy, load the fine-tuned checkpoint at
    ``check_point``; otherwise use the stock pretrained pipeline. Returns a
    dict of entity type -> top candidate's surface text.
    """
    if best_model:
        extractor = Taskflow('information_extraction', schema=schema, task_path=check_point)
    else:
        extractor = Taskflow('information_extraction', schema=schema)

    predictions = extractor(sentence)[0]
    # Keep only "entity type: extracted text" for the best candidate of each type.
    result = {}
    if predictions:
        for entity_type, candidates in predictions.items():
            if candidates[0]:
                result[entity_type] = candidates[0]['text']
    return result


def chatgpt_function(content, key=None):
    """Send ``content`` to gpt-3.5-turbo and return the reply text.

    On an ``APIConnectionError`` the call is retried exactly once after a
    120-second back-off (matching the original behavior, with the duplicated
    request code factored into a loop); a second failure propagates.
    """
    openai.api_key = key
    messages = [
        {"role": "system", "content": "你是一个自然语言处理模型，用于完成NLP基础任务"},
        {"role": "user", "content": content},
    ]
    for attempt in range(2):
        try:
            response = openai.ChatCompletion.create(
                model="gpt-3.5-turbo",  # cheapest chat model
                messages=messages,
                temperature=0.1,  # low temperature: near-deterministic extraction output
            )
            break
        except openai.error.APIConnectionError as error:
            if attempt == 1:
                # Second failure: give up and let the caller see the error.
                raise
            print(error)
            time.sleep(120)  # back off before the single retry
    return response["choices"][0]["message"]["content"]


def entity_extraction_by_llm(res, sentence, schema, api_key):
    """Extract entities from ``sentence`` with ChatGPT, chunk by chunk, merged into ``res``.

    ``res`` is the accumulator dict (entity type -> set of values) that
    ``merge_dicts`` extends; ``schema`` lists the entity type names for the
    prompt. Returns the merged result; for an empty ``sentence`` it returns
    ``res`` unchanged (the original raised UnboundLocalError in that case).
    """
    # The model's prompt budget is ~4097 tokens; one Chinese character is about
    # 1.5 tokens and the instruction text takes roughly 2500 characters, so cap
    # each chunk at 2500 characters.
    chunk_size = 2500
    # Bug fix: accumulate chunk results. The original always merged each chunk
    # into the *original* ``res`` and overwrote the result, so entity types
    # first seen in an earlier chunk were silently dropped.
    merged = res
    for start in range(0, len(sentence), chunk_size):
        chunk = sentence[start:start + chunk_size]
        prompt = "你现在需要完成一个实体识别任务，定义的实体类别有：" + "、".join(
            schema) + "\n要求：1、输出格式表示为实体类型:实体名;2、输出的每个结果用换行符分割;3.未提及的内容输出为'None';4.若有多个答案只输出最优的那个;5.请从给定的句子中抽取，不要自行总结。\n" + f"句子：{chunk}"
        answer = chatgpt_function(prompt, key=api_key)
        # Parse "实体类型:实体名" lines into a dict; 'None' means "not mentioned".
        chunk_entities = {}
        for line in answer.split('\n'):
            # Bug fix: split on the first colon only, so extracted values that
            # themselves contain a colon are no longer discarded.
            parts = line.split(':', 1)
            if len(parts) == 2:
                entity_type, value = parts
                chunk_entities[entity_type.strip()] = None if value.strip() == 'None' else value.strip()
        # Every chunk shares the same entity types, so merge their answers.
        merged = merge_dicts(merged, chunk_entities)
    return merged



def merge_dicts(dict1, dict2):
    """Fold the extraction result ``dict2`` into the accumulator ``dict1``.

    ``dict1`` maps entity type -> set of values collected so far; ``dict2``
    maps entity type -> a single string, a list of strings, or ``None``.
    Returns a shallow copy of ``dict1`` with ``dict2`` merged in. Note the
    copy is shallow on purpose: existing sets are extended in place, matching
    the original accumulation behavior relied on by callers.
    """
    merged = dict1.copy()
    for key, value in dict2.items():
        if key not in merged:
            # New entity type: start an empty value set (even when value is None,
            # matching the original behavior).
            merged[key] = set()
        if isinstance(value, list):
            # Bug fix: the original called merged.update(list-of-strings), which
            # raises ValueError; the intent was to add every element to this
            # key's value set.
            merged[key].update(value)
        elif value is not None:
            # Single value: add it to the set so multiple answers accumulate.
            # TODO: could instead keep the value with the highest confidence.
            merged[key].add(value)
    return merged


# 将set转为json可以序列化的对象list
def set_encoder(obj):
    if isinstance(obj, set):
        return list(obj)
    raise TypeError(f"Object of type {type(obj)} is not JSON serializable")
