import requests
import time
import json
import re

from prompt import prompt_judge
from typing import List, Dict


class LLMModel:
    """
    Thin HTTP client for the local model services.

    The LLM, embedding, and Wikipedia text-retrieval services listen on
    ports 8000, 8001 and 8002 respectively; the knowledge-graph services
    used below listen on 8088.
    """

    def __init__(self, url: str, temperature: float, max_length: int = 10000):
        """
        Initialise the client.

        :param url: base URL without a port, e.g. "http://127.0.0.1"
        :param temperature: sampling temperature
        :param max_length: maximum generation length
        """
        # NOTE(review): temperature and max_length are stored but never put
        # into any request payload — confirm whether the services need them.
        self.url = url
        self.temperature = temperature
        self.max_length = max_length

    def _endpoint(self, port: int, api: str = "") -> str:
        """
        Build the full service URL "<base>:<port>[/<api>]".

        BUG FIX: the original methods computed ``self.url + port``, which
        raises TypeError (str + int) and also omitted the ":" separator.
        """
        return f"{self.url}:{port}/{api}" if api else f"{self.url}:{port}"

    @staticmethod
    def _error(response) -> dict:
        """Uniform payload for a non-200 HTTP response."""
        return {"response_code": response.status_code, "error_info": response.text}

    def call_llm_local(self, prompt, system, port: int = 8000):
        """
        Call the local LLM with a system + user message pair.

        :param port: service port
        :param prompt: user message content
        :param system: system message content
        :return: {"data": <reply>, "response_code": 200} on success,
                 otherwise {"response_code": ..., "error_info": ...}
        """
        data = {
            "messages": [
                {"role": "system", "content": system},
                {"role": "user", "content": prompt},
            ]
        }
        start = time.time()
        # POST the chat request and report the round-trip latency.
        response = requests.post(self._endpoint(port), data=json.dumps(data))
        print(time.time() - start, "seconds")
        if response.status_code == 200:
            # The service is assumed to return {"response": <generated text>}.
            return {"data": response.json()['response'], "response_code": response.status_code}
        return self._error(response)

    def call_embedding(self, sentence1, sentence2, port: int = 8001):
        """
        Get an embedding similarity score for two sentences.

        :param sentence1: first sentence
        :param sentence2: second sentence
        :param port: service port
        :return: {"score": ..., "response_code": 200} on success,
                 otherwise {"response_code": ..., "error_info": ...}
        """
        data = {"sentence1": sentence1, "sentence2": sentence2}
        response = requests.post(self._endpoint(port), data=json.dumps(data))
        if response.status_code == 200:
            return {"score": response.json()['score'], "response_code": response.status_code}
        return self._error(response)

    def call_index(self, query: str, top_k: int = 3, port: int = 8088):
        """
        Text retrieval: fetch the top_k related passages for *query*.

        :param query: entity names plus the original question text
        :param top_k: number of passages to return
        :param port: service port
        :return: {"query": ..., "context": ..., "response_code": 200} on
                 success, otherwise {"response_code": ..., "error_info": ...}
        """
        data = {"query": query, "top_k": top_k}
        response = requests.post(self._endpoint(port), data=json.dumps(data))
        if response.status_code == 200:
            response_data = response.json()
            return {"query": response_data['query'], "response_code": response.status_code, "context": response_data['context']}
        return self._error(response)

    def call_index_score(self, query: str, top_k: int = 3, port: int = 8088):
        """
        Text retrieval: fetch the best retrieval score for *query*.

        :param query: entity names plus the original question text
        :param top_k: number of passages considered
        :param port: service port
        :return: {"score": ..., "context": ..., "response_code": 200} on
                 success, otherwise {"response_code": ..., "error_info": ...}
        """
        data = {"query": query, "top_k": top_k}
        response = requests.post(self._endpoint(port), data=json.dumps(data))
        if response.status_code == 200:
            response_data = response.json()
            return {"score": response_data['score'], "response_code": response.status_code, "context": response_data['context']}
        return self._error(response)

    def call_relation(self, query_id: str, port: int = 8088, api: str = "get_all_relations_of_an_entity") -> dict:
        """
        List all relations (and next-hop nodes) of an entity.

        :param api: service API path
        :param port: service port
        :param query_id: entity node id (QID)
        :return: {"relations": ..., "response_code": 200} on success,
                 otherwise {"response_code": ..., "error_info": ...}
        """
        data = {"query_id": query_id}
        response = requests.post(self._endpoint(port, api), data=json.dumps(data))
        if response.status_code == 200:
            # NOTE(review): the "tail" sub-key mirrors the original code —
            # confirm against the KG service's actual response schema.
            return {"relations": response.json()['relations']["tail"], "response_code": response.status_code}
        return self._error(response)

    def call_qid(self, entity: str, port: int = 8088, api: str = "label2qid") -> dict:
        """
        Map an entity label to a QID; when several candidates come back,
        keep the shortest one (heuristic for the most prominent entity).

        :param entity: entity label
        :param port: service port
        :param api: service API path
        :return: {"qid": ..., "label": ..., "response_code": 200} on success;
                 {"qid": "", "label": ..., "error": ...} when no QID exists
        """
        data = {"entity": entity}
        response = requests.post(self._endpoint(port, api), data=json.dumps(data))
        if response.status_code == 200:
            response_data = response.json()
            # Simplification of the original hand-rolled loop, which also
            # silently dropped candidates of 100+ characters.
            qid_return = min(response_data['qid'], key=len, default="")
            if qid_return == "":
                return {"qid": "", "label": response_data['label'], "error": "没有qid"}
            return {"qid": qid_return, "label": response_data['label'], "response_code": response.status_code}
        return self._error(response)

    def call_tail(self, query_id: str, pid: str, port: int = 8088, api: str = "get_tail_entities_given_head_and_relation"):
        """
        Fetch tail entities for a (head entity, relation) pair.

        :param query_id: head entity QID
        :param pid: relation PID
        :param port: service port
        :param api: service API path
        :return: {"data": [...], "response_code": 200} on success; each list
                 item is expected to hold "qid" and "label" keys
        """
        data = {"query_id": query_id, "pid": pid}
        response = requests.post(self._endpoint(port, api), data=json.dumps(data))
        if response.status_code == 200:
            # NOTE(review): the "head" sub-key mirrors the original code —
            # confirm against the KG service's actual response schema.
            return {"data": response.json()["tail_entities"]["head"], "response_code": response.status_code}
        return self._error(response)

class TopicEntityPruning:
    """
    Topic-entity pruning: keep the 2-3 entities best suited to answer the
    question. Novelty: fuses LLM judgments with embedding similarity into a
    single weighted score.
    """

    def __init__(self, url: str, temperature: float, max_length: int = 10000):
        """
        Initialise the pruner and its underlying service client.

        :param url: base URL of the model services
        :param temperature: sampling temperature
        :param max_length: maximum generation length
        """
        self.url = url
        self.temperature = temperature
        self.max_length = max_length
        self.llm_model = LLMModel(self.url, self.temperature, self.max_length)

    def _score_with_llm(self, question: str, entities: list):
        """
        Binary LLM relevance score (1.0 / 0.0) per entity.

        :param question: user question
        :param entities: objects exposing an ``extract`` attribute (summary)
        :return: list of floats aligned with *entities*
        """
        scores = []
        system_llm = """这个判断实体是否有可能帮助构建知识图谱来回答上述问题？请只用回答是或否 。"""
        for entity in entities:
            prompt_llm = f"""【用户问题】
    {question}
【实体描述】
    {entity.extract}
            """
            response = self.llm_model.call_llm_local(prompt_llm, system_llm, 8000)
            # BUG FIX: the original tested `"是" in response`, which inspects
            # the *dict keys* of the response payload, never the model reply.
            answer = response.get("data", "")
            scores.append(1.0 if "是" in answer else 0.0)
        return scores

    def _score_with_embedding(self, question: str, entities: list):
        """
        Embedding similarity score per entity via the embedding service.

        :param question: user question
        :param entities: objects exposing an ``extract`` attribute (summary)
        :return: list of floats aligned with *entities*
        """
        return [
            self.llm_model.call_embedding(question, entity.extract, 8001)["score"]
            for entity in entities
        ]

    def prune(self, question: str, entities: list, weight: float = 0.5, top_k: int = 3):
        """
        Rank candidate entities by a weighted fusion of LLM and embedding
        scores and return the top_k.

        :param top_k: number of entities to keep
        :param weight: weight of the LLM score (embedding gets 1 - weight)
        :param question: user question
        :param entities: candidate entities (objects with ``extract``)
        :return: {"final_scores": [{"entity": ..., "score": ...}, ...]}
        """
        llm_scores = self._score_with_llm(question, entities)
        embedding_scores = self._score_with_embedding(question, entities)

        final_scores = [
            {"entity": entity, "score": weight * llm_score + (1 - weight) * emb_score}
            for entity, llm_score, emb_score in zip(entities, llm_scores, embedding_scores)
        ]
        # BUG FIX: the original sorted with key=lambda x: x[1], but the items
        # are dicts, so that raised KeyError(1). Sort on the fused score.
        final_scores.sort(key=lambda item: item["score"], reverse=True)
        # Slicing already clamps top_k to the list length.
        return {"final_scores": final_scores[:top_k]}

def extract_json(data):
    """Extract and parse a ```json ... ``` fenced block from *data*.

    Returns the parsed object, or None when no fenced JSON block is found
    or its contents fail to parse.
    """
    fenced = re.search(r"```json\n(.*?)\n```", data, re.DOTALL)
    if fenced is None:
        print("未找到有效的JSON代码块")
        return None
    try:
        return json.loads(fenced.group(1))
    except json.JSONDecodeError as err:
        print(f"JSON解析失败: {err}")
        return None

def get_wiki_data(entity: str):
    """
    Fetch the Wikipedia summary ("extract") for an entity.

    :param entity: entity name / page title
    :return: {"extract": <summary or None>, "response_code": 200} on success,
             otherwise {"response_code": ..., "error_info": ...}
    """
    from urllib.parse import quote

    # BUG FIX: percent-encode the title so names containing spaces or "/"
    # form a valid REST path segment.
    url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{quote(entity, safe='')}"
    # Timeout added so a stalled Wikipedia request cannot hang the pipeline.
    r = requests.get(url, timeout=10)
    if r.status_code == 200:
        # Debug print of the full payload removed.
        return {"extract": r.json().get("extract"), "response_code": r.status_code}
    return {"response_code": r.status_code, "error_info": r.text}

# Shared client instance for the local LLM / embedding / retrieval services.
server = LLMModel("http://127.0.0.1", 0.1, 10000)

def ner_extract(query: str):
    """
    Named-entity recognition via the local LLM.

    :param query: user question text
    :return: parsed JSON payload (expected to hold an "entities" list), or
             None when the model reply contains no valid JSON block
    """
    system_extract_entities = """请仔细阅读问题内容，并从中识别出所有具有具体名称的对象（如人名、地名、组织名、时间、事件等）。 将这些实体以结构化的 JSON 格式输出。每个实体应包含其类型和对应的名称。
        【输出示例】
        ```json
            {
                "entities": ["张三", "比赛"]
            }
        ```
            """
    raw_reply = server.call_llm_local(query, system_extract_entities, 8000)["data"]
    return extract_json(raw_reply)

def entity_linking(query: str, entities_links: dict):
    """
    Link recognized entity names to Wikipedia summaries, then let the LLM
    pick the entities that best match the user's question.

    :param query: original user question
    :param entities_links: NER output, e.g. {"entities": ["name", ...]}
    :return: parsed JSON from the LLM (e.g. {"entity": [...]}), or None
    """
    entity_names = entities_links.get("entities") or []
    # Enrich each candidate with its Wikipedia overview.
    all_extracts = []
    for name in entity_names:
        wiki = get_wiki_data(name)
        # BUG FIX: get_wiki_data returns no "wiki_id" key, so the original
        # indexing raised KeyError; the error path also lacks "extract", so
        # use .get and degrade failed lookups to an empty summary.
        all_extracts.append({"entity": name, "extract": wiki.get("extract") or ""})
    # Build the entity-linking prompt.
    prompt = f"【用户输入】\n  {query}\n"
    system = """请从用户的输入中选择最合适的实体，并用json固定格式返回实体的名称
    【输出示例】
        '''json
        {
            "entity": ["实体1", "实体2"]
        }
        '''
    """
    for extract in all_extracts:
        # BUG FIX: the original interpolated the whole dict after 实体：;
        # only the entity's name was intended.
        prompt += f"""【实体部分描述】\n  实体：{extract["entity"]}，概述：{extract["extract"]}\n"""

    llm_response = server.call_llm_local(prompt, system, 8000)["data"]
    return extract_json(llm_response)

def retrieval(query_question: str, entities_list: list[dict]):
    """
    Retrieve supporting passages for the question plus its linked entities.

    :param query_question: user question
    :param entities_list: dicts with an "entity" key (pruned candidates)
    :return: {"context_all": numbered passages concatenated into one string}
    """
    # Build the retrieval query: entities + question.
    query = "【实体】：\n"
    for i, entity in enumerate(entities_list):
        query += f"    {i}." + entity["entity"] + "\n"
    query += f"\n【用户问题】\n    {query_question}"
    # BUG FIX: the original read the echoed "query" string and then accessed
    # `.context` on each *character*, which raised AttributeError. The
    # retrieved passages live under the "context" key.
    contexts = server.call_index(query, top_k=3)["context"]
    # Concatenate the passages into one numbered context block.
    context_all = ""
    for i, passage in enumerate(contexts):
        # NOTE(review): each passage is assumed to be plain text — confirm
        # the index service's response schema.
        context_all += f"{i}." + str(passage) + "\n"
    return {"context_all": context_all}

def llm_judge(query_question: str, context: str):
    """
    Ask the LLM whether *context* suffices to answer the question.

    :param query_question: user question
    :param context: retrieved passages
    :return: {"judge_result": True} when the model answers "{是}", else
             {"judge_result": False}
    """
    system = """请判断此上下文是否支持回答该用户问题？如果支持则回答{是},如果不支持则回答{否}"""
    prompt = f"【用户问题】\n  {query_question}\n【上下文】\n  {context}\n  "
    reply = server.call_llm_local(prompt, system, 8000)["data"]
    return {"judge_result": "{是}" in reply}


# User question seeding the iterative KG-RAG loop.
question = "Who is the father of Ada Lovelace?"

# Clues accumulated across iterations (fed back into the judge prompt).
clue = ""

# Maximum number of reasoning iterations.
depth = 3
for i in range(0, depth):
    # Step 1: entity pipeline.

    # Named-entity recognition.
    json_data = ner_extract(question)

    # Entity linking.
    entities = entity_linking(question, json_data)

    # Topic pruning.
    # NOTE(review): a fresh TopicEntityPruning is constructed on every
    # iteration; it could be hoisted above the loop.
    topic_pruning = TopicEntityPruning("http://127.0.0.1", 0.1, 10000)
    entities_data = topic_pruning.prune(question, entities)

    # Text retrieval.
    retrieval_data = retrieval(question, entities_data["final_scores"])
    context = retrieval_data["context_all"]

    # Answer judgment (LLM decides if the context suffices).
    judge_data = llm_judge(question, retrieval_data["context_all"])

    if judge_data["judge_result"]:
        print("回答：")
        # LLM answer result.
        # NOTE(review): this branch produces no answer and never breaks the
        # loop, and it leaves `llm_data` unassigned for the final print below.
    else:
        # Step 2: map the pruned entities to knowledge-graph QIDs.
        entities_all = entities_data["final_scores"]
        entities_data_all = []
        for entity in entities_all:
            call_data = server.call_qid(entity["entity"])
            entities_data_all.append({"qid": call_data["qid"], "label": call_data["label"]})

        # Path structure: (head, relation) pairs the LLM deems relevant.
        path_list = []
        system = """请判断此关系是否与用户问题有关？如果有关则回答{是},如果不有关则回答{否}"""
        for entity in entities_data_all:

            # Find all relations of this entity.
            all_relations = server.call_relation(entity["qid"])["relations"]

            # Prune the relations with the LLM.
            for relation in all_relations:

                prompt = f"【用户问题】\n  {question}\n【关系】\n  " + relation["label"] + "\n  "
                llm_response = server.call_llm_local(prompt, system, 8000)["data"]
                if "{是}" in llm_response:

                    # Record the path structure [{query_id -> , relations -> }]
                    # NOTE(review): only *labels* are stored here, but call_tail
                    # below expects a QID and a PID — confirm the service
                    # accepts labels, or store entity["qid"] / relation ids too.
                    path_list.append({"head": entity["label"], "relations": relation["label"]})

        # Step 3: query tail entities for each (head, relation) path.
        path_list_2 = []
        for path in path_list:
            tail_datas = server.call_tail(path["head"], path["relations"])
            scores = []
            for tail in tail_datas:
                # Step 4: score candidate triples.
                # NOTE(review): call_tail returns a dict, so this iterates its
                # *keys*; tail["label"] then likely fails — presumably
                # tail_datas["data"] was intended. Verify.

                # Build the triple text "head relation tail".
                prompt_rdf = path["head"] + " " + path["relations"] + " " + tail["label"]
                # Filter tail entities by retrieval score.
                # NOTE(review): call_index_score returns a whole dict, which is
                # stored under "score"; the numeric score is never used below,
                # so no actual filtering happens.
                scores.append({"score": server.call_index_score(prompt_rdf), "tail": tail["label"]})
            for score in scores:
                # Record the full (head, relation, tail) path.
                path_list_2.append({"head": path["head"], "relations": path["relations"], "tail": score["tail"]})

        path_prompt = ""

        for i, path_data in enumerate(path_list_2):
            # NOTE(review): `=` overwrites the previous triple on every pass —
            # `+=` (with a separator) was probably intended. This `i` also
            # shadows the outer loop variable.
            path_prompt = f"{i}.({path_data['head']}, {path_data['relations']}, {path_data['tail']})"

        judge_system = prompt_judge

        judge_prompt = f"""问题：\n  {question} \n相关线索：\n  {clue}\n知识三元组：\n  {path_prompt}\n参考文献：\n    {context}\n"""
        # NOTE(review): `judge_prompt` is built but never sent — the call below
        # passes the bare `question` instead, discarding clues/triples/context.
        judge_answer = server.call_llm_local(question, judge_system, 8000)["data"]
        llm_data = extract_json(judge_answer)
        # Expected JSON shape of the judge's reply:
        # {
        #     "answer": ""
        #     "clue": ""
        #     "question_answer": ""
        #     "new_query": ""
        # }
        if llm_data["answer"]:
            # An answer was produced — stop iterating.
            break
        else:
            # No answer yet: carry the clue forward and refine the question.
            clue = llm_data["clue"]
            question = llm_data["new_query"]


# Print the final result.
# NOTE(review): `llm_data` is never assigned on the judge-success branch
# above, so this print can raise NameError.
print(llm_data)













