import requests
import time
import json
import re
import logging
from typing import List, Dict, Optional, Tuple, Any
from dataclasses import dataclass
from enum import Enum
import numpy as np
from collections import defaultdict

# Logging configuration: INFO level, written both to a file and to the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('./logs/ToG.log', encoding='utf-8'),  # log file (the ./logs directory must already exist)
        logging.StreamHandler()  # also echo to the console (optional)
    ]
)
logger = logging.getLogger(__name__)

class ReasoningMode(Enum):
    """Reasoning-mode enumeration; selects which evidence sources answer_question uses."""
    RETRIEVAL_ONLY = "retrieval_only"  # text retrieval only (graph reasoning skipped)
    HYBRID = "hybrid"                  # text retrieval plus knowledge-graph reasoning
    GRAPH_ONLY = "graph_only"          # graph reasoning only (no direct retrieval answer)
    LLM_ONLY = "llm_only"              # answer straight from the LLM, no external evidence

@dataclass
class Entity:
    """Entity record produced by recognition and enriched by linking/pruning."""
    name: str                # surface name extracted from the user question
    qid: str = ""            # knowledge-graph QID ("" when linking failed)
    label: str = ""          # canonical label returned by the QID lookup
    extract: str = ""        # Wikipedia summary extract for the entity
    confidence: float = 0.0  # relevance score (set by recognition, overwritten by pruning)
    entity_type: str = ""    # PERSON / LOCATION / ORGANIZATION / EVENT / TIME, etc.

@dataclass
class Relation:
    """A (head, relation, tail) triple taken from the knowledge graph."""
    head: str                # head entity name
    relation: str            # human-readable relation label
    tail: str                # tail entity label
    confidence: float = 0.0  # optional confidence score (currently unset by builders)
    pid: str = ""            # knowledge-graph property id (PID) of the relation

@dataclass
class ReasoningPath:
    """A reasoning chain plus its LLM-assigned usefulness score."""
    entities: List[Entity]     # entities on the path (currently only the head entity)
    relations: List[Relation]  # relation triples on the path
    score: float = 0.0         # usefulness score in [0, 1] assigned by GraphReasoner
    evidence: str = ""         # optional supporting evidence text

class LLMModel:
    """HTTP client wrapper for the local LLM / embedding / retrieval / KG services.

    Each backend service is addressed by port. `_make_request` centralizes
    retries, timeouts and error reporting so the `call_*` helpers stay thin.
    Every method returns a dict with a "response_code" key (200 on success).
    """

    def __init__(self, base_url: str, temperature: float = 0.1, max_length: int = 10000):
        self.base_url = base_url        # last host used (kept for backward compatibility)
        self.temperature = temperature  # sampling temperature for LLM calls
        self.max_length = max_length    # max_tokens forwarded to the LLM
        self.request_timeout = 200      # seconds per HTTP request
        self.retry_count = 3            # default number of retry attempts

    def _make_request(self, endpoint: str, data: Dict, retries: int = None) -> Dict:
        """POST `data` as JSON to `<host>:<endpoint>` with retries and backoff.

        Returns {"data": <parsed json>, "response_code": 200} on success, or
        {"response_code": 500, "error_info": ...} after all retries fail.
        """
        if retries is None:
            retries = self.retry_count

        # Route knowledge-graph endpoints (port 8088) to localhost, everything
        # else to the default host. Computed per call instead of permanently
        # mutating self.base_url, so interleaved callers cannot clobber each
        # other's routing.
        host = "http://localhost" if "8088" in endpoint else "http://172.20.0.89"
        self.base_url = host  # kept in sync for callers that read base_url

        for attempt in range(retries):
            try:
                response = requests.post(
                    f"{host}:{endpoint}",
                    json=data,  # serializes and sets Content-Type: application/json
                    timeout=self.request_timeout
                )

                if response.status_code == 200:
                    return {"data": response.json(), "response_code": 200}
                logger.warning(f"Request failed with status {response.status_code}: {response.text}")

            except requests.exceptions.RequestException as e:
                logger.error(f"Request attempt {attempt + 1} failed: {str(e)}")
                if attempt == retries - 1:
                    return {"response_code": 500, "error_info": str(e)}

            # Exponential backoff before the next attempt (the original only
            # backed off after exceptions, hammering the service on HTTP errors).
            if attempt < retries - 1:
                time.sleep(2 ** attempt)

        return {"response_code": 500, "error_info": "Max retries exceeded"}

    def call_llm_local(self, prompt: str, system: str, port: int = 8009) -> Dict:
        """Call the local OpenAI-compatible chat endpoint (qwen2.5-7b / 32b / llama-3-8b).

        Returns {"data": <assistant message text>, "response_code": 200} on
        success, otherwise the error dict produced by `_make_request`.
        """
        data = {
            "messages": [
                {"role": "system", "content": system},
                {"role": "user", "content": prompt}
            ],
            "temperature": self.temperature,
            "max_tokens": self.max_length,
            # NOTE(review): model path is hard-coded here; change it to switch models.
            "model": "/share/models/qwen2.5-7b"
        }

        start_time = time.time()
        result = self._make_request(f"{port}/v1/chat/completions", data)
        logger.info(f"LLM call completed in {time.time() - start_time:.2f} seconds")

        if result["response_code"] == 200:
            return {"data": result["data"]["choices"][0]["message"]["content"], "response_code": 200}
        return result

    def call_embedding(self, sentence1: str, sentence2: str, port: int = 8001) -> Dict:
        """Compute an embedding similarity score between two sentences."""
        data = {"sentence1": sentence1, "sentence2": sentence2}
        result = self._make_request(f"{port}", data)

        if result["response_code"] == 200:
            return {"score": result["data"].get('score', 0.0), "response_code": 200}
        return result

    def call_index(self, query: str, top_k: int = 3, port: int = 8089) -> Dict:
        """Retrieve the top_k text passages for `query` from the retrieval service."""
        data = {"query": query, "top_k": top_k}
        logger.debug(f"Retrieval request payload: {data}")
        result = self._make_request(f"{port}/search", data)
        logger.debug(f"Retrieval raw response: {result}")
        if result["response_code"] == 200:
            return {
                "contexts": result["data"]["results"],
                "query": query,
                "response_code": 200
            }
        return result

    def call_qid(self, entity: str, port: int = 8088) -> Dict:
        """Resolve an entity label to a QID via the knowledge-graph service.

        When several QIDs match, the shortest is chosen as the most likely match.
        """
        data = {"label": entity}
        result = self._make_request(f"{port}/label2qid", data)
        logger.debug(f"label2qid result: {result}")
        if result["response_code"] == 200:
            qid_list = result["data"]
            if qid_list:
                # Pick the shortest QID as the most likely match.
                best_qid = min(qid_list, key=len)
                return {
                    "qid": best_qid,
                    "label": entity,
                    "response_code": 200
                }
            # No match: include a non-200 response_code so callers can rely on
            # the same check everywhere (the original dict omitted the key).
            return {"qid": "", "label": "", "error": "No QID found", "response_code": 404}
        return result

class EntityRecognizer:
    """Named-entity recognizer: asks the LLM to extract entities from a question."""

    def __init__(self, llm_model: "LLMModel"):
        self.llm_model = llm_model

    def extract_entities(self, query: str) -> List["Entity"]:
        """Extract named entities from `query` via the LLM.

        Returns a list of Entity(name, entity_type, confidence); an empty list
        on any failure (LLM error or unparsable output).
        """
        system_prompt = """Please analyze the user question carefully and identify all important named entities.Please keep entity names to one word whenever possible.
        Label each entity with a type(PERSON、LOCATION、ORGANIZATION、EVENT、TIME, etc.).Requires entities to be filled in according to Wikipedia's entity name format.
        
        Output format:
        ```json
        {
            "entities": [
                {"name": "Entity name", "type": "Entity type", "confidence": 0.95}
            ]
        }
        ```"""

        response = self.llm_model.call_llm_local(query, system_prompt)

        if response["response_code"] == 200:
            json_data = self._extract_json(response["data"])
            logger.debug(f"extract_json_data: {json_data}")
            if json_data and "entities" in json_data:
                entities = []
                for ent_data in json_data["entities"]:
                    # Guard the float conversion: the LLM occasionally emits a
                    # non-numeric confidence value.
                    try:
                        conf = float(ent_data.get("confidence", 0.0))
                    except (TypeError, ValueError):
                        conf = 0.0
                    entities.append(Entity(
                        name=ent_data.get("name", ""),
                        entity_type=ent_data.get("type", ""),
                        confidence=conf
                    ))
                return entities

        logger.warning("Entity extraction failed, returning empty list")
        return []

    def _extract_json(self, text: str) -> Optional[Dict]:
        """Extract and parse the first ```json fenced block in `text`.

        Tolerates arbitrary whitespace around the fences (the original pattern
        required exact newlines and silently failed otherwise).
        """
        match = re.search(r"```json\s*(.*?)\s*```", text, re.DOTALL)
        if match:
            try:
                return json.loads(match.group(1))
            except json.JSONDecodeError as e:
                logger.error(f"JSON parsing failed: {e}")
        return None

class EntityLinker:
    """Entity linker: enriches recognized entities with Wikipedia extracts and KG QIDs."""

    # Bounded retry count for the Wikipedia summary endpoint (the original
    # recursed without any limit on persistent failures).
    WIKI_RETRIES = 3

    def __init__(self, llm_model: "LLMModel"):
        self.llm_model = llm_model

    def link_entities(self, query: str, entities: List["Entity"]) -> List["Entity"]:
        """Link each entity to Wikipedia (extract) and the KG (qid/label).

        Entities whose Wikipedia lookup fails are dropped from the result.
        """
        linked_entities = []
        time.sleep(3)  # crude rate limiting before the Wikipedia request burst

        for entity in entities:
            wiki_data = self._get_wiki_data(entity.name)
            logger.debug(f"wiki_data: {wiki_data}")
            if wiki_data.get("response_code") == 200:
                entity.extract = wiki_data.get("extract", "")

                qid_data = self.llm_model.call_qid(entity.name)
                if qid_data.get("response_code") == 200:
                    logger.debug(f"qid_data: {qid_data}")
                    entity.qid = qid_data.get("qid", "")
                    entity.label = qid_data.get("label", entity.name)

                linked_entities.append(entity)
                logger.info(f"Successfully linked entity: {entity.name} -> {entity.qid}")
            else:
                logger.warning(f"Failed to link entity: {entity.name}")
        logger.debug(f"linked_entities: {linked_entities}")
        return linked_entities

    def _get_wiki_data(self, entity: str) -> Dict:
        """Fetch the Wikipedia summary extract for `entity`.

        Always returns a dict: {"extract": ..., "response_code": 200} on
        success, otherwise a dict with a non-200 "response_code". The original
        fell through and returned None on a non-200 HTTP status, which crashed
        the caller with AttributeError; it also retried via unbounded recursion.
        """
        url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{entity.replace(' ', '_')}"
        last_error = ""
        for _ in range(self.WIKI_RETRIES):
            time.sleep(3)  # rate limiting, as in the original per-call sleep
            try:
                response = requests.get(url, timeout=1000)
                if response.status_code == 200:
                    data = response.json()
                    return {
                        "extract": data.get("extract", ""),
                        "response_code": 200
                    }
                # Definitive non-200 (e.g. 404 for an unknown title): report it
                # instead of silently returning None.
                return {"response_code": response.status_code, "error_info": response.text}
            except Exception as e:
                last_error = str(e)
                logger.error(f"Wikipedia API error: {e}")
                logger.info("Retrying...")
        return {"response_code": 500, "error_info": last_error}

    def _judge_entity_linking(self, query: str, entity: "Entity") -> Tuple[str, str]:
        """Ask the LLM for a better in-question entity when linking found no QID.

        Returns (qid, label); qid stays "" because this helper only re-selects
        a candidate label — resolving it to a QID is left to the caller.
        """
        judge_qid, judge_label = "", ""
        prompt = f"用户问题：\n  {query}\n"
        prompt += f"原提取实体：{entity.name}"
        # Fences fixed to ```json so the LLM output matches _extract_json's
        # pattern (the original used '''json / `` and could never be parsed).
        system = """我原提取实体无法从维基百科中获取到实体的qid，请帮我在用户问题中选择最符合能够查询、与原实体意义相同的的上下文实体
    输出格式：
    ```json
    {
            "best_match": "最佳匹配的实体名称",
            "confidence": 0.85,
            "reasoning": "选择理由"
    }
    ```
        """
        response = self.llm_model.call_llm_local(prompt, system)

        if response["response_code"] == 200:
            json_data = self._extract_json(response["data"])
            # Use the parsed payload (the original assigned it and threw it away).
            if json_data:
                judge_label = json_data.get("best_match", "")

        return judge_qid, judge_label

    @staticmethod
    def _extract_json(text: str) -> Optional[Dict]:
        """Parse the first ```json fenced block in `text`; None when absent/invalid."""
        match = re.search(r"```json\s*(.*?)\s*```", text, re.DOTALL)
        if match:
            try:
                return json.loads(match.group(1))
            except json.JSONDecodeError:
                pass
        return None

class TopicEntityPruning:
    """Prunes linked entities down to the top-k most question-relevant ones."""

    def __init__(self, llm_model: "LLMModel"):
        self.llm_model = llm_model

    def prune_entities(self, question: str, entities: List["Entity"],
                      weight: float = 0.8, top_k: int = 3) -> List["Entity"]:
        """Score entities by topical relevance and keep the best `top_k`.

        The combined score is `weight` * LLM judgment + (1 - weight) *
        embedding similarity; it is written back to each entity's
        `confidence` field before ranking.
        """
        if not entities:
            return []
        logger.debug(f"prune_entities input: {entities}")

        llm_scores = self._score_with_llm(question, entities)
        embedding_scores = self._score_with_embedding(question, entities)

        # Blend the two score sources into each entity's confidence.
        for entity, llm_score, emb_score in zip(entities, llm_scores, embedding_scores):
            entity.confidence = weight * llm_score + (1 - weight) * emb_score

        # Highest combined confidence first; slicing already handles
        # top_k > len(entities), no min() needed.
        ranked = sorted(entities, key=lambda e: e.confidence, reverse=True)
        logger.debug(f"scored_entities: {ranked}")
        return ranked[:top_k]

    def _score_with_llm(self, question: str, entities: List["Entity"]) -> List[float]:
        """Ask the LLM for a 0.0-1.0 relevance score per entity."""
        scores = []
        system_prompt = """请评估给定实体与用户问题的相关性。
        输出格式：
        ```json
        {"relevance_score": 0.85, "reasoning": "评分理由"}
        ```
        评分范围：0.0-1.0，越高表示越相关。"""

        for entity in entities:
            prompt = f"""【用户问题】
{question}

【实体信息】
名称：{entity.name}
类型：{entity.entity_type}
描述：{entity.extract[:200]}...

请评估此实体对回答问题的重要性。"""

            response = self.llm_model.call_llm_local(prompt, system_prompt)

            if response["response_code"] == 200:
                json_data = self._extract_json(response["data"])
                if json_data and "relevance_score" in json_data:
                    # Guard against non-numeric LLM output before converting.
                    try:
                        scores.append(float(json_data["relevance_score"]))
                        continue
                    except (TypeError, ValueError):
                        logger.warning(f"Non-numeric relevance_score: {json_data['relevance_score']}")
                # Simple keyword match as a fallback when JSON was unusable.
                simple_score = 1.0 if any(word in response["data"].lower()
                                          for word in ["相关", "重要", "有用", "relevant"]) else 0.3
                scores.append(simple_score)
            else:
                scores.append(0.3)  # default score on LLM failure

        return scores

    def _score_with_embedding(self, question: str, entities: List["Entity"]) -> List[float]:
        """Score each entity by embedding similarity between question and entity text."""
        scores = []
        for entity in entities:
            entity_text = f"{entity.name} {entity.extract[:100]}"
            response = self.llm_model.call_embedding(question, entity_text)

            if response["response_code"] == 200:
                scores.append(response["score"])
            else:
                scores.append(0.3)  # default score on embedding-service failure

        return scores

    def _extract_json(self, text: str) -> Optional[Dict]:
        """Parse the first ```json fenced block in `text`; None when absent/invalid."""
        match = re.search(r"```json\s*(.*?)\s*```", text, re.DOTALL)
        if match:
            try:
                return json.loads(match.group(1))
            except json.JSONDecodeError:
                pass
        return None

class GraphReasoner:
    """Knowledge-graph reasoner: expands entities into scored one-hop reasoning paths."""

    def __init__(self, llm_model: "LLMModel"):
        self.llm_model = llm_model

    def find_reasoning_paths(self, entities: List["Entity"], question: str,
                           max_depth: int = 3) -> List["ReasoningPath"]:
        """Build candidate reasoning paths for every linked entity.

        NOTE(review): `max_depth` is currently unused — only single-hop
        (head, relation, tail) paths are built. Paths are returned sorted by
        descending LLM usefulness score.
        """
        all_paths = []
        logger.debug(f"entities: {entities}")
        for entity in entities:
            if not entity.qid:
                continue  # unlinked entities cannot be expanded in the KG

            # All relations (PIDs) attached to this entity in the KG.
            relations_datas = self.llm_model._make_request(
                "8088/get_all_relations_of_an_entity",
                {"qid": entity.qid}
            )
            logger.debug(f"relations_datas: {relations_datas}")

            if relations_datas["response_code"] != 200:
                continue

            relations = relations_datas["data"]
            # Keep only relations the LLM judges helpful for the question.
            relevant_relations = self._filter_relevant_relations(question, relations)
            logger.debug(f"relevant_relations: {relevant_relations}")

            for relation in relevant_relations:
                path = self._build_reasoning_path(entity, relation, question)
                if path:
                    all_paths.append(path)

        # Most useful paths first.
        all_paths.sort(key=lambda p: p.score, reverse=True)
        logger.debug(f"all_paths: {all_paths}")
        return all_paths

    def _filter_relevant_relations(self, question: str, relations) -> List[Dict]:
        """Return the relation keys the LLM marks as helpful ({yes})."""
        relevant_relations = []

        system_prompt = """Please determine if the relationship helps answer the question? Just answer {yes} or {no}."""
        logger.debug(f"relation candidates: {relations}")
        for relation in relations:
            input_relation = "\t".join(relation_data + "\n" for relation_data in relations.get(relation, []))
            prompt = f"""question:
    {question}

relations:
    {input_relation}
"""
            response = self.llm_model.call_llm_local(prompt, system_prompt)
            if response["response_code"] == 200 and "{yes}" in response["data"]:
                relevant_relations.append(relation)

        return relevant_relations

    def _build_reasoning_path(self, head_entity: "Entity", relation: str,
                            question: str) -> Optional["ReasoningPath"]:
        """Build and score a single (head, relation, best-tail) path, or None."""
        try:
            # Tail entities reachable from head via this relation.
            tail_data = self.llm_model._make_request(
                "8088/get_tail_entities_given_head_and_relation",
                {"qid": head_entity.qid, "pid": relation}
            )
            logger.debug(f"tail_data: {tail_data}")
            if tail_data["response_code"] != 200:
                return None

            # One {qid: label} dict per candidate tail entity.
            tail_entities = [{tail_qid: tail_label}
                             for tail_qid, tail_label in tail_data["data"].items()]
            logger.debug(f"tail_entities: {tail_entities}")
            if not tail_entities:
                return None

            # Let the LLM pick the tail most useful for the question.
            best_tail = self._select_best_tail_entity(question, tail_entities)
            if not best_tail:
                return None

            # Resolve the relation PID to a human-readable label; fall back to
            # the PID itself (the original indexed ["data"][0] unguarded and
            # crashed on service errors or empty results).
            label_result = self.llm_model._make_request("8088/pid2label", {"pid": relation})
            if label_result.get("response_code") == 200 and label_result.get("data"):
                relation_label = label_result["data"][0]
            else:
                relation_label = relation
            logger.debug(f"relation_label: {relation_label}")

            # Build the single-hop path.
            path_relation = Relation(
                head=head_entity.name,
                relation=relation_label,
                tail=best_tail.get("label", ""),
                pid=relation
            )
            path = ReasoningPath(
                entities=[head_entity],
                relations=[path_relation]
            )
            path.score = self._calculate_path_score(path, question)
            return path

        except Exception as e:
            logger.error(f"Error building reasoning path: {e}")
            return None

    def _select_best_tail_entity(self, question: str, tail_entities: List[Dict]) -> Optional[Dict]:
        """Ask the LLM which tail entity best helps answer the question.

        Returns {"label": ...}, an empty dict when the LLM output is unusable,
        or None for an empty candidate list.
        """
        if not tail_entities:
            return None

        # Fences fixed to ```json (the original '''json could never be matched
        # by _extract_json) and the missing comma in the example corrected.
        system = """Please determine which of these tail entities would be most helpful in answering the question and return the most helpful qid value and entity name in json format.
```json
    {
        "qid": "qid value",
        "label": "Entity name"
    }
```
"""
        entity_prompt = ""
        for entity in tail_entities:
            for qid in entity:
                entity_prompt += f"qid:{qid}\tEntity name:{entity[qid]}\n\t"

        prompt = f"""question:
{question}

entity:
{entity_prompt}

"""
        logger.debug(f"entity_prompt: {entity_prompt}")
        response = self.llm_model.call_llm_local(prompt, system)
        # Check the response code first: the original indexed response["data"]
        # unconditionally and raised KeyError on transport failures.
        if response["response_code"] != 200:
            return {}
        answer_json = self._extract_json(response["data"])
        logger.debug(f"answer_json: {answer_json}")
        if answer_json and "label" in answer_json:
            return {"label": answer_json["label"]}

        return {}

    def _calculate_path_score(self, path: "ReasoningPath", question: str) -> float:
        """LLM-score (0.0-1.0) how useful `path` is for answering `question`."""
        # Render the path as (head, relation, tail) triples.
        path_text = ""
        for relation in path.relations:
            path_text += f"\t({relation.head}, {relation.relation}, {relation.tail}) "

        system_prompt = """Please evaluate the usefulness of a given knowledge graph path for answering the question.
        Output format：
        ```json
        {"usefulness_score": 0.85}
        ```
        Rating range：0.0-1.0"""

        prompt = f"""question:
    {question}

reasoning path:
    {path_text}

Please rate the usefulness of this path."""

        response = self.llm_model.call_llm_local(prompt, system_prompt)

        if response["response_code"] == 200:
            json_data = self._extract_json(response["data"])
            logger.debug(f"path score payload: {json_data}")
            if json_data and "usefulness_score" in json_data:
                # Guard against non-numeric LLM output.
                try:
                    return float(json_data["usefulness_score"])
                except (TypeError, ValueError):
                    logger.warning(f"Non-numeric usefulness_score: {json_data['usefulness_score']}")

        return 0.1  # default score when scoring fails

    def _extract_json(self, text: str) -> Optional[Dict]:
        """Parse the first ```json fenced block in `text`; None when absent/invalid."""
        match = re.search(r"```json\s*(.*?)\s*```", text, re.DOTALL)
        if match:
            try:
                return json.loads(match.group(1))
            except json.JSONDecodeError:
                pass
        return None

class ThinkOnGraphSystem:
    """Think-on-Graph orchestrator: NER -> linking -> pruning -> retrieval -> graph reasoning."""

    def __init__(self, base_url: str = "http://172.20.0.89", temperature: float = 0.1):
        self.llm_model = LLMModel(base_url, temperature)
        self.entity_recognizer = EntityRecognizer(self.llm_model)
        self.entity_linker = EntityLinker(self.llm_model)
        self.entity_pruner = TopicEntityPruning(self.llm_model)
        self.graph_reasoner = GraphReasoner(self.llm_model)

        # One record per successfully answered question (see answer_question).
        self.reasoning_history = []

    def answer_question(self, question: str, max_iterations: int = 3,
                       mode: ReasoningMode = ReasoningMode.HYBRID) -> Dict:
        """Answer `question` using the pipeline selected by `mode`.

        Iterates up to `max_iterations` rounds, accumulating clues and refining
        the question between rounds. Returns a dict with at least
        "final_answer" and "confidence".
        """
        logger.info(f"Starting question answering: {question}")

        # LLM_ONLY bypasses the whole pipeline.
        if mode == ReasoningMode.LLM_ONLY:
            system_llm = "Answer the user's question in detail, based on the context and clues provided.If there is insufficient information, be honest and explain."
            response = self.llm_model.call_llm_local(question, system_llm)
            if response["response_code"] == 200:
                return {
                    "final_answer": response["data"],
                    "confidence": 0.8,
                    "evidence_type": "llm",
                    "reasoning_method": "llm_only"
                }
            return {"final_answer": "抱歉，无法基于现有信息回答该问题。", "confidence": 0.0}

        original_question = question
        accumulated_clues = ""

        for iteration in range(max_iterations):
            logger.info(f"Iteration {iteration + 1}/{max_iterations}")

            try:
                # Step 1: entity recognition.
                entities = self.entity_recognizer.extract_entities(question)
                logger.info(f"Extracted {len(entities)} entities")
                if not entities:
                    return self._generate_fallback_answer(original_question)

                # Step 2: entity linking (fills qid / label / extract).
                # TODO(review): re-run recognition when linking leaves qid
                # empty (cf. EntityLinker._judge_entity_linking).
                linked_entities = self.entity_linker.link_entities(question, entities)
                logger.info(f"Linked {len(linked_entities)} entities")

                # Step 3: topical pruning of the linked entities.
                pruned_entities = self.entity_pruner.prune_entities(question, linked_entities)
                logger.info(f"Kept {len(pruned_entities)} entities after pruning")

                # Step 4: text retrieval.
                retrieval_context = self._perform_retrieval(question, pruned_entities)
                logger.debug(f"retrieval_context: {retrieval_context}")

                # Step 5: can the retrieved context answer directly?
                can_answer_directly = self._can_answer_directly(question, retrieval_context)
                if can_answer_directly and mode != ReasoningMode.GRAPH_ONLY:
                    return self._generate_direct_answer(question, retrieval_context, accumulated_clues)

                if mode != ReasoningMode.RETRIEVAL_ONLY:
                    # Step 6: graph reasoning.
                    reasoning_paths = self.graph_reasoner.find_reasoning_paths(
                        pruned_entities, question
                    )
                    logger.debug(f"reasoning_paths: {reasoning_paths}")

                    # Step 7: combine retrieval + graph evidence.
                    result = self._comprehensive_reasoning(
                        question, retrieval_context, reasoning_paths, accumulated_clues
                    )
                    logger.debug(f"result: {result}")

                    # Truthiness check: the original compared != "" and thus
                    # accepted a missing/None final_answer as a success.
                    if result.get("final_answer"):
                        self.reasoning_history.append({
                            "question": original_question,
                            "iterations": iteration + 1,
                            "final_answer": result["final_answer"],
                            "reasoning_paths": len(reasoning_paths),
                            "mode": mode.value
                        })
                        return result

                    # Carry clues / refined question into the next round.
                    accumulated_clues += result.get("clues", "")
                    question = result.get("refined_question", question)
                else:
                    return self._generate_direct_answer(question, retrieval_context, accumulated_clues)

            except Exception as e:
                logger.error(f"Error in iteration {iteration + 1}: {e}", exc_info=True)
                continue

        # All iterations failed: best-effort fallback.
        return self._generate_fallback_answer(original_question)

    def _perform_retrieval(self, question: str, entities: List["Entity"]) -> str:
        """Retrieve passages for the question and keep only LLM-approved ones.

        Returns the approved passages joined by newlines, or "" when retrieval
        failed or nothing useful was found.
        """
        if not entities:
            return ""

        query = f"{question}"
        retrieval_result = self.llm_model.call_index(query, top_k=5)
        logger.debug(f"retrieval result: {retrieval_result}")

        # Check the response code BEFORE touching "contexts" — the original
        # indexed retrieval_result["contexts"] first, raising KeyError on any
        # retrieval failure, and its late status check was unreachable.
        if retrieval_result["response_code"] != 200:
            return ""

        system = "Please judge the search result and give a yes or no answer to indicate whether the result is helpful in answering the question. The results will be displayed in the form of {yes} or {no}"
        kept_contexts = []
        for result in retrieval_result["contexts"]:
            passage = "title:\n\t\t" + result["title"] + "\n\t" + "content:\n\t\t" + result["content"]
            prompt = f"question:\n\t{question}" + "\nreference:\n\t" + passage
            logger.debug(f"judge prompt: {prompt}")
            judge_response = self.llm_model.call_llm_local(prompt, system)
            # Skip passages whose judge call failed (the original indexed
            # ["data"] unconditionally and crashed on LLM errors).
            if judge_response["response_code"] != 200:
                continue
            logger.debug(f"judge verdict: {judge_response['data']}")
            if "{yes}" in judge_response["data"]:
                kept_contexts.append(passage)

        return_contexts = "\n".join(kept_contexts)
        logger.debug(f"retrieval returns: {return_contexts}")
        return return_contexts

    def _can_answer_directly(self, question: str, context: str) -> bool:
        """Ask the LLM whether `context` alone suffices to answer the question."""
        if not context.strip():
            return False

        system_prompt = """Please decide whether the given context contains enough information to answer the user question. Answer only with "{yes}" or "{no}"."""

        prompt = f"""【question】
{question}

【contexts】
{context}
"""
        logger.debug(f"direct-answer check prompt: {prompt}")
        response = self.llm_model.call_llm_local(prompt, system_prompt)
        # Guard the code before reading "data" (absent on failures).
        if response["response_code"] != 200:
            return False
        logger.debug(f"direct-answer verdict: {response['data']}")
        return "{yes}" in response["data"]

    def _generate_direct_answer(self, question: str, context: str, clues: str = "") -> Dict:
        """Generate an answer straight from retrieved context (no graph reasoning)."""
        system_prompt = """Answer the user's question in detail, based on the context and clues provided.If there is insufficient information, be honest and explain."""

        prompt = f"""question:
    {question}

context:
    {context}

known clues:
    {clues}

Please provide a detailed answer."""

        response = self.llm_model.call_llm_local(prompt, system_prompt)

        if response["response_code"] == 200:
            return {
                "final_answer": response["data"],
                "confidence": 0.8,
                "evidence_type": "retrieval",
                "reasoning_method": "direct_retrieval"
            }

        return {"final_answer": "抱歉，无法基于现有信息回答该问题。", "confidence": 0.0}

    def _comprehensive_reasoning(self, question: str, context: str,
                                paths: List["ReasoningPath"], clues: str) -> Dict:
        """Combine textual evidence and KG paths in one multi-step reasoning call.

        Returns the parsed reasoning dict, or an empty-answer dict (so the
        caller proceeds to the next iteration) on failure.
        """
        # Render each path as numbered (head, relation, tail) triples.
        path_descriptions = []
        for i, path in enumerate(paths):
            path_desc = ""
            for relation in path.relations:
                path_desc += f"({relation.head}, {relation.relation}, {relation.tail}) "
            path_descriptions.append(f"{i+1}. {path_desc}")

        paths_text = "\n".join(path_descriptions)

        system_prompt = """You are an intelligent reasoning system. Please answer the question by making multiple steps of reasoning based on the information provided. Please give your answer in one sentence.

Output format：
```json
{
    "final_answer": "The final answer (if the answer can be determined) or an empty string",
    "confidence": 0.85,
    "reasoning_steps": ["Reasoning step 1", "Reasoning step 2"],
    "clues": "newly discovered clues",
    "refined_question": "Improved question (for next round of reasoning)"
}
```"""

        prompt = f"""question:
    {question}

known clues:
    {clues}

textual evidence:
    {context[:800]}

Knowledge Graph Path:
    {paths_text}

Please make comprehensive reasoning."""
        logger.debug(f"comprehensive reasoning prompt: {prompt}")

        response = self.llm_model.call_llm_local(prompt, system_prompt)

        if response["response_code"] == 200:
            json_data = self._extract_json(response["data"])
            if json_data:
                return json_data

        return {
            "final_answer": "",
            "clues": clues,
            "refined_question": question,
            "confidence": 0.0
        }

    def _generate_fallback_answer(self, question: str) -> Dict:
        """Best-effort answer when every pipeline stage failed."""
        return {
            "final_answer": f"抱歉，我无法找到足够的信息来回答问题：{question}",
            "confidence": 0.0,
            "reasoning_method": "fallback"
        }

    def _extract_json(self, text: str) -> Optional[Dict]:
        """Parse the first ```json fenced block in `text`; None when absent/invalid."""
        match = re.search(r"```json\s*(.*?)\s*```", text, re.DOTALL)
        if match:
            try:
                return json.loads(match.group(1))
            except json.JSONDecodeError as e:
                logger.error(f"JSON parsing failed: {e}")
        return None

    def get_reasoning_statistics(self) -> Dict:
        """Aggregate statistics over all recorded (answered) questions."""
        if not self.reasoning_history:
            return {"total_questions": 0}

        total_questions = len(self.reasoning_history)
        avg_iterations = sum(h["iterations"] for h in self.reasoning_history) / total_questions
        successful_answers = sum(1 for h in self.reasoning_history if h["final_answer"])

        return {
            "total_questions": total_questions,
            "success_rate": successful_answers / total_questions,
            "average_iterations": avg_iterations,
            "reasoning_modes": list(set(h["mode"] for h in self.reasoning_history))
        }

# 使用示例
def main():
    """主函数示例"""
    # 初始化系统
    system = ThinkOnGraphSystem()
    
    # 测试问题
    test_questions = [
        # "Who is the father of Ada Lovelace?",
        # "What is the capital of France?",
        # "When was the first computer invented?",
        # "what does jamaican people speak?",
        # "what did james k polk do before he was president?",
        "who formed the american federation of labor?"
    ]
    
    for question in test_questions:
        logger.info(f"\n{'='*50}")
        logger.info(f"Question: {question}")
        logger.info(f"{'='*50}")
        
        # 尝试不同的推理模式
        for mode in [ReasoningMode.HYBRID, ReasoningMode.RETRIEVAL_ONLY]:
            logger.info(f"\nMode: {mode.value}")
            result = system.answer_question(question, mode=mode)
            print("最终结果：", result)
            logger.info(f"Answer: {result.get('final_answer', 'No answer')}")
            logger.info(f"Confidence: {result.get('confidence', 0.0)}")

    # 输出统计信息
    stats = system.get_reasoning_statistics()
    logger.info(f"\nReasoning Statistics: {stats}")

if __name__ == "__main__":
    main()