import datetime
import os
import time
import pickle
import logging
import asyncio
import itertools
import uuid
from fastapi import FastAPI, UploadFile, File, HTTPException, Request, Form, Query
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
import uvicorn

import tiktoken
from typing import List, Dict, Any, Optional, Iterator
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import PyPDFLoader
from langchain_experimental.graph_transformers.llm import LLMGraphTransformer
from langchain_openai import ChatOpenAI
from whyhow import schemas, AsyncWhyHow
from whyhow.schemas import Workspace
from dotenv import load_dotenv
from datetime import datetime
from whyhow import WhyHow, Node, Relation, Triple

# Uncomment if you are using rdflib elsewhere and need these imports
# from rdflib import Graph, URIRef, Literal
# from rdflib.namespace import RDF, RDFS, OWL
# from whyhow.raw.autogen import TriplePattern

# Load environment variables from .env
load_dotenv()
# Default HTTP timeout (seconds) passed to the WhyHow client via httpx.
DEFAULT_TIMEOUT = 3000.0
# Logging configuration
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        # Log to a file for post-mortem debugging and to the console for live monitoring.
        logging.FileHandler('graph_processor.log'),
        logging.StreamHandler()
    ]
)

# ----------------------------------------
# RateLimiter definition
# ----------------------------------------
class RateLimiter:
    """Async rate limiter enforcing requests-per-minute (RPM),
    tokens-per-minute (TPM) and tokens-per-day (TPD) budgets.

    Intended usage: call ``await limiter.wait(tokens)`` before each LLM call.
    Not safe for concurrent callers (counters are not lock-protected);
    the service uses it from a sequential processing loop.
    """

    def __init__(self, rpm_limit: int, tpm_limit: int, tpd_limit: int):
        """
        Initialize the rate limiter.
        :param rpm_limit: maximum requests per minute
        :param tpm_limit: maximum tokens per minute
        :param tpd_limit: maximum tokens per day
        """
        self.rpm_limit = rpm_limit
        self.tpm_limit = tpm_limit
        self.tpd_limit = tpd_limit

        # Timestamps of requests issued in the current one-minute window.
        self.request_times = []

        # Token usage counters.
        self.token_count_minute = 0
        self.token_count_day = 0
        # BUGFIX: the RPM and TPM checks previously shared a single
        # `last_minute_reset` timestamp. Because wait() runs the RPM check
        # first (which reset the shared timestamp), the TPM window never
        # expired and token_count_minute was only cleared when the limit was
        # actually hit. Track the two windows independently.
        self.last_rpm_reset = time.time()
        self.last_tpm_reset = time.time()
        self.last_day_reset = self._get_start_of_day()

    def _get_start_of_day(self) -> float:
        """Return the POSIX timestamp of local midnight today."""
        now = datetime.now()
        return datetime(now.year, now.month, now.day).timestamp()

    async def wait_for_rpm(self):
        """Block until issuing one more request stays within the RPM limit."""
        now = time.time()
        # Start a fresh one-minute window when the previous one has expired.
        if now - self.last_rpm_reset >= 60:
            self.request_times = []
            self.last_rpm_reset = now

        # Enforce a minimum spacing between consecutive requests.
        min_interval = 60 / self.rpm_limit
        if self.request_times:
            elapsed = now - self.request_times[-1]
            if elapsed < min_interval:
                await asyncio.sleep(min_interval - elapsed)
        # BUGFIX: record the actual send time (after any sleep) rather than
        # the pre-sleep timestamp, so spacing is not underestimated.
        self.request_times.append(time.time())

    async def wait_for_tpm(self, tokens_used: int):
        """Block until spending `tokens_used` stays within the TPM limit."""
        now = time.time()
        # Reset the per-minute token counter when its window has expired.
        if now - self.last_tpm_reset >= 60:
            self.token_count_minute = 0
            self.last_tpm_reset = now

        # If this spend would exceed the minute budget, sleep out the window.
        if self.token_count_minute + tokens_used > self.tpm_limit:
            wait_time = 60 - (now - self.last_tpm_reset)
            logging.warning(f"TPM limit reached ({self.token_count_minute}/{self.tpm_limit}), waiting {wait_time:.1f}s")
            await asyncio.sleep(max(0.0, wait_time))
            self.token_count_minute = 0
            self.last_tpm_reset = time.time()

        self.token_count_minute += tokens_used

    async def wait_for_tpd(self, tokens_used: int):
        """Block until spending `tokens_used` stays within the TPD limit."""
        now = time.time()
        # Reset the daily counter once a full day has elapsed.
        if now - self.last_day_reset >= 86400:  # 86400 s = 1 day
            self.token_count_day = 0
            self.last_day_reset = self._get_start_of_day()

        # If this spend would exceed the daily budget, sleep until next day.
        if self.token_count_day + tokens_used > self.tpd_limit:
            wait_time = 86400 - (now - self.last_day_reset)
            logging.warning(f"TPD limit reached ({self.token_count_day}/{self.tpd_limit}), waiting {wait_time:.1f}s")
            await asyncio.sleep(max(0.0, wait_time))
            self.token_count_day = 0
            self.last_day_reset = self._get_start_of_day()

        self.token_count_day += tokens_used

    async def wait(self, tokens_used: int):
        """Apply the RPM, TPM and TPD limits in sequence."""
        await self.wait_for_rpm()
        await self.wait_for_tpm(tokens_used)
        await self.wait_for_tpd(tokens_used)

# ----------------------------------------
# GraphProcessor definition
# ----------------------------------------
class GraphProcessor:
    """Extracts knowledge-graph triples from PDFs with an LLM and manages
    WhyHow workspaces and graphs built from those triples."""

    def __init__(self):
        # API limit configuration for the LLM calls.
        self.MODEL_NAME = "gpt-3.5-turbo"
        self.RPM_LIMIT = 500
        self.TPM_LIMIT = 30_000
        self.TPD_LIMIT = 90_000
        self.MAX_CONCURRENT = 8
        self.BATCH_SIZE = 5  # Note: process_pdf_to_triples currently uses small_batch_size=1
        self.MAX_RETRIES = 5

        # Shared rate limiter applied before every LLM batch.
        self.rate_limiter = RateLimiter(
            rpm_limit=self.RPM_LIMIT,
            tpm_limit=self.TPM_LIMIT,
            tpd_limit=self.TPD_LIMIT
        )

        # LLM client.
        # SECURITY FIX: the OpenAI key was previously hard-coded in source;
        # it is now read from OPENAI_API_KEY (falling back to the old
        # placeholder so existing deployments fail the same way as before).
        self.llm = ChatOpenAI(
            model=os.getenv("OPENAI_MODEL_NAME", self.MODEL_NAME),  # Allow model name from env
            openai_api_key=os.getenv("OPENAI_API_KEY", "yourOPENAIkey"),
            max_retries=self.MAX_RETRIES,
            request_timeout=int(os.getenv("OPENAI_REQUEST_TIMEOUT", 30)),  # Allow timeout from env
            temperature=float(os.getenv("OPENAI_TEMPERATURE", 0.3))  # Allow temperature from env
        )

        # Default graph schema, used when the frontend does not supply
        # allowed_nodes / allowed_relationships.
        self.DEFAULT_ALLOWED_NODES = ["Company", "Risk Factor", "Legal Proceeding", "Business Segment"]
        self.DEFAULT_ALLOWED_RELATIONS = ["AFFECTS", "INVOLVED_IN", "WORKED_AT", "POSES_RISK"]

        # WhyHow client.
        # SECURITY FIX: a live-looking API key was hard-coded here as the
        # getenv() fallback; it has been removed (the leaked key should be
        # rotated). The key must now come from WHYHOW_API_KEY.
        self.whyhow_client = AsyncWhyHow(
            api_key=os.getenv("WHYHOW_API_KEY"),
            base_url=os.getenv("WHYHOW_BASE_URL", "http://localhost:8000"),
            httpx_kwargs={"timeout": DEFAULT_TIMEOUT}  # Long timeout for graph builds
        )

        # workspace_id is deliberately not kept on the instance; it is passed
        # per request.

    def count_tokens(self, text: str) -> int:
        """Count tokens in *text* with tiktoken, falling back to a rough
        4-characters-per-token estimate when the encoding is unavailable."""
        try:
            enc = tiktoken.encoding_for_model(self.MODEL_NAME)
            return len(enc.encode(text))
        except Exception:
            # Fallback keeps processing alive when tiktoken has no encoding
            # for the configured model (or cannot be used at all).
            return len(text) // 4

    def format_triple(self, triple):
        """Convert a Langchain relationship object into a WhyHow Triple.

        Defensively handles missing source/target/type attributes and empty
        or None values so a single malformed triple cannot abort a batch.
        """
        # Coerce node ids/types to strings; tolerate missing attributes.
        head_name = str(triple.source.id) if triple.source and hasattr(triple.source, 'id') and triple.source.id is not None else "UnknownNode"
        head_label = str(triple.source.type) if triple.source and hasattr(triple.source, 'type') and triple.source.type is not None else "Entity"
        tail_name = str(triple.target.id) if triple.target and hasattr(triple.target, 'id') and triple.target.id is not None else "UnknownNode"
        tail_label = str(triple.target.type) if triple.target and hasattr(triple.target, 'type') and triple.target.type is not None else "Entity"
        relation_name = str(triple.type) if hasattr(triple, 'type') and triple.type is not None else "UNKNOWN_RELATION"

        # Replace empty/placeholder names; unknown nodes get a unique suffix
        # so they cannot collide with each other inside the graph.
        if not head_name or head_name.strip() == "" or head_name == "None":
            head_name = "UnknownNode_" + str(uuid.uuid4())[:8]
        if not head_label or head_label.strip() == "" or head_label == "None":
            head_label = "Entity"
        if not tail_name or tail_name.strip() == "" or tail_name == "None":
            tail_name = "UnknownNode_" + str(uuid.uuid4())[:8]
        if not tail_label or tail_label.strip() == "" or tail_label == "None":
            tail_label = "Entity"
        if not relation_name or relation_name.strip() == "" or relation_name == "None":
            relation_name = "UNKNOWN_RELATION"

        return schemas.Triple(
            head=schemas.Node(name=head_name, label=head_label),
            relation=schemas.Relation(name=relation_name),
            tail=schemas.Node(name=tail_name, label=tail_label)
        )

    async def process_pdf_to_triples(
        self,
        filepath: str,
        prompt: Optional[str] = None,
        allowed_nodes: Optional[List[str]] = None,
        allowed_relations: Optional[List[str]] = None
    ) -> "List[schemas.Triple]":
        """Load a PDF, split it into chunks and extract WhyHow-compatible
        triples with the LLM graph transformer.

        :param filepath: path of the PDF file on disk
        :param prompt: optional custom extraction prompt
        :param allowed_nodes: node labels to extract (defaults to class list)
        :param allowed_relations: relation names to extract (defaults to class list)
        :return: list of validated ``schemas.Triple`` objects
        """
        # Fall back to the class defaults when no schema is supplied.
        nodes_to_use = allowed_nodes if allowed_nodes is not None else self.DEFAULT_ALLOWED_NODES
        relations_to_use = allowed_relations if allowed_relations is not None else self.DEFAULT_ALLOWED_RELATIONS

        logging.info(f"Using Allowed Nodes: {nodes_to_use}")
        logging.info(f"Using Allowed Relations: {relations_to_use}")

        # 1. Load and split the PDF.
        loader = PyPDFLoader(filepath)
        docs = loader.load()
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=500,
            chunk_overlap=100,
            separators=["\n\n", "\n", "。", "！", "？", ".", "!", "?"],  # CJK + Latin sentence ends
            length_function=self.count_tokens
        )
        # Drop chunks that are still too large after splitting; the cap keeps
        # each request within the model's context budget.
        max_chunk_tokens = int(os.getenv("MAX_CHUNK_TOKENS", 1500))
        split_docs = [doc for doc in text_splitter.split_documents(docs)
                      if self.count_tokens(doc.page_content) < max_chunk_tokens]

        logging.info(f"Split PDF into {len(split_docs)} chunks (max {max_chunk_tokens} tokens per chunk).")

        # 2. Initialize the graph transformer with the chosen schema/prompt.
        llm_transformer = LLMGraphTransformer(
            llm=self.llm,
            allowed_nodes=nodes_to_use,
            allowed_relationships=relations_to_use,
            prompt=prompt if prompt else None  # empty/None prompt -> library default
        )
        if prompt:
            logging.info(f"Using custom prompt for graph extraction.")
        else:
            logging.info(f"Using default prompt for graph extraction.")

        # 3. Process chunks one at a time for rate limiting and error isolation.
        # NOTE(review): the semaphore is effectively a no-op here because the
        # loop awaits each chunk sequentially; kept for future parallelism.
        semaphore = asyncio.Semaphore(self.MAX_CONCURRENT)
        all_triples = []
        small_batch_size = 1
        for i in range(0, len(split_docs), small_batch_size):
            batch = split_docs[i:i + small_batch_size]
            logging.info(f"Processing chunk {i + 1}/{len(split_docs)}")
            async with semaphore:
                tokens_used = sum(self.count_tokens(doc.page_content) for doc in batch)
                # Add estimated prompt tokens (rough; Langchain's actual
                # prompt may differ).
                if prompt:
                    tokens_used += self.count_tokens(prompt)
                # Add a rough 20% allowance for output tokens.
                tokens_used += int(tokens_used * 0.2)

                await self.rate_limiter.wait(tokens_used)
                try:
                    graph_docs = await llm_transformer.aconvert_to_graph_documents(batch)
                    # Flatten the relationships of every document in the batch.
                    batch_triples = list(itertools.chain(*[doc.relationships for doc in graph_docs]))
                    all_triples.extend(batch_triples)
                    logging.info(f"Extracted {len(batch_triples)} triples from chunk {i + 1}")
                except Exception as e:
                    logging.error(f"处理 chunk {i + 1} 失败: {str(e)}", exc_info=True)
                    # Back off briefly, then skip this chunk.
                    await asyncio.sleep(5)
                    continue

        # 4. Convert to WhyHow-compatible triples, dropping placeholders.
        formatted_triples = []
        for triple in all_triples:
            try:
                formatted = self.format_triple(triple)
                # Keep only triples whose parts are neither empty nor the
                # generated placeholders.
                if formatted.head.name and formatted.relation.name and formatted.tail.name and \
                   "UnknownNode_" not in formatted.head.name and \
                   "UnknownNode_" not in formatted.tail.name and \
                   formatted.relation.name != "UNKNOWN_RELATION":
                    formatted_triples.append(formatted)
                else:
                    logging.warning(f"Skipping potentially invalid triple: {formatted}")
            except Exception as e:
                logging.warning(f"跳过无效的三元组 (Formatting Error: {e}, Triple: {triple})", exc_info=True)
                continue

        logging.info(f"Formatted {len(formatted_triples)} valid triples.")
        return formatted_triples

    async def create_whyhow_graph(self, triples: "List[schemas.Triple]", graph_name: str, workspace_id: str) -> Dict[str, Any]:
        """Create a WhyHow graph from *triples* inside *workspace_id*.

        Runs a sample query against the new graph and returns a summary dict
        (workspace_id, graph_id, triple_count, query_answer).
        :raises HTTPException: 400 without a workspace id, 500 on API failure
        """
        if not workspace_id:
            logging.error("Workspace ID is required to create a graph.")
            raise HTTPException(status_code=400, detail="未指定工作区 ID，无法创建图谱。")

        if not triples:
            # Nothing to build: report an empty result instead of failing.
            logging.warning(f"No valid triples to create graph in workspace {workspace_id}.")
            return {
                "workspace_id": workspace_id,
                "graph_id": None,
                "triple_count": 0,
                "query_answer": "没有生成任何三元组，无法创建图谱。"
            }

        try:
            # 1. Create the graph.
            try:
                graph = await self.whyhow_client.graphs.create_graph_from_triples(
                    workspace_id=workspace_id,
                    triples=triples,
                    name=graph_name,
                )
                logging.info(f"Created graph {graph.graph_id} in workspace {workspace_id}")
                graph_id = graph.graph_id
            except Exception as e:
                logging.error(f"Failed to create graph in WhyHow workspace {workspace_id}: {e}", exc_info=True)
                raise HTTPException(status_code=500, detail=f"无法在创建图谱: {e}")

            # 2. Run a sample query (best-effort; failures only log a warning).
            query_answer = "未能执行示例查询。"
            if graph_id:
                try:
                    query_result = await self.whyhow_client.graphs.query_unstructured(
                        graph_id=graph_id,
                        query="药品种类?"
                    )
                    query_answer = query_result.answer
                    logging.info(f"Sample query result for graph {graph_id}: {query_answer[:200]}...")
                except Exception as e:
                    logging.warning(f"Failed to execute sample query for graph {graph_id}: {e}", exc_info=True)

            return {
                "workspace_id": workspace_id,
                "graph_id": graph_id,
                "triple_count": len(triples),
                "query_answer": query_answer
            }
        except HTTPException:
            raise  # Re-raise HTTPException untouched
        except Exception as e:
            logging.exception(f"创建图谱时发生未知错误 in workspace {workspace_id}")
            raise HTTPException(status_code=500, detail=f"创建图谱时发生未知错误: {e}")

    async def list_available_workspaces(self) -> List[Dict[str, str]]:
        """List the WhyHow workspaces as ``{"workspace_id", "name"}`` dicts."""
        try:
            # get_all() returns an async iterator; collect it with async for.
            workspaces_iterator = self.whyhow_client.workspaces.get_all()
            workspace_list_data = []
            async for ws in workspaces_iterator:
                workspace_list_data.append({"workspace_id": ws.workspace_id, "name": ws.name})

            logging.info(f"Listed {len(workspace_list_data)} workspaces.")
            return workspace_list_data
        except Exception as e:
            logging.error(f"Failed to list WhyHow workspaces: {e}", exc_info=True)
            raise HTTPException(status_code=500, detail=f"无法列出WhyHow工作区: {e}")

    async def create_new_workspace(self, name: str) -> Dict[str, str]:
        """Create a WhyHow workspace named *name* and return its id and name.

        :raises HTTPException: 400 for a blank name, 500 on API failure
        """
        if not name or not name.strip():
            raise HTTPException(status_code=400, detail="工作区名称不能为空。")

        try:
            workspace = await self.whyhow_client.workspaces.create(name=name)
            logging.info(f"Created new workspace: {workspace.name} ({workspace.workspace_id})")
            return {"workspace_id": workspace.workspace_id, "name": workspace.name}
        except Exception as e:
            logging.error(f"Failed to create workspace '{name}': {e}", exc_info=True)
            # Name-conflict errors could be special-cased here if needed.
            raise HTTPException(status_code=500, detail=f"无法创建工作区: {e}")

    async def list_available_graphs(self, workspace_id: str) -> List[Dict[str, str]]:
        """List the graphs of *workspace_id* as ``{"graph_id", "name"}`` dicts."""
        if not workspace_id:
            logging.error("Workspace ID is required to list graphs.")
            raise HTTPException(status_code=400, detail="未指定工作区 ID，无法列出图谱。")

        try:
            # get_all() returns an async iterator; collect it with async for.
            graphs_iterator = self.whyhow_client.graphs.get_all(workspace_id=workspace_id)
            graph_list_data = []
            async for g in graphs_iterator:
                graph_list_data.append({"graph_id": g.graph_id, "name": g.name})

            logging.info(f"Listed {len(graph_list_data)} graphs for workspace {workspace_id}")
            return graph_list_data
        except Exception as e:
            logging.error(f"Failed to list graphs for workspace {workspace_id}: {e}", exc_info=True)
            raise HTTPException(status_code=500, detail=f"无法列出WhyHow图谱: {e}")

    async def query_graph(self, graph_id: str, query: str) -> str:
        """Run an unstructured natural-language *query* against *graph_id*.

        :raises HTTPException: 400 for missing arguments, 500 on API failure
        """
        if not graph_id:
            raise HTTPException(status_code=400, detail="缺少图谱 ID")
        if not query:
            raise HTTPException(status_code=400, detail="缺少查询消息")

        logging.info(f"Querying graph {graph_id} with query: {query[:100]}...")

        try:
            query_result = await self.whyhow_client.graphs.query_unstructured(
                graph_id=graph_id,
                query=query
            )
            logging.info(f"Query successful for graph {graph_id}.")
            return query_result.answer
        except Exception as e:
            logging.error(f"Failed to query graph {graph_id}: {e}", exc_info=True)
            # The WhyHow API error shape is opaque here; surface a generic 500.
            raise HTTPException(status_code=500, detail=f"查询图谱失败: {e}")


# ----------------------------------------
# FastAPI application and endpoint definitions
# ----------------------------------------
app = FastAPI()

# Configure CORS so the browser frontend can call this API cross-origin.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"], # Adjust this in production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Single shared GraphProcessor instance (holds the LLM and WhyHow clients).
graph_processor = GraphProcessor()


@app.get("/list_workspaces")
async def list_workspaces():
    """Return the WhyHow workspaces visible to the configured client."""
    try:
        result = await graph_processor.list_available_workspaces()
        return JSONResponse(content=result)
    except HTTPException:
        # Already a proper HTTP error; pass it through unchanged.
        raise
    except Exception as e:
        logging.exception("列出工作区出错")
        raise HTTPException(status_code=500, detail=f"列出工作区时发生错误: {e}")

@app.post("/create_workspace")
async def create_workspace(name: str = Form(...)):
    """Create a new WhyHow workspace and return its id and name."""
    try:
        created = await graph_processor.create_new_workspace(name=name)
        # 201 signals that a resource was created.
        return JSONResponse(content=created, status_code=201)
    except HTTPException:
        # Already a proper HTTP error; pass it through unchanged.
        raise
    except Exception as e:
        logging.exception(f"创建工作区 '{name}' 出错")
        raise HTTPException(status_code=500, detail=f"创建工作区时发生错误: {e}")


@app.post("/upload")
async def upload_pdf(
    pdf: UploadFile = File(...),
    workspace_id: str = Form(...),  # Target WhyHow workspace (required)
    prompt: Optional[str] = Form(None),  # Optional custom extraction prompt
    allowed_nodes_str: Optional[str] = Form(None),  # Comma-separated node labels
    allowed_relations_str: Optional[str] = Form(None)  # Comma-separated relation names
):
    """Accept a PDF upload, extract triples from it and build a WhyHow graph.

    Returns the workspace/graph ids, the triple count, a sample query answer
    and the generated graph name.
    """
    if pdf.content_type != "application/pdf":
        raise HTTPException(status_code=400, detail="仅支持 PDF 文件。")
    if not workspace_id:
        raise HTTPException(status_code=400, detail="未指定目标工作区 ID。")

    # SECURITY FIX: the client-supplied filename was previously used verbatim
    # to build the save path, allowing path traversal (e.g. "../../x.pdf").
    # Keep only the base name and reject empty/missing names.
    safe_filename = os.path.basename(pdf.filename or "")
    if not safe_filename:
        raise HTTPException(status_code=400, detail="文件名无效。")
    graph_name = os.path.splitext(safe_filename)[0] + "_Analysis"

    # Parse the comma-separated schema strings into lists; None means
    # "use the processor's defaults".
    allowed_nodes = [node.strip() for node in allowed_nodes_str.split(',') if node.strip()] if allowed_nodes_str else None
    allowed_relations = [rel.strip() for rel in allowed_relations_str.split(',') if rel.strip()] if allowed_relations_str else None

    # Log the received prompt and schema (for debugging).
    if prompt:
        logging.info(f"Received custom prompt: {prompt[:100]}...")
    else:
        logging.info("No custom prompt received, using default.")

    logging.info(f"Received allowed nodes string: '{allowed_nodes_str}' -> Parsed: {allowed_nodes if allowed_nodes is not None else 'Using default'}")
    logging.info(f"Received allowed relations string: '{allowed_relations_str}' -> Parsed: {allowed_relations if allowed_relations is not None else 'Using default'}")

    # Persist the upload to disk so PyPDFLoader can read it.
    upload_dir = "uploads"
    os.makedirs(upload_dir, exist_ok=True)
    file_path = os.path.join(upload_dir, safe_filename)
    try:
        with open(file_path, "wb") as f:
            content = await pdf.read()
            f.write(content)
        logging.info(f"Saved received file to: {file_path}")
    except Exception as e:
        logging.error(f"Failed to save file {pdf.filename}: {e}")
        raise HTTPException(status_code=500, detail=f"无法保存文件: {e}")

    try:
        # Extract triples and build the graph in the requested workspace.
        triples = await graph_processor.process_pdf_to_triples(
            filepath=file_path,
            prompt=prompt,
            allowed_nodes=allowed_nodes,
            allowed_relations=allowed_relations
        )
        logging.info(f"Extracted {len(triples)} triples from PDF")

        # The saved file is intentionally kept on disk; uncomment to delete:
        # os.remove(file_path)

        result = await graph_processor.create_whyhow_graph(
            triples=triples,
            graph_name=graph_name,
            workspace_id=workspace_id
        )

        return JSONResponse(content={
            "workspace_id": result.get("workspace_id"),
            "graph_id": result.get("graph_id"),
            "triple_count": result.get("triple_count", 0),
            "query_answer": result.get("query_answer", "未能获取示例查询结果。"),
            "graph_name": graph_name
        })
    except HTTPException:
        # Already a proper HTTP error; pass it through unchanged.
        raise
    except Exception as e:
        logging.exception("处理文件出错")
        raise HTTPException(status_code=500, detail=f"处理文件时发生内部错误: {e}")

@app.get("/list_graphs")
async def list_graphs(workspace_id: str = Query(...)):
    """List the graphs available in the given WhyHow workspace."""
    # Guard against an explicitly empty workspace id.
    if not workspace_id:
        raise HTTPException(status_code=400, detail="未指定工作区 ID，无法列出图谱。")
    try:
        found = await graph_processor.list_available_graphs(workspace_id=workspace_id)
        return JSONResponse(content=found)
    except HTTPException:
        # Already a proper HTTP error; pass it through unchanged.
        raise
    except Exception as e:
        logging.exception(f"列出工作区 {workspace_id} 的图谱出错")
        raise HTTPException(status_code=500, detail=f"列出图谱时发生错误: {e}")


@app.post("/chat")
async def chat_with_graph(request: Request):
    """Answer a chat message by querying the specified WhyHow graph."""
    try:
        payload = await request.json()
        message = payload.get("message")
        graph_id = payload.get("graph_id")

        # Both fields are mandatory.
        if not (message and graph_id):
            raise HTTPException(status_code=400, detail="缺少消息或图谱ID")

        logging.info(f"Received chat message for graph {graph_id}: {message[:100]}...")

        # Delegate the actual query to the shared processor.
        answer = await graph_processor.query_graph(graph_id=graph_id, query=message)
        return JSONResponse(content={"reply": answer})

    except HTTPException:
        # Already a proper HTTP error; pass it through unchanged.
        raise
    except Exception as e:
        logging.exception("处理聊天消息出错")
        raise HTTPException(status_code=500, detail=f"处理聊天消息时发生错误: {e}")


@app.get("/health")
async def health_check():
    """Liveness probe: always reports the service as healthy."""
    payload = {"status": "healthy"}
    return JSONResponse(content=payload)


# Start the FastAPI service
if __name__ == "__main__":
    logging.info("Starting FastAPI server...")
    # Serve on all interfaces, port 8001 (the WhyHow backend defaults to 8000).
    uvicorn.run(app, host="0.0.0.0", port=8001)

