import asyncio
import functools
import json
import logging
import os
import random
import re
import time
from asyncio import Semaphore
from typing import Optional

import aiofiles
import aiohttp
import nest_asyncio
import numpy as np
from fastapi import FastAPI, HTTPException, File, UploadFile, Depends
from fastapi.middleware.cors import CORSMiddleware
from neo4j import AsyncGraphDatabase
from pydantic import BaseModel, Field

from lightrag import LightRAG, QueryParam
from lightrag.llm import openai_embedding
from lightrag.utils import EmbeddingFunc, xml_to_json

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s: %(message)s')
logger = logging.getLogger(__name__)


# 定义 Response 模型
class Response(BaseModel):
    status: str
    data: Optional[str] = None
    message: Optional[str] = None

# nest_asyncio would patch the event loop to allow nested run calls; currently disabled.
# nest_asyncio.apply()

# Neo4j 连接常量
NEO4J_URI = "bolt://localhost:7687"
NEO4J_USERNAME = "neo4j"
NEO4J_PASSWORD = "zhaolong22"

neo4j_connection = {
    "uri": "bolt://localhost:7687",  # 默认值
    "username": "neo4j",
    "password": "zhaolong22",
    "driver": None
}


# 工作目录常量
DEFAULT_RAG_DIR = "index_default"
# 动态获取当前脚本所在的目录
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
# 使用相对路径或绝对路径
WORKING_DIR = os.path.join(SCRIPT_DIR, "index_default")  # 或使用绝对路径
BATCH_SIZE_NODES = 500
BATCH_SIZE_EDGES = 100

# 创建 FastAPI 应用
app = FastAPI(
    title="知识图谱智能问答服务",
    description="""
    🚀 全面的知识图谱和智能问答平台
    
    主要功能：
    - 🔍 语义检索
    - 📊 知识图谱管理
    - 💬 智能问答
    - 🔐 数据库连接管理
    
    支持多种数据导入方式和灵活的查询模式
    """,
)
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# 创建速率限制器
rate_limiter = Semaphore(10)  # 根据速率限制调整此值

# 创建 Neo4j 驱动
# driver = AsyncGraphDatabase.driver(NEO4J_URI, auth=(NEO4J_USERNAME, NEO4J_PASSWORD))

print(f"WORKING_DIR: {WORKING_DIR}")
LLM_MODEL = os.environ.get("LLM_MODEL", "gpt-4o-mini")
print(f"LLM_MODEL: {LLM_MODEL}")
EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "text-embedding-3-large")
print(f"EMBEDDING_MODEL: {EMBEDDING_MODEL}")
EMBEDDING_MAX_TOKEN_SIZE = int(os.environ.get("EMBEDDING_MAX_TOKEN_SIZE", 8192))
print(f"EMBEDDING_MAX_TOKEN_SIZE: {EMBEDDING_MAX_TOKEN_SIZE}")

if not os.path.exists(WORKING_DIR):
    os.mkdir(WORKING_DIR)

def performance_monitor(func):
    @functools.wraps(func)
    async def wrapper(*args, **kwargs):
        start_time = time.time()
        try:
            result = await func(*args, **kwargs)
            end_time = time.time()
            logger.info(f"接口 {func.__name__} 执行耗时: {end_time - start_time:.4f} 秒")
            return result
        except Exception as e:
            logger.error(f"接口 {func.__name__} 执行错误: {str(e)}")
            raise
    return wrapper

# 添加一个用于更新连接信息的数据模型
class Neo4jConnectionConfig(BaseModel):
    uri: str = Field(..., description="Neo4j 连接地址")
    username: str = Field(..., description="用户名")
    password: str = Field(..., description="密码")
    database: Optional[str] = Field(default="neo4j", description="数据库名称")

# 添加一个用于管理 Neo4j 连接的类
class Neo4jConnectionManager:
    @staticmethod
    async def initialize_driver(uri: str, username: str, password: str, database: str = "neo4j"):
        try:
            # 验证数据库的有效���
            if not database or not database.strip():
                raise ValueError("Database name cannot be empty")
            if neo4j_connection["driver"]:
                await neo4j_connection["driver"].close()
            
            # 创建新的驱动实例
            driver = AsyncGraphDatabase.driver(uri, auth=(username, password))
            
            # 测试连接并切换到指定数据库
            async with driver.session(database=database) as session:
                await session.run("RETURN 1")
            
            # 更新全局连接信息
            neo4j_connection.update({
                "uri": uri,
                "username": username,
                "password": password,
                "database": database,
                "driver": driver
            })
            
            return True
        except Exception as e:
            print(f"Failed to connect to Neo4j: {str(e)}")
            raise
    @staticmethod
    def get_driver():
        if not neo4j_connection["driver"]:
            raise ValueError("Neo4j connection not initialized")
        return neo4j_connection["driver"]

# 添加新的 API 端点来管理连接
@app.post("/connect_neo4j", tags=["数据库管理"], summary="连接 Neo4j 数据库")
@performance_monitor
async def connect_neo4j(config: Neo4jConnectionConfig):
    """
    连接到指定的 Neo4j 数据库
    参数:
    - config: Neo4j 连接配置
        - uri: 数据库地址
        - username: 用户名
        - password: 密码
        - database: 数据库名（可选）
    返回:
    - 连接状态和详细信息
    """
    try:
        await Neo4jConnectionManager.initialize_driver(
            config.uri,
            config.username,
            config.password,
            config.database
        )
        
        return Response(
            status="success",
            message=f"成功连接到 Neo4j 数据库: {config.database}",
            data=json.dumps({
                "uri": config.uri,
                "username": config.username,
                "database": config.database,
                "connected": True
            })
        )
    except Exception as e:
        logger.error(f"连接数据库错误: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))

@app.get("/list_databases", tags=["数据库管理"], summary="列出可用数据库")
async def list_databases():
    """
    获取 Neo4j 中所有可用的数据库列表
    返回:
    - 数据库名称列表
    """
    try:
        driver = Neo4jConnectionManager.get_driver()
        
        # 添加���的连接信息和诊断日志
        print("Neo4j Connection Diagnostics:")
        print(f"URI: {neo4j_connection['uri']}")
        print(f"Username: {neo4j_connection['username']}")
        print(f"Current Database: {neo4j_connection.get('database', 'neo4j')}")
        
        async with driver.session() as session:
            # 尝试执行系统信息查询
            try:
                # 使用系统管理员权限查询
                result = await session.run(
                    "CALL dbms.components() YIELD name, versions"
                )
                system_info = await result.fetch(-1)
                print("System Info:", system_info)
            except Exception as e:
                print(f"Error getting system info: {e}")
            
            # 尝试多种查询方式获取数据库列表
            database_queries = [
                "SHOW DATABASES",
                "CALL dbms.database.list()",
                "CALL dbms.databases()"
            ]
            
            databases = []
            for query in database_queries:
                try:
                    print(f"Attempting query: {query}")
                    result = await session.run(query)
                    records = await result.fetch(-1)
                    
                    print(f"Records for {query}:", records)
                    
                    # 根据不同查询提取数据库名
                    if query == "SHOW DATABASES":
                        databases = [record["name"] for record in records]
                    elif query.startswith("CALL dbms.database"):
                        databases = [record.get("database", record.get("name")) for record in records]
                    
                    if databases:
                        print(f"Found databases using {query}: {databases}")
                        break
                except Exception as e:
                    print(f"Query {query} failed: {e}")
                    continue
            
            # 如果没有找到数据库，尝试直接查询当前数据库
            if not databases:
                try:
                    current_db_query = "CALL db.info()"
                    result = await session.run(current_db_query)
                    records = await result.fetch(-1)
                    print("DB Info Query Records:", records)
                    
                    # 尝试从记录中提取数据库名
                    if records:
                        databases = [record.get("name", "neo4j") for record in records]
                except Exception as e:
                    print(f"Current DB query failed: {e}")
            
            # 如果仍然没有数据库，默认使用 neo4j
            if not databases:
                databases = ["neo4j"]
            
            return Response(
                status="success",
                data=json.dumps(databases),
                message="Database list retrieved"
            )
    except Exception as e:
        error_message = f"Comprehensive error in listing databases: {str(e)}"
        print(error_message)
        
        # 如果是连接错误，提供更详细的诊断信息
        if "connection" in str(e).lower():
            error_message += "\n请检查：\n1. Neo4j 服务是否运行\n2. 连接地址和端口\n3. 用户名和密码"
        
        raise HTTPException(
            status_code=500, 
            detail=error_message
        )
@app.post("/create_database", tags=["数据库管理"], summary="创建新数据库")
async def create_database(database_name: str):
    """
    在 Neo4j 中创建新的数据库
    参数:
    - database_name: 新数据库的名称
    """
    try:
        driver = Neo4jConnectionManager.get_driver()
        
        async with driver.session() as session:
            # 创数据库
            await session.run(f"CREATE DATABASE {database_name}")
            
            return Response(
                status="success",
                message=f"Database '{database_name}' created successfully"
            )
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.get("/neo4j_connection_diagnostics", tags=["系统诊断"], summary="Neo4j 连接诊断")
async def neo4j_connection_diagnostics():
    """
    提供 Neo4j 数据库连接的详细诊断信息
    返回:
    - 数据库连接状态
    - 服务器信息
    - 系统组件信息
    """
    try:
        driver = Neo4jConnectionManager.get_driver()
        database = neo4j_connection.get("database", "neo4j")
        
        async with driver.session(database=database) as session:
            # 执行多个诊断查询
            server_info_query = "CALL dbms.components() YIELD name, versions"
            db_info_query = "CALL db.info()"
            
            # 获取服务器信息
            server_result = await session.run(server_info_query)
            server_record = await server_result.single()
            
            # 获取数据库信息
            db_result = await session.run(db_info_query)
            db_record = await db_result.single()
            
            # 准备诊断信息
            diagnostics = {
                "connection_uri": neo4j_connection["uri"],
                "username": neo4j_connection["username"],
                "database": database,
                "server_name": server_record.get("name", "Unknown"),
                "server_version": server_record.get("versions", ["Unknown"])[0],
                "database_info": dict(db_record) if db_record else {}
            }
            
            return Response(
                status="success",
                message="Neo4j connection diagnostics",
                data=json.dumps(diagnostics, ensure_ascii=False)
            )
    
    except Exception as e:
        print(f"Connection diagnostics error: {e}")
        return Response(
            status="error",
            message=f"Diagnostics failed: {str(e)}",
            data=None
        )
# LLM 模型函数
async def llm_model_func(
    prompt, system_prompt=None, history_messages=[], **kwargs
) -> str:
    url = "http://1.15.125.13:11434/v1/chat/completions"
    headers = {
        "Authorization": "Bearer sk-3033&5004",
        "Content-Type": "application/json"
    }
    
    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    
    messages.extend(history_messages)
    messages.append({"role": "user", "content": prompt})
    
    payload = {
        "model": "qwen2.5:14b",
        "messages": messages,
        "temperature": kwargs.get("temperature", 0.7),
        "max_tokens": kwargs.get("max_tokens", 768),
        "stream": False
    }
    max_retries = 20
    base_delay = 1  # 初始延迟 1 秒
    for attempt in range(max_retries):
        try:
            async with rate_limiter:
                async with aiohttp.ClientSession() as session:
                    async with session.post(url, headers=headers, json=payload) as response:
                        if response.status == 200:
                            result = await response.json()
                            if not result.get("choices"):
                                raise ValueError(f"Unexpected API response format: {result}")
                            return result["choices"][0]["message"]["content"]
                        elif response.status == 429:
                            delay = (base_delay * 2 ** attempt) + (random.randint(0, 1000) / 1000)
                            print(f"Rate limited. Retrying in {delay:.2f} seconds...")
                            await asyncio.sleep(delay)
                        else:
                            raise ValueError(f"API request failed with status {response.status}")
        except Exception as e:
            if attempt == max_retries - 1:
                raise
            print(f"Attempt {attempt + 1} failed: {str(e)}. Retrying...")
            await asyncio.sleep(1)
    raise ValueError("Max retries reached. Unable to complete the request.")
async def embedding_func(texts: list[str]) -> np.ndarray:
    url = "http://1.15.125.13:5004/v1/embeddings"
    headers = {
        "Authorization": "Bearer sk-3033&5004",
        "Content-Type": "application/json"
    }
    
    payload = {
        "model": "bge-large-zh-v1.5-local",
        "input": texts,
        "encoding_format": "float"
    }
    max_retries = 5
    base_delay = 1
    for attempt in range(max_retries):
        try:
            async with rate_limiter:
                async with aiohttp.ClientSession() as session:
                    async with session.post(url, headers=headers, json=payload) as response:
                        if response.status == 200:
                            result = await response.json()
                            if 'data' not in result or not isinstance(result['data'], list):
                                raise ValueError(f"Unexpected API response format: {result}")
                            embeddings = [item['embedding'] for item in result['data'] if 'embedding' in item]
                            if not embeddings:
                                raise ValueError("No valid embeddings found in the response")
                            return np.array(embeddings)
                        elif response.status == 429:
                            delay = (base_delay * 2 ** attempt) + (random.randint(0, 1000) / 1000)
                            print(f"Rate limited. Retrying in {delay:.2f} seconds...")
                            await asyncio.sleep(delay)
                        else:
                            error_text = await response.text()
                            print(f"API request failed with status {response.status}")
                            print(f"Error response: {error_text}")
                            raise ValueError(f"API request failed with status {response.status}: {error_text}")
        except Exception as e:
            if attempt == max_retries - 1:
                raise
            print(f"Attempt {attempt + 1} failed: {str(e)}. Retrying...")
            await asyncio.sleep(1)
    raise ValueError("Max retries reached. Unable to complete the request.")
# 初始化 RAG 实例时，使用本地模型的参数
rag = LightRAG(
    working_dir=WORKING_DIR,
    llm_model_func=llm_model_func,
    embedding_func=EmbeddingFunc(
        embedding_dim=1024,  # bge-large-zh-v1.5-local 的维度
        max_token_size=512,  # 对应模型的 max_tokens
        func=embedding_func
    ),
)

# 导出到 Neo4j 的函数
async def export_to_neo4j(database: str = None, working_dir: str = None):
    try:
        driver = Neo4jConnectionManager.get_driver()
        database = database or neo4j_connection.get("database", "neo4j")
        working_dir = working_dir or WORKING_DIR
        
        # 准备图数据
        json_file = await prepare_graph_data(working_dir)
        print(f"Using graph data from: {json_file}")
        
        if not os.path.exists(json_file):
            raise ValueError(f"Graph data file not found: {json_file}")
        
        with open(json_file, "r", encoding="utf-8") as f:
            json_data = json.load(f)
        
        nodes = json_data.get("nodes", [])
        edges = json_data.get("edges", [])
        
        print(f"Exporting {len(nodes)} nodes and {len(edges)} edges to database: {database}")
        
        # 使用异步会话，并指定数据库
        async with driver.session(database=database) as session:
            # 分批插入节点
            await process_in_batches(session, """
                UNWIND $nodes AS node
                MERGE (e {id: node.id})
                SET e.entity_type = node.entity_type,
                    e.description = node.description,
                    e.source_id = node.source_id,
                    e.displayName = node.id
                WITH e, node
                CALL apoc.create.addLabels(e, [node.entity_type]) YIELD node AS labeledNode
                RETURN count(*)
            """, nodes, BATCH_SIZE_NODES)
            
            # 只有在存在边的情况下才插入边
            if edges:
                await process_in_batches(session, """
                    UNWIND $edges AS edge
                    MATCH (source {id: edge.source})
                    MATCH (target {id: edge.target})
                    WITH source, target, edge
                    CALL apoc.create.relationship(source, 
                        COALESCE(edge.type, 'RELATED'), 
                        {
                            weight: edge.weight,
                            description: edge.description,
                            keywords: edge.keywords
                        }, 
                        target
                    ) YIELD rel
                    RETURN count(*)
                """, edges, BATCH_SIZE_EDGES)
            else:
                print("No edges found. Skipping edge creation.")
            
            # 设置 displayName 和标签
            await session.run("""
                MATCH (n)
                SET n.displayName = n.id
                WITH n
                CALL apoc.create.setLabels(n, [n.entity_type]) YIELD node
                RETURN count(*)
            """)
        
        print(f"Graph data export to database '{database}' completed successfully.")
    
    except Exception as e:
        print(f"Error in export_to_neo4j: {e}")
        import traceback
        traceback.print_exc()
        raise
# 在应用启动时初始化默认连接
@app.on_event("startup")
async def startup_event():
    """
    应用启动时的初始化事件
    1. 尝试使用默认配置初始化 Neo4j 连接
    2. 打印详细的启动信息
    """
    try:
        print("Starting up application...")
        print(f"Attempting to connect to Neo4j at: {neo4j_connection['uri']}")
        
        # 尝试初始化默认连接
        await Neo4jConnectionManager.initialize_driver(
            neo4j_connection["uri"],
            neo4j_connection["username"],
            neo4j_connection["password"]
        )
        
        print("✅ Neo4j connection initialized successfully.")
        
        # 可以添加其他启动时的初始化操作
        # 例如：预热缓存、加载配置等
        
    except Exception as e:
        print(f"❌ Warning: Failed to initialize default Neo4j connection: {e}")
        # 可以选择：
        # 1. 记录详细日志
        # 2. 发送告警
        # 3. 继续启动但标记连接状态
        import traceback
        traceback.print_exc()
# 在应用关闭时关闭连接
@app.on_event("shutdown")
async def shutdown_event():
    """
    应用关闭时的清理事件
    1. 关闭 Neo4j 驱动连接
    2. 执行其他必要的清理工作
    """
    print("Shutting down application...")
    
    try:
        # 关闭 Neo4j 驱动
        if neo4j_connection["driver"]:
            print("Closing Neo4j driver connection...")
            await neo4j_connection["driver"].close()
            print("✅ Neo4j driver connection closed successfully.")
    except Exception as e:
        print(f"❌ Error closing Neo4j driver: {e}")
    
    # 可以添加其他清理操作
    # 例如：
    # - 关闭数据库连接
    # - 保存状态
    # - 释放资源
    
    print("Application shutdown complete.")

async def process_in_batches(session, query, data, batch_size):
    for i in range(0, len(data), batch_size):
        batch = data[i:i + batch_size]
        # 根据查询中的参数名动态传递
        if 'nodes' in query:
            await session.run(query, nodes=batch)
        elif 'edges' in query:
            await session.run(query, edges=batch)
        else:
            # 如果没有特定的参数名，可以尝试默认传递
            await session.run(query, {"data": batch})

def convert_xml_to_json(xml_path, output_path):
    """Converts XML file to JSON and saves the output."""
    # 检查文件是否存在，支持相对路径和绝对路径
    if not os.path.exists(xml_path):
        # 尝试使用脚本目录作为基准路径
        alternative_path = os.path.join(SCRIPT_DIR, xml_path)
        if os.path.exists(alternative_path):
            xml_path = alternative_path
        else:
            print(f"Error: File not found - {xml_path}")
            print(f"Alternative path checked: {alternative_path}")
            return None
    
    # 使用 lightrag 的 xml_to_json 函数
    json_data = xml_to_json(xml_path)
    
    if json_data:
        # 确保输出目录存在
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        
        with open(output_path, "w", encoding="utf-8") as f:
            json.dump(json_data, f, ensure_ascii=False, indent=2)
        print(f"JSON file created: {output_path}")
        return json_data
    else:
        print("Failed to create JSON data")
        return None

async def prepare_graph_data(working_dir=None):
    """准备图数据，如果 graph_data.json 不存在则从 graphml 转换"""
    if working_dir is None:
        working_dir = WORKING_DIR
    
    json_file = os.path.join(working_dir, "graph_data.json")
    
    # 尝试的 XML 文件路径列表
    xml_paths = [
        os.path.join(working_dir, "graph_chunk_entity_relation.graphml"),
        os.path.join(SCRIPT_DIR, "graph_chunk_entity_relation.graphml"),
        os.path.join(SCRIPT_DIR, "index_default", "graph_chunk_entity_relation.graphml")
    ]
    
    # 如果 JSON 文件不存在，尝试从 XML 文件转换
    if not os.path.exists(json_file):
        print(f"Graph data JSON not found. Attempting to convert from XML.")
        
        # 尝试查找可用的 XML 文件
        xml_file = None
        for path in xml_paths:
            if os.path.exists(path):
                xml_file = path
                break
        
        if xml_file:
            print(f"Found XML file: {xml_file}")
            json_data = convert_xml_to_json(xml_file, json_file)
            if json_data is None:
                raise ValueError(f"Failed to convert XML to JSON: {xml_file}")
        else:
            raise ValueError("No valid XML file found for conversion")
    
    return json_file

@app.delete("/clear_neo4j", tags=["图谱管理"], summary="清空数据库")
async def clear_neo4j_database():
    """
    清当前 Neo4j 数据库中的所有节点和关系
    警告: 这是一个危险操作，将删除所有数据
    """
    try:
        driver = Neo4jConnectionManager.get_driver()
        database = neo4j_connection.get("database", "neo4j")
        
        clear_query = """
        MATCH (n)
        DETACH DELETE n
        """
        async with driver.session(database=database) as session:  # 指定数据库
            await session.run(clear_query)
        
        return Response(
            status="success", 
            message=f"Database '{database}' cleared successfully"
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

# 添加一个自定义的 JSON 编码器
class CustomJSONEncoder(json.JSONEncoder):
    def default(self, obj):
        # 处理 Neo4j 的 DateTime 类型
        if hasattr(obj, 'isoformat'):  # DateTime 对象通常有 isoformat 方法
            return obj.isoformat()
        # 处理其他特殊类型
        try:
            return dict(obj)
        except:
            return str(obj)

# 修改 view_graph_data 函数
@app.get("/view_graph", tags=["图谱管理"], summary="查看图数据库内容")
async def view_graph_data():
    """
    获取当前 Neo4j 数据库中的所有节点和关系
    功能描述:
    - 连接到指定的 Neo4j 数据库
    - 检索数据库中所有节点和关系
    - 返回详细的neo4j图数据
    """
    try:
        driver = Neo4jConnectionManager.get_driver()
        database = neo4j_connection.get("database", "neo4j")
        
        # 查询所有节点
        nodes_query = """
        MATCH (n)
        RETURN collect({
            id: n.id,
            type: labels(n)[0],
            properties: properties(n)
        }) as nodes
        """
        
        # 查询所有关系
        relationships_query = """
        MATCH (a)-[r]->(b)
        RETURN collect({
            source: a.id,
            target: b.id,
            type: type(r),
            properties: properties(r)
        }) as relationships
        """
        
        async with driver.session(database=database) as session:
            # 获取节点
            nodes_result = await session.run(nodes_query)
            nodes_data = await nodes_result.single()
            nodes = nodes_data['nodes']
            
            # 获取关系
            rels_result = await session.run(relationships_query)
            rels_data = await rels_result.single()
            relationships = rels_data['relationships']
            
            graph_data = {
                "nodes": nodes,
                "relationships": relationships,
                "database": database
            }
            
            # 使用自定义编码器序列化数据
            json_data = json.dumps(
                graph_data,
                ensure_ascii=False,
                cls=CustomJSONEncoder
            )
            
            return Response(
                status="success",
                data=json_data,
                message=f"Found {len(nodes)} nodes and {len(relationships)} relationships in database: {database}"
            )
    except Exception as e:
        logger.error(f"Error in view_graph_data: {str(e)}")
        import traceback
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))

# 添加一个全局变量来跟踪最近上传的文件夹
last_uploaded_dir = None

# 修改 insert_file 函数，移除清空数据库的操作，改用合并模式
@app.post("/insert_file", tags=["知识管理"], summary="上传插入���件")
async def insert_file(
    file: UploadFile = File(...), 
    query: Optional[str] = None, 
    database: Optional[str] = None,
    merge_mode: bool = True
):
    global last_uploaded_dir
    try:
        # 确定当前数据库
        current_database = database or neo4j_connection.get("database", "neo4j")
        
        # 生成工作目录
        file_stem = os.path.splitext(file.filename)[0]
        unique_dir_name = f"{file_stem}_{current_database}"
        working_dir = os.path.join(SCRIPT_DIR, "index_default", unique_dir_name)
        
        # 设置文档存储目录
        documents_dir = os.path.join(working_dir, "documents")
        os.makedirs(documents_dir, exist_ok=True)
        
        # 记录最后上传的目录名
        last_uploaded_dir = unique_dir_name
        
        # 初始化 RAG 目录
        initialize_rag_directory(working_dir)
        
        # 文件完整路径
        file_path = os.path.join(documents_dir, file.filename)
        
        # 检查文件是否已存在
        if os.path.exists(file_path):
            print(f"File {file.filename} already exists, using existing file")
            # 读取已存在的文件
            try:
                with open(file_path, "r", encoding="utf-8") as f:
                    file_content = f.read()
            except UnicodeDecodeError:
                with open(file_path, "r", encoding="gbk") as f:
                    file_content = f.read()
        else:
            # 保存新上传的文件
            async with aiofiles.open(file_path, 'wb') as out_file:
                content = await file.read()
                await out_file.write(content)
            
            # 读取新上传的文件内容
            try:
                with open(file_path, "r", encoding="utf-8") as f:
                    file_content = f.read()
            except UnicodeDecodeError:
                with open(file_path, "r", encoding="gbk") as f:
                    file_content = f.read()
        
        # 使用工作目录初���化 RAG 实例
        rag_instance = LightRAG(
            working_dir=working_dir,
            llm_model_func=llm_model_func,
            embedding_func=EmbeddingFunc(
                embedding_dim=1024,
                max_token_size=512,
                func=embedding_func
            ),
        )
        
        # 插入文件内容
        await rag_instance.insert(file_content)
        
        # 获取 Neo4j 驱动并使用合并模式导出
        driver = Neo4jConnectionManager.get_driver()
        async with driver.session(database=current_database) as session:
            # 直接调用合并函数，不清空数据库
            await merge_graph_data(session, working_dir, current_database)
        
        # 可选：执行查询
        query_result = None
        if query:
            query_result = await rag_instance.query(query, param=QueryParam(mode="hybrid"))
        
        # 获取新导入的图谱数据进行验证
        try:
            view_response = await view_graph_data()
        except Exception as e:
            logger.error(f"Error getting graph data: {str(e)}")
            # 即使获取图数据失败，也继续返回成功响应
            view_response = Response(
                status="success",
                message="File processed successfully, but graph view failed",
                data=None
            )
        
        return Response(
            status="success", 
            message=f"File {file.filename} processed and merged into database '{current_database}' successfully. File stored in {documents_dir}",
            data=view_response.data
        )
    
    except Exception as e:
        logger.error(f"Error in insert_file: {str(e)}")
        import traceback
        traceback.print_exc()
        return Response(status="error", message=f"Error processing file: {str(e)}")

# 修改 merge_graph_data 函数中的边处理逻辑，使其更智能地处理关系
async def merge_graph_data(session, working_dir, database):
    """
    合并图数据到 Neo4j 数据库，保持已有数据并智能添加新数据
    """
    try:
        json_file = os.path.join(working_dir, "graph_data.json")
        if not os.path.exists(json_file):
            raise ValueError(f"graph_data.json not found in {working_dir}")
        
        with open(json_file, "r", encoding="utf-8") as f:
            json_data = json.load(f)
        
        nodes = json_data.get("nodes", [])
        edges = json_data.get("edges", [])
        
        print(f"Merging {len(nodes)} nodes and {len(edges)} edges from {working_dir}")
        
        # 使用 MERGE 处理节点，保���现有属性
        nodes_query = """
        UNWIND $nodes AS node
        MERGE (e {id: node.id})
        ON CREATE SET 
            e.entity_type = node.entity_type,
            e.description = node.description,
            e.source_id = node.source_id,
            e.displayName = node.id,
            e.origin_file = $origin_file
        ON MATCH SET
            e.description = CASE 
                WHEN e.description IS NULL THEN node.description 
                WHEN e.description = node.description THEN e.description
                ELSE e.description + '; ' + node.description 
            END,
            e.origin_file = CASE
                WHEN e.origin_file IS NULL THEN $origin_file
                WHEN NOT e.origin_file CONTAINS $origin_file THEN e.origin_file + '; ' + $origin_file
                ELSE e.origin_file
            END
        WITH e, node
        CALL apoc.create.addLabels(e, [node.entity_type]) YIELD node AS labeledNode
        RETURN count(*)
        """
        
        # 修改 edges_query 的处理逻辑
        edges_query = """
        UNWIND $edges AS edge
        MATCH (source {id: edge.source})
        MATCH (target {id: edge.target})
        OPTIONAL MATCH (source)-[existing:RELATED]->(target)
        WITH source, target, edge, existing,
             CASE
                WHEN edge.keywords CONTAINS 'lead' THEN 'LEADS'
                WHEN edge.keywords CONTAINS 'participate' THEN 'PARTICIPATES_IN'
                WHEN edge.keywords CONTAINS 'uses' THEN 'USES'
                WHEN edge.keywords CONTAINS 'located' THEN 'LOCATED_IN'
                WHEN edge.keywords CONTAINS 'occurs' THEN 'OCCURS_IN'
                ELSE 'RELATED'
             END AS relType
        MERGE (source)-[r:RELATED]->(target)
        ON CREATE SET
            r.type = relType,
            r.weight = edge.weight,
            r.description = edge.description,
            r.keywords = edge.keywords,
            r.source_id = edge.source_id,
            r.origin_file = $origin_file
        ON MATCH SET
            r.weight = CASE 
                WHEN r.weight IS NULL THEN edge.weight
                ELSE (r.weight + edge.weight) / 2
            END,
            r.description = CASE
                WHEN r.description IS NULL THEN edge.description
                WHEN NOT r.description CONTAINS edge.description THEN r.description + '; ' + edge.description
                ELSE r.description
            END,
            r.keywords = CASE
                WHEN r.keywords IS NULL THEN edge.keywords
                WHEN NOT r.keywords CONTAINS edge.keywords THEN r.keywords + '; ' + edge.keywords
                ELSE r.keywords
            END,
            r.origin_file = CASE
                WHEN r.origin_file IS NULL THEN $origin_file
                WHEN NOT r.origin_file CONTAINS $origin_file THEN r.origin_file + '; ' + $origin_file
                ELSE r.origin_file
            END
        RETURN count(*)
        """
        
        # 获取文件夹名作为来源标识
        origin_file = os.path.basename(working_dir)
        
        # 分批处理节点
        await process_in_batches(session, nodes_query, nodes, BATCH_SIZE_NODES, origin_file=origin_file)
        
        # 分批处理边
        if edges:
            await process_in_batches(session, edges_query, edges, BATCH_SIZE_EDGES, origin_file=origin_file)
        
        return True
    except Exception as e:
        print(f"Error in merge_graph_data: {e}")
        raise

async def process_in_batches(session, query, data, batch_size, **kwargs):
    """Execute *query* repeatedly over fixed-size slices of *data*.

    The slice is bound to the ``$nodes`` query parameter when the query
    text contains the word "nodes", otherwise to ``$edges``.  Any extra
    keyword arguments (e.g. ``origin_file``) are forwarded as additional
    query parameters on every batch.
    """
    # Decide once which parameter name the Cypher statement expects.
    param_name = "nodes" if "nodes" in query else "edges"
    start = 0
    while start < len(data):
        chunk = data[start:start + batch_size]
        # kwargs last so callers may override the batch key if needed,
        # matching dict.update semantics of the original implementation.
        await session.run(query, {param_name: chunk, **kwargs})
        start += batch_size

@app.get("/export_to_neo4j", tags=["图谱管理"], summary="导出并合并图谱数据到 Neo4j")
async def export_to_neo4j_endpoint(
    database: Optional[str] = None,
    folder_name: Optional[str] = None,
    merge_mode: bool = True  # new parameter: whether to merge instead of replace
):
    """
    Export graph data into a Neo4j database.

    Parameters:
    - database: optional target database name (defaults to "neo4j")
    - folder_name: optional specific folder under index_default to export
    - merge_mode: when True (default) existing graph data is kept and
      merged into; when False the target database is wiped first

    Raises HTTPException(500) with the underlying error message on any
    failure.
    """
    try:
        target_database = database or "neo4j"
        index_dir = os.path.join(SCRIPT_DIR, "index_default")
        
        # Build the list of directories to export, by priority:
        # explicit folder > last uploaded folder > all matching folders.
        dirs_to_process = []
        if folder_name:
            # Export only the explicitly requested folder.
            if os.path.exists(os.path.join(index_dir, folder_name)):
                dirs_to_process.append(folder_name)
            else:
                raise ValueError(f"Specified folder '{folder_name}' not found")
        elif last_uploaded_dir:
            # Fall back to the most recently uploaded folder.
            # NOTE(review): `last_uploaded_dir` is a module-level global set
            # elsewhere in this file — confirm it is initialized before use.
            if os.path.exists(os.path.join(index_dir, last_uploaded_dir)):
                dirs_to_process.append(last_uploaded_dir)
            else:
                raise ValueError(f"Last uploaded directory '{last_uploaded_dir}' not found")
        else:
            # Export every folder whose name ends with "_<target_database>".
            for dirname in os.listdir(index_dir):
                if os.path.isdir(os.path.join(index_dir, dirname)) and dirname.endswith(f"_{target_database}"):
                    dirs_to_process.append(dirname)
        
        if not dirs_to_process:
            raise ValueError(f"No suitable directories found for database '{target_database}'")
        
        # Acquire the shared Neo4j driver.
        driver = Neo4jConnectionManager.get_driver()
        
        async with driver.session(database=target_database) as session:
            if not merge_mode:
                # Not merging: wipe the target database first.
                await session.run("MATCH (n) DETACH DELETE n")
            
            # Merge each directory's graph data into the database.
            for dir_name in dirs_to_process:
                working_dir = os.path.join(index_dir, dir_name)
                print(f"Processing directory: {working_dir}")
                await merge_graph_data(session, working_dir, target_database)
        
        # Read back the graph to verify the import succeeded.
        view_response = await view_graph_data()
        
        return Response(
            status="success",
            message=f"Graph data from {len(dirs_to_process)} directories merged into database '{target_database}' successfully",
            data=view_response.data
        )
    
    except Exception as e:
        import traceback
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=f"Export failed: {str(e)}")
# Data models
class QueryRequest(BaseModel):
    """Request body for the /query endpoint."""
    query: str  # natural-language question to search for
    mode: str = "hybrid"  # NOTE(review): not read by query_endpoint in this chunk — confirm intended use
    only_need_context: bool = False  # NOTE(review): likewise unused by the visible endpoint — confirm

class InsertRequest(BaseModel):
    """Request body for the /insert endpoint."""
    text: str  # raw text to add to the knowledge base

class InsertFileRequest(BaseModel):
    """Request body for file-based insertion."""
    file_path: str  # path of the file to ingest; the consuming endpoint is not visible in this chunk

# API routes
@app.post("/query", tags=["知识检索"], summary="语义检索")
async def query_endpoint(request: QueryRequest):
    """Semantic retrieval endpoint.

    Ranks graph nodes whose id/description (or an incident relationship's
    description) matches the query text, gathers their contexts, and asks
    the LLM to answer the question grounded in those contexts.

    Returns a Response whose ``data`` is a JSON string holding the query,
    the generated answer, the contexts used, and their source files.
    Raises HTTPException(500) on any failure.
    """
    try:
        # Acquire the shared Neo4j driver and target database name.
        driver = Neo4jConnectionManager.get_driver()
        database = neo4j_connection.get("database", "neo4j")
        
        # Relevance-ranked lookup over node ids, node descriptions and
        # relationship descriptions.
        source_query = """
        MATCH (n)
        WHERE toLower(n.description) CONTAINS toLower($query_text)
           OR toLower(n.id) CONTAINS toLower($query_text)
           OR EXISTS {
               MATCH (n)-[r]-()
               WHERE toLower(r.description) CONTAINS toLower($query_text)
           }
        WITH n, 
             CASE 
                WHEN toLower(n.id) = toLower($query_text) THEN 5
                WHEN toLower(n.id) CONTAINS toLower($query_text) THEN 4
                WHEN toLower(n.description) CONTAINS toLower($query_text) THEN 3
                WHEN EXISTS { MATCH (n)-[r]-() WHERE toLower(r.description) CONTAINS toLower($query_text) } THEN 2
                ELSE 1
             END as relevance
        
        // 获取相关的边信息
        OPTIONAL MATCH (n)-[r]->(m)
        WITH n, relevance, collect(DISTINCT r.description) as edge_descriptions
        
        // 获取相邻节点的描述
        OPTIONAL MATCH (n)-[]->(neighbor)
        WITH n, relevance, edge_descriptions, collect(DISTINCT neighbor.description) as neighbor_descriptions
        
        RETURN DISTINCT 
            n.origin_file as source_file,
            n.description as context,
            n.id as node_id,
            relevance,
            edge_descriptions,
            neighbor_descriptions
        ORDER BY relevance DESC
        LIMIT 5
        """
        
        async with driver.session(database=database) as session:
            source_result = await session.run(source_query, {"query_text": request.query})
            source_records = await source_result.fetch(-1)
            
            # Collect unique context snippets and their provenance.
            contexts = []
            sources = []
            
            for record in source_records:
                # BUG FIX: a node may lack a description property, in which
                # case the record holds the key with a None value — so the
                # dict-style default is never used and .strip would raise
                # AttributeError on None. Coalesce with `or` instead.
                context = (record.get("context") or "").strip('"')
                edge_descs = [desc.strip('"') for desc in (record.get("edge_descriptions") or []) if desc]
                neighbor_descs = [desc.strip('"') for desc in (record.get("neighbor_descriptions") or []) if desc]
                
                # Main node context.
                if context and context not in contexts:
                    contexts.append(context)
                
                # Relationship descriptions.
                for desc in edge_descs:
                    if desc and desc not in contexts:
                        contexts.append(desc)
                
                # Neighbouring-node descriptions.
                for desc in neighbor_descs:
                    if desc and desc not in contexts:
                        contexts.append(desc)
                
                # Provenance for the response payload.
                if record.get("source_file"):
                    sources.append({
                        "folder": record.get("source_file"),
                        "node_id": record.get("node_id"),
                        "context": context,
                        "relevance": record.get("relevance")
                    })
        
        # If we found any context, have the LLM answer from it.
        if contexts:
            # System prompt constraining the LLM to the supplied context.
            system_prompt = """你是一个专业的知识库助手。请根据提供的上下文信息，准确回答用户的问题。
            要求：
            1. 直接给出明确的答案，使用肯定的语气
            2. 必须引用具体的数据和标准
            3. 如果有异常情况，也要说明
            4. 保持专业和简洁
            5. 按照重要性组织信息
            6. 不要添加未在上下文中提供的信息"""
            
            # Bullet list of the collected context snippets.
            context_prompt = "相关信息：\n" + "\n".join(f"- {ctx}" for ctx in contexts)
            
            # Final user prompt combining question and context.
            full_prompt = f"""请基于以下信息回答问题：

问题：{request.query}

{context_prompt}

请提供准确的答案："""
            
            # Generate the answer.
            result = await llm_model_func(
                prompt=full_prompt,
                system_prompt=system_prompt,
                temperature=0.3
            )
        else:
            result = "抱歉，没有找到相关信息。"
        
        # Build the enriched response payload.
        enhanced_result = {
            "query": request.query,
            "answer": result,
            "sources": sources,
            "contexts": contexts,
            "source_summary": f"查询结果来自以下文件：" + 
                            ", ".join([f"'{s['folder']}'" for s in sources])
        }
        
        return Response(
            status="success", 
            data=json.dumps(enhanced_result, ensure_ascii=False, cls=CustomJSONEncoder),
            message="Query executed successfully with source information"
        )
    except Exception as e:
        logger.error(f"Error in query_endpoint: {str(e)}")
        import traceback
        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))

@app.post("/insert", tags=["知识管理"], summary="插入文本")
async def insert_endpoint(request: InsertRequest):
    """Insert a piece of text into the knowledge base.

    Parameters:
    - text: raw text to index through the module-level ``rag`` instance.

    Returns a success Response, or raises HTTPException(500) carrying the
    underlying error message.
    """
    try:
        await rag.insert(request.text)
        return Response(status="success", message="Text inserted successfully")
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))

def initialize_rag_directory(working_dir):
    """Prepare a RAG working directory.

    Ensures *working_dir* exists, seeds an empty
    ``kv_store_full_docs.json`` when the file is absent, and creates the
    ``index``, ``embeddings``, ``documents`` and ``metadata``
    sub-directories.
    """
    # The working directory itself must exist before anything else.
    os.makedirs(working_dir, exist_ok=True)

    kv_store_path = os.path.join(working_dir, "kv_store_full_docs.json")
    if not os.path.exists(kv_store_path):
        # Seed the document store with an empty skeleton so downstream
        # loading code finds a valid JSON file.
        try:
            with open(kv_store_path, 'w', encoding='utf-8') as fh:
                json.dump({"documents": [], "metadata": {}}, fh,
                          ensure_ascii=False, indent=2)
            print(f"Created initial kv_store_full_docs.json at {kv_store_path}")
        except Exception as exc:
            # Best-effort: report the failure but keep building the layout.
            print(f"Error creating kv_store_full_docs.json: {exc}")

    # Create every sub-directory the pipeline expects (same order as before).
    for sub in ("index", "embeddings", "documents", "metadata"):
        os.makedirs(os.path.join(working_dir, sub), exist_ok=True)
# Call the directory initializer before constructing the RAG instance.
try:
    initialize_rag_directory(WORKING_DIR)
except Exception as e:
    print(f"Critical error initializing RAG directory: {e}")
    # Could either re-raise or handle this some other way; here the
    # service aborts, since it cannot run without its index directory.
    raise
# Build the RAG instance using the local model's parameters.
rag = LightRAG(
    working_dir=WORKING_DIR,
    llm_model_func=llm_model_func,
    embedding_func=EmbeddingFunc(
        embedding_dim=1024,  # embedding dimension of bge-large-zh-v1.5-local
        max_token_size=512,  # matches that model's max_tokens
        func=embedding_func
    ),
)

if __name__ == "__main__":
    import uvicorn
    # Serve the FastAPI app on all interfaces, port 8020.
    uvicorn.run(app, host="0.0.0.0", port=8020)