# backend/app.py

import os
import queue
from datetime import datetime
import numpy as np
from functools import partial
import logging
import asyncio
import json
import threading
from time import time
from flask import Flask, request, jsonify, Response
from flask_cors import CORS

from openai import AsyncOpenAI

from nano_graphrag import GraphRAG, QueryParam
from nano_graphrag._op import chunking_by_seperators, chunking_by_token_size, extract_entities
from nano_graphrag.prompt import PROMPTS
from nano_graphrag._utils import compute_args_hash, wrap_embedding_func_with_attrs
from nano_graphrag.base import BaseKVStorage

# ============ Reactor-domain entity types (extend as needed) ============
# Overrides nano-graphrag's default entity types so entity extraction targets
# chemical-reactor domain concepts instead of generic ones.
PROMPTS["DEFAULT_ENTITY_TYPES"] = [
    # Reactor categories and configurations
    "reactor", "batch_reactor", "cstr", "pfr", "loop_reactor",
    "fluidized_bed", "trickle_bed", "fixed_bed", "slurry_bed",
    "autoclave", "microreactor", "photoreactor", "electrochemical_reactor",
    
    # Process units and equipment
    "agitator", "baffle", "heat_exchanger", "cooling_system", "heating_system",
    "separator", "compressor", "pump", "valve",
    
    # Reactions / catalysis / streams
    "reaction", "kinetics", "catalyst", "feedstock", "product", "byproduct",
    "solvent", "inhibitor", "promoter",
    
    # Operating parameters and physical properties
    "temperature", "pressure", "flow_rate", "residence_time", "conversion",
    "selectivity", "yield", "viscosity", "density", "heat_capacity",
    "heat_of_reaction", "mass_transfer", "heat_transfer",
    
    # Control and monitoring
    "sensor", "controller", "control_strategy", "safety_instrumented_function",
    
    # Safety and regulation
    "hazard", "relief_system", "interlock", "regulation",
    
    # Organizations and people
    "plant", "organization", "person"
]

# ============ DashScope (OpenAI-compatible) client defaults ============
DASHSCOPE_BASE_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1"
# Default model names; each can be overridden by environment variables.
BEST_MODEL = os.getenv("DASHSCOPE_BEST_MODEL", "qwen3-max-2025-09-23")
CHEAP_MODEL = os.getenv("DASHSCOPE_CHEAP_MODEL", "qwen-plus-2025-04-28")
EMBED_MODEL = os.getenv("DASHSCOPE_EMBEDDING_MODEL", "text-embedding-v4")

# ============ Embedding configuration ============
# Number of texts nano-graphrag hands to embedding_func per call.
EMBEDDING_BATCH_SIZE = 25
# Per-request input-list limit enforced by the embeddings API endpoint.
DASHSCOPE_EMBED_API_BATCH_LIMIT = int(os.getenv("DASHSCOPE_EMBED_API_BATCH_LIMIT", "10"))

def _qwen_client():
    """Build an AsyncOpenAI client from the current runtime LLM settings."""
    api_key = _get_llm_api_key()
    base_url = _get_llm_base_url()
    return AsyncOpenAI(api_key=api_key, base_url=base_url)

def _embedding_client():
    """Build an AsyncOpenAI client from the current runtime embedding settings."""
    api_key = _get_embedding_api_key()
    base_url = _get_embedding_base_url()
    return AsyncOpenAI(api_key=api_key, base_url=base_url)

# ============ Global state ============
# Ensures the embedding-config banner prints only once per configuration.
_PRINTED_EMBED_HEADER = False
# Lazily detected embedding vector dimension (None until first probe).
_DETECTED_EMBEDDING_DIM = None
# Monotonic counters used to tag log output per call.
_EMBEDDING_CALL_COUNTER = 0
_ENTITY_EXTRACTION_CALL_COUNTER = 0

# ============ Runtime configuration (mutable via POST /config) ============
RUNTIME_CONFIG = {
    'llm_model': BEST_MODEL,
    'llm_api_key': os.getenv("DASHSCOPE_API_KEY", ""),
    'llm_base_url': DASHSCOPE_BASE_URL,
    'embedding_model': EMBED_MODEL,
    'embedding_api_key': os.getenv("DASHSCOPE_API_KEY", ""),
    'embedding_base_url': DASHSCOPE_BASE_URL,
    'embedding_batch_size': DASHSCOPE_EMBED_API_BATCH_LIMIT
}

def _get_embedding_model():
    """Return the currently configured embedding model name."""
    try:
        return RUNTIME_CONFIG['embedding_model']
    except KeyError:
        return EMBED_MODEL

def _get_embedding_api_key():
    """Return the embedding API key, falling back to the DASHSCOPE_API_KEY env var."""
    try:
        return RUNTIME_CONFIG['embedding_api_key']
    except KeyError:
        return os.getenv("DASHSCOPE_API_KEY", "")

def _get_embedding_base_url():
    """Return the embedding endpoint base URL from the runtime config."""
    try:
        return RUNTIME_CONFIG['embedding_base_url']
    except KeyError:
        return DASHSCOPE_BASE_URL

def _get_embedding_batch_size():
    """Return the configured per-request embedding batch size."""
    try:
        return RUNTIME_CONFIG['embedding_batch_size']
    except KeyError:
        return DASHSCOPE_EMBED_API_BATCH_LIMIT

def _get_llm_model():
    """Return the currently configured LLM model name."""
    try:
        return RUNTIME_CONFIG['llm_model']
    except KeyError:
        return BEST_MODEL

def _get_llm_api_key():
    """Return the LLM API key, falling back to the DASHSCOPE_API_KEY env var."""
    try:
        return RUNTIME_CONFIG['llm_api_key']
    except KeyError:
        return os.getenv("DASHSCOPE_API_KEY", "")

def _get_llm_base_url():
    """Return the LLM endpoint base URL from the runtime config."""
    try:
        return RUNTIME_CONFIG['llm_base_url']
    except KeyError:
        return DASHSCOPE_BASE_URL

def _create_dynamic_llm_func():
    """创建使用运行时配置的LLM函数"""
    def dynamic_llm_complete(prompt, system_prompt=None, history_messages=[], **kwargs):
        # 使用当前配置的模型，而不是硬编码
        current_model = _get_llm_model()
        return qwen_complete(prompt, system_prompt, history_messages, model=current_model, **kwargs)
    return dynamic_llm_complete

def _create_cheap_llm_func():
    """创建LLM函数（与主模型使用相同配置）"""
    def cheap_llm_complete(prompt, system_prompt=None, history_messages=[], **kwargs):
        # 不再使用cheap模型，所有调用都使用前端配置的相同模型
        current_model = _get_llm_model()
        return qwen_complete(prompt, system_prompt, history_messages, model=current_model, **kwargs)
    return cheap_llm_complete

# Build progress tracking — shared mutable state read by /build/status and
# snapshotted into every SSE message. Updated from the worker thread.
BUILD_PROGRESS = {
    "active": False,
    "stage": "idle",  # idle | chunk | extract | embedding | done | error
    "build_id": None,
    "current_step": "",
    "chunk_count": 0,
    "current_chunk_index": 0,  # index of the chunk currently being processed
    "extract_progress": {"total": 0, "done": 0},
    "embedding_progress": {"total": 0, "done": 0},
    "entity_progress": {"total": 0, "done": 0, "entities": 0, "relations": 0},
    "llm_calls": {"total": 0, "done": 0},  # running LLM call counter
    "updated_at": 0.0,
    "error_message": "",
}

# SSE channels: build_id -> queue.Queue of progress messages for that build.
SSE_CHANNELS = {}

def _sse_publish(build_id: str, data: dict):
    """Push a progress message onto the SSE queue for *build_id* (best effort).

    Silently does nothing when no channel exists for the build; any queue
    failure is logged and swallowed so progress reporting never breaks a build.
    """
    channel = SSE_CHANNELS.get(build_id)
    if channel is None:
        return
    try:
        # Enrich the event with a timestamp and a shallow progress snapshot.
        payload = dict(data)
        payload["timestamp"] = time()
        payload["progress"] = BUILD_PROGRESS.copy()
        channel.put_nowait(payload)
    except Exception as e:
        print(f"[SSE] 发布消息失败: {e}")

async def qwen_complete(prompt, system_prompt=None, history_messages=None, **kwargs) -> str:
    """Chat completion via the runtime-configured OpenAI-compatible endpoint.

    Args:
        prompt: The user message.
        system_prompt: Optional system message prepended to the conversation.
        history_messages: Optional prior messages (OpenAI message dicts).
            Defaults to None (was a mutable [] default — a Python pitfall).
        **kwargs: Extra OpenAI params. Special keys popped before the call:
            hashing_kv: nano-graphrag KV storage used as a response cache.
            model: Overrides the runtime-configured model.
            response_format: Dropped (not forwarded to the API).

    Returns:
        The assistant message content.

    Raises:
        Whatever the chat-completions API raises (re-raised after logging).
    """
    hashing_kv: BaseKVStorage = kwargs.pop("hashing_kv", None)
    kwargs.pop("response_format", None)
    model = kwargs.pop("model", _get_llm_model())

    # Resolve endpoint/credentials from the runtime config at call time.
    base_url = _get_llm_base_url()
    api_key = _get_llm_api_key()
    api_key_display = f"{api_key[:8]}..." if api_key else "未设置"

    print(f"【LLM模型调用】")
    print(f"  ├── 模型: {model}")
    print(f"  ├── 端点: {base_url}")
    print(f"  ├── 密钥: {api_key_display}")
    print(f"  └── 参数: {kwargs}")

    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    messages.extend(history_messages or [])
    messages.append({"role": "user", "content": prompt})

    if hashing_kv is not None:
        args_hash = compute_args_hash(model, messages)
        cached = await hashing_kv.get_by_id(args_hash)
        if cached is not None:
            print(f"  └── 状态: 缓存命中")
            # Cache hits still count toward extraction-stage LLM progress.
            if BUILD_PROGRESS.get("active") and BUILD_PROGRESS.get("stage") == "extract":
                BUILD_PROGRESS["llm_calls"]["done"] += 1
                _sse_publish(BUILD_PROGRESS["build_id"], {
                    "stage": "extract",
                    "message": f"LLM调用 {BUILD_PROGRESS['llm_calls']['done']} 次 (缓存命中)"
                })
            return cached["return"]

    # Count this real API call toward extraction-stage progress.
    if BUILD_PROGRESS.get("active") and BUILD_PROGRESS.get("stage") == "extract":
        BUILD_PROGRESS["llm_calls"]["done"] += 1
        current_chunk = BUILD_PROGRESS.get("current_chunk_index", 0)

        _sse_publish(BUILD_PROGRESS["build_id"], {
            "stage": "extract",
            "message": f"处理chunk {current_chunk}/{BUILD_PROGRESS['chunk_count']} - LLM调用 {BUILD_PROGRESS['llm_calls']['done']} 次"
        })

    client = _qwen_client()
    try:
        resp = await client.chat.completions.create(model=model, messages=messages, **kwargs)
        print(f"  └── 状态: API调用成功")
        text = resp.choices[0].message.content
    except Exception as e:
        print(f"  └── 状态: API调用失败 - {str(e)}")
        raise

    if hashing_kv is not None:
        # Persist the response keyed by (model, messages) for future cache hits.
        await hashing_kv.upsert({args_hash: {"return": text, "model": model}})
        await hashing_kv.index_done_callback()
    return text

async def _detect_embedding_dimension():
    """Probe the configured embedding model once and cache its vector dimension.

    Falls back to 1536 when the probe request fails. The cached value is
    cleared by /config updates to embedding settings, forcing a re-probe.
    """
    global _DETECTED_EMBEDDING_DIM
    # Guard clause: return the cached dimension if we already probed.
    if _DETECTED_EMBEDDING_DIM is not None:
        return _DETECTED_EMBEDDING_DIM

    model = _get_embedding_model()
    base_url = _get_embedding_base_url()
    api_key = _get_embedding_api_key()
    api_key_display = f"{api_key[:8]}..." if api_key else "未设置"

    print(f"【嵌入模型维度检测】")
    print(f"  ├── 模型: {model}")
    print(f"  ├── 端点: {base_url}")
    print(f"  ├── 密钥: {api_key_display}")
    print(f"  └── 测试输入: ['test']")

    try:
        client = _embedding_client()
        resp = await client.embeddings.create(
            model=model,
            input=["test"],
            encoding_format="float"
        )
        _DETECTED_EMBEDDING_DIM = len(resp.data[0].embedding)
        print(f"  └── 检测结果: 维度 {_DETECTED_EMBEDDING_DIM}")
    except Exception as e:
        print(f"  └── 检测失败: {str(e)}")
        print(f"  └── 使用默认维度: 1536")
        _DETECTED_EMBEDDING_DIM = 1536
    return _DETECTED_EMBEDDING_DIM

async def qwen_embedding(texts: list[str]) -> np.ndarray:
    """Embed *texts* via the runtime-configured endpoint.

    Issues the requests in batches no larger than the configured API limit,
    mirroring per-batch progress into BUILD_PROGRESS and the SSE channel.

    Args:
        texts: Input strings to embed.

    Returns:
        numpy array of shape (len(texts), D), D being the model's dimension.

    Raises:
        Whatever the embeddings API raises for a failed batch (re-raised).
    """
    global _PRINTED_EMBED_HEADER, _DETECTED_EMBEDDING_DIM, _EMBEDDING_CALL_COUNTER

    _EMBEDDING_CALL_COUNTER += 1
    call_id = _EMBEDDING_CALL_COUNTER

    # Resolve endpoint/credentials/batching from the runtime config.
    model = _get_embedding_model()
    base_url = _get_embedding_base_url()
    api_key = _get_embedding_api_key()
    # Clamp to >= 1: /config accepts arbitrary ints, and a value < 1 would
    # otherwise cause a zero-division below and skip the request loop.
    batch_size = max(1, _get_embedding_batch_size())
    api_key_display = f"{api_key[:8]}..." if api_key else "未设置"

    if not _PRINTED_EMBED_HEADER:
        print(f"【嵌入模型配置】")
        print(f"  ├── 模型: {model}")
        print(f"  ├── 端点: {base_url}")
        print(f"  ├── 密钥: {api_key_display}")
        print(f"  ├── 批次大小限制: {batch_size}")
        print(f"  └── 说明: 将文本转为向量用于检索/索引/查询匹配")
        _PRINTED_EMBED_HEADER = True

    client = _embedding_client()
    all_vecs = []
    total = len(texts)
    total_batches = (total + batch_size - 1) // batch_size

    # Account for this call in the shared embedding progress.
    BUILD_PROGRESS["embedding_progress"]["total"] += total
    _sse_publish(BUILD_PROGRESS["build_id"], {
        "stage": "embedding",
        "message": f"【嵌入调用#{call_id}】处理 {total} 个文本，分 {total_batches} 批"
    })

    print(f"【嵌入调用#{call_id}】处理 {total} 个文本，分 {total_batches} 批")

    for i in range(0, total, batch_size):
        batch = texts[i:i+batch_size]
        batch_num = i//batch_size + 1

        print(f"  ├── 批次{batch_num}/{total_batches} | 大小: {len(batch)} | 模型: {model}")

        try:
            resp = await client.embeddings.create(
                model=model, input=batch, encoding_format="float"
            )
            batch_embeddings = [dp.embedding for dp in resp.data]
            print(f"  └── 批次{batch_num}/{total_batches} 成功 | 生成向量: {len(batch_embeddings)}")
        except Exception as e:
            print(f"  └── 批次{batch_num}/{total_batches} 失败: {str(e)}")
            raise

        # Detect the vector dimension from the first successful batch.
        if _DETECTED_EMBEDDING_DIM is None and batch_embeddings:
            _DETECTED_EMBEDDING_DIM = len(batch_embeddings[0])
            print(f"【自动检测】嵌入模型维度: {_DETECTED_EMBEDDING_DIM}")

        all_vecs.extend(batch_embeddings)

        # Publish per-batch progress.
        BUILD_PROGRESS["embedding_progress"]["done"] += len(batch)
        BUILD_PROGRESS["updated_at"] = time()

        _sse_publish(BUILD_PROGRESS["build_id"], {
            "stage": "embedding",
            "batch_progress": f"调用#{call_id} 批次{batch_num}/{total_batches}完成"
        })

    result = np.array(all_vecs)
    print(f"【嵌入调用#{call_id}完成】生成向量形状: {result.shape}")
    return result

def chunking_with_progress(tokens_list, doc_keys, tokenizer_wrapper, overlap_token_size=128, max_token_size=1024, **kwargs):
    """Split documents into token chunks, reporting progress over SSE.

    Delegates the actual splitting to nano-graphrag's chunking_by_token_size;
    this wrapper only adds console logging and BUILD_PROGRESS/SSE updates.
    """
    doc_count = len(tokens_list)
    token_total = sum(len(tokens) for tokens in tokens_list)

    print("=============== 开始文档分块处理")
    print(f"【说明】将文档分割成小块以便后续处理。目标大小: {max_token_size} tokens，重叠: {overlap_token_size} tokens")
    print(f"输入文档数量: {doc_count} 个")
    print(f"总token数量: {token_total:,} tokens")

    if token_total > 0:
        estimated = max(1, token_total // max_token_size)
        print(f"预计生成chunk数量: ~{estimated:,} 个")

    # Advance the shared progress state into the chunking stage.
    BUILD_PROGRESS["stage"] = "chunk"
    BUILD_PROGRESS["current_step"] = "文档分块处理"
    BUILD_PROGRESS["updated_at"] = time()
    _sse_publish(BUILD_PROGRESS["build_id"], {
        "stage": "chunk",
        "message": f"正在处理 {doc_count} 个文档，总计 {token_total:,} tokens"
    })

    chunks = chunking_by_token_size(
        tokens_list, doc_keys, tokenizer_wrapper,
        overlap_token_size=overlap_token_size,
        max_token_size=max_token_size, **kwargs
    )

    chunk_total = len(chunks)
    BUILD_PROGRESS["chunk_count"] = chunk_total
    print(f"=============== 分块完成")
    print(f"实际生成chunk数量: {chunk_total:,} 个")

    _sse_publish(BUILD_PROGRESS["build_id"], {
        "stage": "chunk",
        "message": f"分块完成，生成 {chunk_total} 个chunk"
    })

    return chunks

async def extract_entities_with_progress(chunks, knwoledge_graph_inst, entity_vdb, tokenizer_wrapper, global_config, using_amazon_bedrock=False):
    """Entity/relation extraction with precise per-chunk progress reporting.

    Re-implements nano_graphrag._op.extract_entities so that every finished
    chunk updates BUILD_PROGRESS and publishes an SSE message.

    Args:
        chunks: Mapping of chunk_key -> chunk record (dict with a "content" key).
        knwoledge_graph_inst: Graph storage nodes/edges are upserted into
            (the misspelled name matches nano-graphrag's original signature,
            kept for call-compatibility).
        entity_vdb: Optional vector storage for extracted entities.
        tokenizer_wrapper: Tokenizer handed through to the merge helpers.
        global_config: GraphRAG config; "best_model_func" and
            "entity_extract_max_gleaning" are read here.
        using_amazon_bedrock: Selects the message-packing format for history.

    Returns:
        The graph storage instance, or None when there are no chunks or no
        entities were extracted.
    """
    global _ENTITY_EXTRACTION_CALL_COUNTER
    
    _ENTITY_EXTRACTION_CALL_COUNTER += 1
    call_id = _ENTITY_EXTRACTION_CALL_COUNTER
    
    chunk_count = len(chunks) if chunks else 0
    
    print(f"=============== 开始实体抽取 #{call_id}")
    print(f"【说明】从文本chunk中识别和提取实体及其关系，构建知识图谱")
    print(f"待处理chunk数量: {chunk_count:,} 个")
    
    # Reset the shared progress state for this extraction pass.
    BUILD_PROGRESS.update({
        "stage": "extract",
        "current_step": f"实体抽取 #{call_id}",
        "current_chunk_index": 0,
        "extract_progress": {"total": chunk_count, "done": 0},  # per-chunk progress
        "entity_progress": {"total": 0, "done": 0, "entities": 0, "relations": 0},  # entity/relation tallies
        "llm_calls": {"total": 0, "done": 0},  # live LLM call counter
        "updated_at": time()
    })
    
    if chunk_count > 0:
        # Collect chunk lengths for informational logging only.
        chunk_lengths = []
        for chunk_key, chunk_data in chunks.items():
            if hasattr(chunk_data, 'content'):
                chunk_lengths.append(len(chunk_data.content))
            elif isinstance(chunk_data, dict) and 'content' in chunk_data:
                chunk_lengths.append(len(chunk_data['content']))
        
        if chunk_lengths:
            avg_length = sum(chunk_lengths) / len(chunk_lengths)
            print(f"平均chunk长度: {avg_length:.0f} 字符")
            print(f"最长chunk: {max(chunk_lengths)} 字符，最短chunk: {min(chunk_lengths)} 字符")
    
    max_gleaning = global_config.get('entity_extract_max_gleaning', 1)
    if max_gleaning > 1:
        print(f"多轮抽取设置: 最多 {max_gleaning} 轮补充抽取")
    
    print(f"【实体抽取 #{call_id}】开始处理...")
    
    _sse_publish(BUILD_PROGRESS["build_id"], {
        "stage": "extract",
        "message": f"开始实体抽取: {chunk_count} 个chunk，实时监控进度"
    })
    
    # Nothing to do — mirrors extract_entities' empty-input contract.
    if chunk_count == 0:
        return None
        
    # Local imports copied from nano-graphrag's implementation.
    import re
    import json
    from collections import Counter, defaultdict
    from nano_graphrag.prompt import GRAPH_FIELD_SEP, PROMPTS
    from nano_graphrag._utils import (
        clean_str, is_float_regex, split_string_by_multi_markers,
        pack_user_ass_to_openai_messages
    )
    
    # Pull the model function and gleaning budget from the GraphRAG config.
    use_llm_func = global_config["best_model_func"]
    entity_extract_max_gleaning = global_config["entity_extract_max_gleaning"]
    ordered_chunks = list(chunks.items())
    
    # Record parsers copied from nano-graphrag.
    async def _handle_single_entity_extraction(record_attributes, chunk_key):
        """Parse one delimited record into an entity dict, or None if not an entity."""
        if len(record_attributes) < 4 or record_attributes[0] != '"entity"':
            return None
        entity_name = clean_str(record_attributes[1].upper())
        if not entity_name.strip():
            return None
        entity_type = clean_str(record_attributes[2].upper())
        entity_description = clean_str(record_attributes[3])
        entity_source_id = chunk_key
        return dict(
            entity_name=entity_name,
            entity_type=entity_type,
            description=entity_description,
            source_id=entity_source_id,
        )

    async def _handle_single_relationship_extraction(record_attributes, chunk_key):
        """Parse one delimited record into a relationship dict, or None."""
        if len(record_attributes) < 5 or record_attributes[0] != '"relationship"':
            return None
        source = clean_str(record_attributes[1].upper())
        target = clean_str(record_attributes[2].upper())
        edge_description = clean_str(record_attributes[3])
        edge_source_id = chunk_key
        weight = (
            float(record_attributes[-1]) if is_float_regex(record_attributes[-1]) else 1.0
        )
        return dict(
            src_id=source,
            tgt_id=target,
            weight=weight,
            description=edge_description,
            source_id=edge_source_id,
        )
    
    # Progress tallies shared (via closure) across all concurrent chunk tasks.
    already_processed = 0
    already_entities = 0
    already_relations = 0
    
    # Per-chunk worker: extract + parse, then push a progress update.
    async def _process_single_content_with_progress(chunk_key_dp):
        """Extract entities/relations from one (chunk_key, chunk) pair."""
        nonlocal already_processed, already_entities, already_relations
        chunk_key = chunk_key_dp[0]
        chunk_dp = chunk_key_dp[1]
        content = chunk_dp["content"]
        
        # Assemble the extraction prompt from the configured delimiters/types.
        entity_extract_prompt = PROMPTS["entity_extraction"]
        context_base = dict(
            tuple_delimiter=PROMPTS["DEFAULT_TUPLE_DELIMITER"],
            record_delimiter=PROMPTS["DEFAULT_RECORD_DELIMITER"],
            completion_delimiter=PROMPTS["DEFAULT_COMPLETION_DELIMITER"],
            entity_types=",".join(PROMPTS["DEFAULT_ENTITY_TYPES"]),
        )
        continue_prompt = PROMPTS["entiti_continue_extraction"]
        if_loop_prompt = PROMPTS["entiti_if_loop_extraction"]
        
        hint_prompt = entity_extract_prompt.format(**context_base, input_text=content)
        final_result = await use_llm_func(hint_prompt)
        
        if isinstance(final_result, list):
            final_result = final_result[0]["text"]

        history = pack_user_ass_to_openai_messages(hint_prompt, final_result, using_amazon_bedrock)
        
        # Optional gleaning rounds: ask the model to continue extracting,
        # stopping early when it answers anything other than "yes".
        for now_glean_index in range(entity_extract_max_gleaning):
            glean_result = await use_llm_func(continue_prompt, history_messages=history)
            history += pack_user_ass_to_openai_messages(continue_prompt, glean_result, using_amazon_bedrock)
            final_result += glean_result
            
            if now_glean_index == entity_extract_max_gleaning - 1:
                break

            if_loop_result = await use_llm_func(if_loop_prompt, history_messages=history)
            if_loop_result = if_loop_result.strip().strip('"').strip("'").lower()
            if if_loop_result != "yes":
                break

        # Split the raw completion into individual delimited records.
        records = split_string_by_multi_markers(
            final_result,
            [context_base["record_delimiter"], context_base["completion_delimiter"]],
        )

        maybe_nodes = defaultdict(list)
        maybe_edges = defaultdict(list)
        
        for record in records:
            # Each record's payload lives inside the outermost parentheses.
            record = re.search(r"\((.*)\)", record)
            if record is None:
                continue
            record = record.group(1)
            record_attributes = split_string_by_multi_markers(
                record, [context_base["tuple_delimiter"]]
            )
            
            if_entities = await _handle_single_entity_extraction(record_attributes, chunk_key)
            if if_entities is not None:
                maybe_nodes[if_entities["entity_name"]].append(if_entities)
                continue

            if_relation = await _handle_single_relationship_extraction(record_attributes, chunk_key)
            if if_relation is not None:
                maybe_edges[(if_relation["src_id"], if_relation["tgt_id"])].append(if_relation)
        
        # Advance the shared tallies for this finished chunk.
        already_processed += 1
        already_entities += len(maybe_nodes)
        already_relations += len(maybe_edges)
        
        # Mirror progress into BUILD_PROGRESS for /build/status and SSE clients.
        BUILD_PROGRESS["extract_progress"]["done"] = already_processed  # chunks finished
        BUILD_PROGRESS["entity_progress"]["entities"] = already_entities  # entities so far
        BUILD_PROGRESS["entity_progress"]["relations"] = already_relations  # relations so far
        BUILD_PROGRESS["current_chunk_index"] = already_processed
        BUILD_PROGRESS["updated_at"] = time()
        
        progress_percent = int((already_processed / chunk_count) * 100)
        _sse_publish(BUILD_PROGRESS["build_id"], {
            "stage": "extract",
            "message": f"chunk {already_processed}/{chunk_count} ({progress_percent}%) | 实体: {already_entities}, 关系: {already_relations}"
        })
        
        print(f"✓ 已处理chunk {already_processed}/{chunk_count} ({progress_percent}%) | 实体: {already_entities}, 关系: {already_relations}")
        
        return dict(maybe_nodes), dict(maybe_edges)

    # Fan out: all chunks are extracted concurrently.
    print("正在并行处理所有chunk...")
    results = await asyncio.gather(*[_process_single_content_with_progress(c) for c in ordered_chunks])
    
    # Merge per-chunk results (same logic as nano-graphrag); undirected edges
    # are keyed by the sorted endpoint pair.
    maybe_nodes = defaultdict(list)
    maybe_edges = defaultdict(list)
    for m_nodes, m_edges in results:
        for k, v in m_nodes.items():
            maybe_nodes[k].extend(v)
        for k, v in m_edges.items():
            maybe_edges[tuple(sorted(k))].extend(v)
    
    # Merge helpers from nano-graphrag handle dedup/summarization + upsert.
    from nano_graphrag._op import _merge_nodes_then_upsert, _merge_edges_then_upsert
    
    # Merge and persist entities and relations.
    _sse_publish(BUILD_PROGRESS["build_id"], {
        "stage": "extract",
        "message": f"正在合并和存储实体关系..."
    })
    
    all_entities_data = await asyncio.gather(*[
        _merge_nodes_then_upsert(k, v, knwoledge_graph_inst, global_config, tokenizer_wrapper)
        for k, v in maybe_nodes.items()
    ])
    
    await asyncio.gather(*[
        _merge_edges_then_upsert(k[0], k[1], v, knwoledge_graph_inst, global_config, tokenizer_wrapper)
        for k, v in maybe_edges.items()
    ])
    
    if not len(all_entities_data):
        print("警告: 未提取到任何实体，可能LLM配置有问题")
        return None
    
    # Index extracted entities in the vector DB (name + description as content).
    if entity_vdb is not None:
        from nano_graphrag._utils import compute_mdhash_id
        data_for_vdb = {
            compute_mdhash_id(dp["entity_name"], prefix="ent-"): {
                "content": dp["entity_name"] + dp["description"],
                "entity_name": dp["entity_name"],
            }
            for dp in all_entities_data
        }
        await entity_vdb.upsert(data_for_vdb)
    
    # Final tallies for reporting.
    entity_count = len(all_entities_data)
    relation_count = len(maybe_edges)
    
    print(f"【实体抽取 #{call_id} 完成】")
    print(f"提取到实体数量: {entity_count:,} 个")
    print(f"提取到关系数量: {relation_count:,} 个")
    
    # Mark the extraction stage complete in the shared progress state.
    BUILD_PROGRESS.update({
        "extract_progress": {
            "total": chunk_count,
            "done": chunk_count
        },
        "entity_progress": {
            "total": chunk_count,
            "done": chunk_count,
            "entities": entity_count,
            "relations": relation_count
        },
        "current_chunk_index": chunk_count,
        "updated_at": time()
    })
    
    _sse_publish(BUILD_PROGRESS["build_id"], {
        "stage": "extract",
        "message": f"实体抽取完成: {entity_count} 个实体，{relation_count} 个关系"
    })
    
    return knwoledge_graph_inst

def create_adaptive_embedding_func():
    """Build an embedding function whose declared dimension matches the model.

    Synchronously probes the configured embedding endpoint once (falling back
    to 1536 if the probe fails) and wraps qwen_embedding with nano-graphrag's
    attribute decorator so vector storages are created with the right size.
    """
    try:
        # asyncio is imported at module level; run the async probe to completion.
        detected_dim = asyncio.run(_detect_embedding_dimension())
    except Exception as e:
        # A bare `except:` here would also swallow KeyboardInterrupt/SystemExit
        # and hide the failure entirely; catch Exception and log the fallback.
        print(f"[Embedding] 维度检测失败，使用默认维度 1536: {e}")
        detected_dim = 1536
    
    @wrap_embedding_func_with_attrs(embedding_dim=detected_dim, max_token_size=8192)
    async def adaptive_qwen_embedding(texts: list[str]) -> np.ndarray:
        return await qwen_embedding(texts)
    
    return adaptive_qwen_embedding

# ================= Flask application =================
app = Flask(__name__)
# Allow cross-origin requests from any origin (frontend dev server).
CORS(app, resources={r"/*": {"origins": "*"}})

# Logging setup
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Global GraphRAG instance; (re)created per build by _init_rag.
rag_instance = None

def _init_rag(working_dir: str):
    """Create the global GraphRAG instance wired to this module's hooks.

    Plugs in the progress-reporting chunker/extractor and the runtime-config
    aware model/embedding functions, so frontend settings apply per call.

    Args:
        working_dir: Directory where GraphRAG persists its storages.
    """
    global rag_instance
    
    print("【准备】创建自适应维度的嵌入函数...")
    adaptive_embedding_func = create_adaptive_embedding_func()
    
    rag_instance = GraphRAG(
        working_dir=working_dir,
        chunk_func=chunking_with_progress,
        chunk_token_size=1000,
        chunk_overlap_token_size=120,
        entity_extract_max_gleaning=2,
        best_model_func=_create_dynamic_llm_func(),
        cheap_model_func=_create_cheap_llm_func(),  # NOTE: same config as best_model by design
        best_model_max_async=2,
        cheap_model_max_async=2,
        graph_cluster_algorithm="labelprop",
        embedding_func=adaptive_embedding_func,
        embedding_batch_num=EMBEDDING_BATCH_SIZE,
        embedding_func_max_async=8,
        entity_extraction_func=extract_entities_with_progress,
        enable_naive_rag=True,
    )
    print(f"[GraphRAG] 实例初始化完成，工作目录: {working_dir}")

def _build_task(md_content: str, build_id: str):
    """Background worker: build the knowledge graph from one Markdown document.

    Runs in a daemon thread started by /build. All outcomes (done/error) are
    written to BUILD_PROGRESS and published on the build's SSE channel.

    Args:
        md_content: The uploaded Markdown text to ingest.
        build_id: Identifier tying progress updates to an SSE channel.
    """
    try:
        # Each build gets a fresh timestamped working directory.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        working_dir = f"./cache/build_{timestamp}"
        os.makedirs(working_dir, exist_ok=True)
        
        # Initialize GraphRAG for this build.
        _init_rag(working_dir)
        
        # Reset the shared progress state for the new build.
        BUILD_PROGRESS.update({
            "active": True,
            "stage": "chunk",
            "build_id": build_id,
            "current_step": "开始构建",
            "chunk_count": 0,
            "current_chunk_index": 0,
            "extract_progress": {"total": 0, "done": 0},
            "embedding_progress": {"total": 0, "done": 0},
            "entity_progress": {"total": 0, "done": 0, "entities": 0, "relations": 0},
            "llm_calls": {"total": 0, "done": 0},  # LLM call counter
            "updated_at": time(),
            "error_message": "",
        })
        
        _sse_publish(build_id, {"stage": "start", "message": "开始构建知识图谱"})
        
        # Run the (async) insertion pipeline to completion on this thread.
        print(f"[Build] 开始处理文档，长度: {len(md_content)} 字符")
        asyncio.run(rag_instance.insert([md_content]))
        
        # Mark success.
        BUILD_PROGRESS.update({
            "active": False,
            "stage": "done",
            "current_step": "构建完成",
            "updated_at": time(),
        })
        
        _sse_publish(build_id, {"stage": "done", "message": "知识图谱构建完成"})
        print(f"[Build] 构建完成，工作目录: {working_dir}")
        
    except Exception as e:
        # Record the failure so /build/status and the SSE stream surface it.
        error_msg = str(e)
        BUILD_PROGRESS.update({
            "active": False,
            "stage": "error", 
            "error_message": error_msg,
            "updated_at": time(),
        })
        _sse_publish(build_id, {"stage": "error", "error": error_msg})
        logger.error(f"构建失败: {error_msg}")

# ================= API 路由 =================

@app.route('/build', methods=['POST'])
def build():
    """Accept a Markdown upload and launch a background graph-build task.

    Returns 202 with a build_id on success; progress is then streamed via
    /build/stream?build_id=... and polled via /build/status.
    """
    try:
        # Validate the multipart upload.
        if 'file' not in request.files:
            return jsonify({"error": "未找到文件"}), 400
        
        file = request.files['file']
        if file.filename == '':
            return jsonify({"error": "未选择文件"}), 400
        
        # Case-insensitive extension check so "DOC.MD" is accepted too.
        if not file.filename.lower().endswith('.md'):
            return jsonify({"error": "只支持Markdown文件"}), 400
        
        # Decode as UTF-8; reject non-UTF-8 uploads with a clear 400 instead
        # of letting the catch-all below turn it into a generic 500.
        try:
            content = file.read().decode('utf-8')
        except UnicodeDecodeError:
            return jsonify({"error": "文件必须是UTF-8编码"}), 400
        
        # Millisecond timestamp doubles as a (practically unique) build id.
        build_id = str(int(time() * 1000))
        
        # Create the SSE channel before the worker starts publishing to it.
        SSE_CHANNELS[build_id] = queue.Queue()
        
        # Run the build off the request thread; daemon so it won't block exit.
        thread = threading.Thread(target=_build_task, args=(content, build_id))
        thread.daemon = True
        thread.start()
        
        return jsonify({
            "message": "构建任务已启动",
            "build_id": build_id
        }), 202
        
    except Exception as e:
        return jsonify({"error": str(e)}), 500

@app.route('/build/stream')
def build_stream():
    """SSE endpoint streaming build-progress messages for a given build_id.

    Forwards queued messages as `data:` events, emits a keepalive after 30s
    of silence, ends the stream on a terminal done/error message, and always
    removes the channel when the stream closes.
    """
    build_id = request.args.get('build_id')
    if not build_id or build_id not in SSE_CHANNELS:
        return jsonify({"error": "无效的build_id"}), 400
    
    def event_stream():
        q = SSE_CHANNELS[build_id]
        try:
            # First event: snapshot of the current progress state.
            yield f"data: {json.dumps({'init': True, 'progress': BUILD_PROGRESS})}\n\n"
            
            while True:
                try:
                    message = q.get(timeout=30)  # 30s wait per message
                    yield f"data: {json.dumps(message)}\n\n"
                    
                    # Terminal stages end the stream.
                    if message.get('stage') in ['done', 'error']:
                        break
                        
                except queue.Empty:
                    # No message within the timeout: send a keepalive so the
                    # client (and intermediaries) keep the connection open.
                    yield f"data: {json.dumps({'type': 'keepalive'})}\n\n"
                    
        except Exception as e:
            yield f"data: {json.dumps({'stage': 'error', 'error': str(e)})}\n\n"
        finally:
            # Drop the channel once the stream ends for any reason.
            SSE_CHANNELS.pop(build_id, None)
    
    return Response(event_stream(), mimetype='text/event-stream', headers={
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
        'Access-Control-Allow-Origin': '*',
    })

@app.route('/build/status')
def build_status():
    """Return a JSON snapshot of the shared build-progress state."""
    return jsonify(BUILD_PROGRESS)

@app.route('/query', methods=['POST'])
def query():
    """Answer a question against the built knowledge graph.

    Expects JSON with "question", optional "query_mode" (default "local")
    and "response_type". Returns {"answer": ...} or an error payload.
    """
    try:
        # A graph must have been built first.
        if not rag_instance:
            return jsonify({"error": "GraphRAG未初始化，请先构建知识图谱"}), 400
        
        body = request.get_json()
        question = body.get('question', '').strip()
        query_mode = body.get('query_mode', 'local')
        response_type = body.get('response_type', '要点式，尽量精炼')
        
        if not question:
            return jsonify({"error": "问题不能为空"}), 400
        
        # Drive the async query to completion on this request thread.
        param = QueryParam(mode=query_mode, response_type=response_type)
        answer = asyncio.run(rag_instance.query(question, param=param))
        
        return jsonify({"answer": str(answer)})
        
    except Exception as e:
        logger.error(f"查询失败: {e}")
        return jsonify({"error": str(e)}), 500

@app.route('/health')
def health():
    """Liveness probe: report service status and the current epoch time."""
    return jsonify({"status": "ok", "timestamp": time()})

@app.route('/health/llm', methods=['POST'])
def health_llm():
    """Connectivity check for an LLM endpoint with caller-supplied credentials.

    Expects JSON with api_key, model and an optional base_url; sends a
    one-token ping and reports {"ok": true} on success.
    """
    try:
        payload = request.get_json() or {}
        api_key = payload.get('api_key', '').strip()
        base_url = payload.get('base_url', '').strip()
        model = payload.get('model', '').strip()
        
        # Validate required fields up front.
        if not api_key:
            return jsonify({"ok": False, "error": "缺少API Key"}), 400
        if not model:
            return jsonify({"ok": False, "error": "缺少模型名称"}), 400
        
        # Normalize the base URL: default to DashScope, ensure a /v1 suffix.
        if base_url:
            base_url = base_url.rstrip('/')
            if 'compatible-mode/v1' not in base_url and not base_url.endswith('/v1'):
                base_url = base_url + '/v1'
        else:
            base_url = DASHSCOPE_BASE_URL
        
        # One-token ping verifies the credentials and model name.
        client = AsyncOpenAI(api_key=api_key, base_url=base_url)
        asyncio.run(client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": "ping"}],
            max_tokens=1
        ))
        
        return jsonify({"ok": True})
        
    except Exception as e:
        error_msg = str(e)
        logger.error(f"LLM健康检查失败: {error_msg}")
        return jsonify({"ok": False, "error": error_msg}), 500

@app.route('/health/embedding', methods=['POST'])
def health_embedding():
    """Connectivity check for an embedding endpoint with caller-supplied credentials.

    Expects JSON with api_key, model and an optional base_url; embeds a
    single test string and reports {"ok": true} on success.
    """
    try:
        payload = request.get_json() or {}
        api_key = payload.get('api_key', '').strip()
        base_url = payload.get('base_url', '').strip()
        model = payload.get('model', '').strip()
        
        # Validate required fields up front.
        if not api_key:
            return jsonify({"ok": False, "error": "缺少API Key"}), 400
        if not model:
            return jsonify({"ok": False, "error": "缺少模型名称"}), 400
        
        # Normalize the base URL: default to DashScope, ensure a /v1 suffix.
        if base_url:
            base_url = base_url.rstrip('/')
            if 'compatible-mode/v1' not in base_url and not base_url.endswith('/v1'):
                base_url = base_url + '/v1'
        else:
            base_url = DASHSCOPE_BASE_URL
        
        # One tiny embedding request verifies credentials and model name.
        client = AsyncOpenAI(api_key=api_key, base_url=base_url)
        asyncio.run(client.embeddings.create(
            model=model,
            input=["test"]
        ))
        
        return jsonify({"ok": True})
        
    except Exception as e:
        error_msg = str(e)
        logger.error(f"嵌入模型健康检查失败: {error_msg}")
        return jsonify({"ok": False, "error": error_msg}), 500

@app.route('/config', methods=['GET'])
def get_config():
    """Return the current runtime configuration.

    NOTE(review): this echoes API keys back to the client in plaintext;
    consider redacting them if this API is ever exposed beyond localhost.
    """
    return jsonify({
        "current_config": RUNTIME_CONFIG
    })

@app.route('/config', methods=['POST'])
def update_config():
    """Update the runtime LLM/embedding configuration.

    Accepts a JSON body with any subset of the allowed keys; unknown keys
    and invalid values are silently skipped. Changing any embedding setting
    resets the cached dimension/banner state so they are re-detected.
    """
    global RUNTIME_CONFIG, _PRINTED_EMBED_HEADER, _DETECTED_EMBEDDING_DIM
    try:
        data = request.get_json() or {}
        
        # Keys actually applied this request (echoed back to the caller).
        config_updates = {}
        
        # Whitelist of updatable configuration keys.
        allowed_keys = {
            'llm_model', 'llm_api_key', 'llm_base_url',
            'embedding_model', 'embedding_api_key', 'embedding_base_url',
            'embedding_batch_size'
        }
        
        for key, value in data.items():
            if key in allowed_keys and value is not None:
                if key == 'embedding_batch_size':
                    try:
                        new_value = int(value)
                    except (ValueError, TypeError):
                        continue
                    # Reject zero/negative sizes: they would break the
                    # batching arithmetic in qwen_embedding (division by zero).
                    if new_value < 1:
                        continue
                    RUNTIME_CONFIG[key] = new_value
                    config_updates[key] = new_value
                else:
                    new_value = str(value).strip()
                    RUNTIME_CONFIG[key] = new_value
                    config_updates[key] = new_value
        
        # Embedding config changed: force dimension re-detection and banner reprint.
        if any(key.startswith('embedding') for key in config_updates.keys()):
            _PRINTED_EMBED_HEADER = False
            _DETECTED_EMBEDDING_DIM = None
            logger.info("嵌入模型配置已变更，重置相关状态")
        
        if any(key.startswith('llm') for key in config_updates.keys()):
            logger.info("LLM模型配置已变更")
        
        logger.info(f"配置已更新: {config_updates}")
        logger.info(f"当前运行时配置: {RUNTIME_CONFIG}")
        
        return jsonify({
            "message": "配置更新成功",
            "updated": config_updates,
            "current_config": RUNTIME_CONFIG
        })
        
    except Exception as e:
        logger.error(f"更新配置失败: {e}")
        return jsonify({"error": str(e)}), 500

if __name__ == '__main__':
    # Ensure the build cache directory exists before any build starts.
    os.makedirs('./cache', exist_ok=True)
    
    print("启动GraphRAG后端服务...")
    # NOTE(review): debug=True enables Werkzeug's reloader/debugger —
    # do not run this way in production.
    app.run(host='0.0.0.0', port=5001, debug=True)

