import os
import argparse
from datetime import datetime
import numpy as np
from functools import partial
import logging

from openai import AsyncOpenAI

from nano_graphrag import GraphRAG, QueryParam
from nano_graphrag._op import chunking_by_seperators, chunking_by_token_size, extract_entities
from nano_graphrag.prompt import PROMPTS
from nano_graphrag._utils import compute_args_hash, wrap_embedding_func_with_attrs
from nano_graphrag.base import BaseKVStorage


# ============ Reactor-domain entity types (extend as needed) ============
# Note: the prompt uses lowercase; the underlying storage normalizes to uppercase.
PROMPTS["DEFAULT_ENTITY_TYPES"] = [
    # Reactor categories and configurations
    "reactor",              # reactor (generic)
    "batch_reactor",        # batch reactor (stirred tank)
    "cstr",                 # continuous stirred-tank reactor
    "pfr",                  # plug-flow reactor (tubular)
    "loop_reactor",         # loop reactor
    "fluidized_bed",        # fluidized-bed reactor
    "trickle_bed",          # trickle-bed reactor
    "fixed_bed",            # fixed-bed reactor
    "slurry_bed",           # slurry-bed reactor
    "autoclave",            # high-pressure autoclave
    "microreactor",         # microreactor
    "photoreactor",         # photoreactor
    "electrochemical_reactor",  # electrochemical reactor

    # Process units and equipment
    "agitator",             # agitator / stirrer
    "baffle",               # baffle
    "heat_exchanger",       # heat exchanger / jacket / internal coil
    "cooling_system",       # cooling system
    "heating_system",       # heating system
    "separator",            # separation equipment
    "compressor",           # compressor
    "pump",                 # pump
    "valve",                # valve

    # Reactions / catalysis / streams
    "reaction",             # reaction type / network
    "kinetics",             # kinetics / rate expression
    "catalyst",             # catalyst
    "feedstock",            # feedstock / feed
    "product",              # product
    "byproduct",            # byproduct
    "solvent",              # solvent
    "inhibitor",            # inhibitor
    "promoter",             # promoter

    # Operating parameters and physical properties
    "temperature",          # temperature
    "pressure",             # pressure
    "flow_rate",            # flow rate / space velocity
    "residence_time",       # residence time
    "conversion",           # conversion
    "selectivity",          # selectivity
    "yield",                # yield
    "viscosity",            # viscosity
    "density",              # density
    "heat_capacity",        # specific heat capacity
    "heat_of_reaction",     # heat of reaction
    "mass_transfer",        # mass-transfer coefficient / limitation
    "heat_transfer",        # heat-transfer coefficient / limitation

    # Control and monitoring
    "sensor",               # sensor
    "controller",           # controller (PID, MPC, etc.)
    "control_strategy",     # control strategy
    "safety_instrumented_function",  # safety instrumented function

    # Safety and regulation
    "hazard",               # hazard source (overtemperature, overpressure, loss of containment, runaway polymerization, etc.)
    "relief_system",        # relief system (safety valve / rupture disk)
    "interlock",            # interlock
    "regulation",           # regulation / standard (e.g. API/ASME/IEC)

    # Organizations and people
    "plant",                # plant / facility
    "organization",         # organization
    "person"                # person
]


# ============ DashScope (OpenAI-compatible) client configuration ============
DASHSCOPE_BASE_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1"
# Model names are overridable via environment variables.
BEST_MODEL = os.getenv("DASHSCOPE_BEST_MODEL", "qwen3-max-2025-09-23")
CHEAP_MODEL = os.getenv("DASHSCOPE_CHEAP_MODEL", "qwen-plus-2025-04-28")
EMBED_MODEL = os.getenv("DASHSCOPE_EMBEDDING_MODEL", "text-embedding-v4")  # adjust per the provider's model list

# ============ Embedding configuration ============
# Number of texts GraphRAG passes to the embedding function per call (edit directly).
EMBEDDING_BATCH_SIZE = 25
# Maximum number of texts per single DashScope embedding API request.
DASHSCOPE_EMBED_API_BATCH_LIMIT = int(os.getenv("DASHSCOPE_EMBED_API_BATCH_LIMIT", "10"))


def _qwen_client():
    """Return an AsyncOpenAI client pointed at the DashScope-compatible endpoint."""
    api_key = os.getenv("DASHSCOPE_API_KEY")
    return AsyncOpenAI(api_key=api_key, base_url=DASHSCOPE_BASE_URL)


async def qwen_complete(prompt, system_prompt=None, history_messages=None, **kwargs) -> str:
    """Qwen chat completion with optional KV caching.

    Builds the message list from ``system_prompt``, ``history_messages`` and
    ``prompt``, consults the ``hashing_kv`` cache first, and only calls the
    model on a cache miss. Fields DashScope may not accept are stripped.

    Args:
        prompt: User message content.
        system_prompt: Optional system message prepended to the conversation.
        history_messages: Optional list of prior chat messages (default: none).
        **kwargs: Extra completion options; ``model`` selects the model
            (default ``BEST_MODEL``), ``hashing_kv`` supplies cache storage.

    Returns:
        The assistant's reply text.
    """
    hashing_kv: BaseKVStorage = kwargs.pop("hashing_kv", None)
    # DashScope may not support response_format; drop it to avoid API errors.
    kwargs.pop("response_format", None)
    model = kwargs.pop("model", BEST_MODEL)

    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    # history_messages defaults to None (not a mutable default) to avoid the
    # shared-mutable-default pitfall; `or []` covers both None and empty.
    messages.extend(history_messages or [])
    messages.append({"role": "user", "content": prompt})

    if hashing_kv is not None:
        args_hash = compute_args_hash(model, messages)
        cached = await hashing_kv.get_by_id(args_hash)
        if cached is not None:
            # Cache hit: skip the model call entirely.
            return cached["return"]

    client = _qwen_client()
    resp = await client.chat.completions.create(model=model, messages=messages, **kwargs)
    text = resp.choices[0].message.content

    if hashing_kv is not None:
        await hashing_kv.upsert({args_hash: {"return": text, "model": model}})
        await hashing_kv.index_done_callback()
    return text


def chunking_with_progress(tokens_list, doc_keys, tokenizer_wrapper, overlap_token_size=128, max_token_size=1024, **kwargs):
    """Chunking wrapper around ``chunking_by_token_size`` that prints progress stats.

    Args:
        tokens_list: One token sequence per input document.
        doc_keys: Document keys parallel to ``tokens_list``.
        tokenizer_wrapper: Tokenizer used for chunking (and for decoding stats).
        overlap_token_size: Token overlap between consecutive chunks.
        max_token_size: Target maximum tokens per chunk.
        **kwargs: Forwarded unchanged to ``chunking_by_token_size``.

    Returns:
        The chunk list produced by ``chunking_by_token_size``.
    """
    print("=============== 开始文档分块处理")
    print(f"【说明】将文档分割成小块以便后续处理。目标大小: {max_token_size} tokens，重叠: {overlap_token_size} tokens")

    # Input statistics before chunking.
    total_docs = len(tokens_list)
    total_tokens = sum(len(tokens) for tokens in tokens_list)

    print(f"输入文档数量: {total_docs} 个")
    print(f"总token数量: {total_tokens:,} tokens")
    if total_tokens > 0:
        estimated_chunks = max(1, total_tokens // max_token_size)
        print(f"预计生成chunk数量: ~{estimated_chunks:,} 个")

    # Delegate the actual splitting to the library implementation.
    chunks = chunking_by_token_size(tokens_list, doc_keys, tokenizer_wrapper,
                                  overlap_token_size=overlap_token_size,
                                  max_token_size=max_token_size, **kwargs)

    actual_chunks = len(chunks)
    print(f"=============== 分块完成")
    print(f"实际生成chunk数量: {actual_chunks:,} 个")

    if actual_chunks > 0:
        # Decode chunks back to text to compute character-length statistics.
        chunk_lengths = []
        for chunk in chunks:
            try:
                # Chunks are expected to be dicts; prefer decoding 'tokens',
                # fall back to a 'content' field.
                if isinstance(chunk, dict):
                    if 'tokens' in chunk and hasattr(tokenizer_wrapper, 'decode'):
                        text = tokenizer_wrapper.decode(chunk['tokens'])
                        chunk_lengths.append(len(text))
                    elif 'content' in chunk:
                        chunk_lengths.append(len(chunk['content']))
                elif hasattr(tokenizer_wrapper, 'decode'):
                    # Otherwise assume a raw token list.
                    text = tokenizer_wrapper.decode(chunk)
                    chunk_lengths.append(len(text))
            except Exception:
                # Stats are best-effort only; skip chunks that fail to decode.
                continue

        if chunk_lengths:
            avg_length = sum(chunk_lengths) / len(chunk_lengths)
            print(f"平均chunk长度: {avg_length:.0f} 字符")
            print(f"最小chunk长度: {min(chunk_lengths)} 字符")
            print(f"最大chunk长度: {max(chunk_lengths)} 字符")
        else:
            print("无法计算chunk长度统计")

    return chunks


# Module-level state shared across embedding calls.
_PRINTED_EMBED_HEADER = False  # whether the one-time embedding banner was printed
_DETECTED_EMBEDDING_DIM = None  # lazily detected embedding vector dimension
_EMBEDDING_CALL_COUNTER = 0  # monotonically increasing embedding-call id for logs

async def _detect_embedding_dimension():
    """Probe the embedding model once and cache its output dimension.

    Falls back to 1536 when the probe request fails; the result is cached in
    the module-level ``_DETECTED_EMBEDDING_DIM`` so the probe runs at most once.
    """
    global _DETECTED_EMBEDDING_DIM
    if _DETECTED_EMBEDDING_DIM is not None:
        return _DETECTED_EMBEDDING_DIM

    print("【初始化】检测嵌入模型维度...")
    try:
        # Embed a tiny probe text and measure the returned vector's length.
        probe = await _qwen_client().embeddings.create(
            model=EMBED_MODEL,
            input=["test"],
            encoding_format="float",
        )
        _DETECTED_EMBEDDING_DIM = len(probe.data[0].embedding)
        print(f"【检测完成】嵌入模型 '{EMBED_MODEL}' 维度: {_DETECTED_EMBEDDING_DIM}")
    except Exception as e:
        print(f"【检测失败】无法检测嵌入模型维度: {e}")
        print("【回退】使用默认维度: 1536")
        _DETECTED_EMBEDDING_DIM = 1536
    return _DETECTED_EMBEDDING_DIM

async def qwen_embedding(texts: list[str]) -> np.ndarray:
    """Embed ``texts`` with Qwen and return an (N, D) numpy array.

    DashScope limits the number of texts per embedding request, so the input
    is processed in batches of at most ``DASHSCOPE_EMBED_API_BATCH_LIMIT``.
    The embedding dimension is detected from the first response and cached.
    """
    global _PRINTED_EMBED_HEADER, _DETECTED_EMBEDDING_DIM, _EMBEDDING_CALL_COUNTER

    # Assign this call a unique id so its log lines can be correlated.
    _EMBEDDING_CALL_COUNTER += 1
    call_id = _EMBEDDING_CALL_COUNTER

    if not _PRINTED_EMBED_HEADER:
        print(f"【说明】将调用嵌入模型：把文本转为向量用于检索/索引/查询匹配。本步骤可能批量循环调用（每批≤{DASHSCOPE_EMBED_API_BATCH_LIMIT}）。")
        _PRINTED_EMBED_HEADER = True

    client = _qwen_client()
    batch_size = DASHSCOPE_EMBED_API_BATCH_LIMIT
    total = len(texts)
    total_batches = (total + batch_size - 1) // batch_size

    # Announce the overall plan only when more than one batch is needed.
    if total_batches > 1:
        print(f"【嵌入调用#{call_id}】处理 {total} 个文本，分 {total_batches} 批")

    vectors = []
    for batch_num, start in enumerate(range(0, total, batch_size), start=1):
        batch = texts[start:start + batch_size]

        if total_batches > 1:
            print(f"  └── 调用#{call_id} 批次{batch_num}/{total_batches} | 本批大小: {len(batch)}")
        else:
            print(f"【嵌入调用#{call_id}】处理 {len(batch)} 个文本")

        resp = await client.embeddings.create(
            model=EMBED_MODEL, input=batch, encoding_format="float"
        )
        embeddings = [dp.embedding for dp in resp.data]

        # First non-empty response fixes the model dimension.
        if _DETECTED_EMBEDDING_DIM is None and embeddings:
            _DETECTED_EMBEDDING_DIM = len(embeddings[0])
            print(f"【自动检测】嵌入模型维度: {_DETECTED_EMBEDDING_DIM}")
            # Expose the detected attributes on the function object itself.
            qwen_embedding.embedding_dim = _DETECTED_EMBEDDING_DIM
            qwen_embedding.max_token_size = 8192

        vectors.extend(embeddings)

    result = np.array(vectors)
    print(f"【嵌入调用#{call_id}完成】生成向量形状: {result.shape}")
    return result

# ============ Entity-extraction progress tracking ============
_ENTITY_EXTRACTION_CALL_COUNTER = 0  # monotonically increasing extraction-call id for logs

async def extract_entities_with_progress(chunks, knwoledge_graph_inst, entity_vdb, tokenizer_wrapper, global_config, using_amazon_bedrock=False):
    """Progress-reporting wrapper around nano_graphrag's ``extract_entities``.

    NOTE: the parameter name ``knwoledge_graph_inst`` (sic) deliberately keeps
    nano_graphrag's own misspelling so keyword calls from GraphRAG still match.

    Args:
        chunks: Mapping of chunk key -> chunk data to extract entities from.
        knwoledge_graph_inst: Graph storage receiving the extracted entities.
        entity_vdb: Vector DB for entity embeddings.
        tokenizer_wrapper: Tokenizer forwarded to the extractor.
        global_config: GraphRAG configuration dict.
        using_amazon_bedrock: Forwarded flag for Bedrock-specific handling.

    Returns:
        Whatever ``extract_entities`` returns (a graph instance or None).
    """
    global _ENTITY_EXTRACTION_CALL_COUNTER

    _ENTITY_EXTRACTION_CALL_COUNTER += 1
    call_id = _ENTITY_EXTRACTION_CALL_COUNTER

    chunk_count = len(chunks) if chunks else 0

    print(f"=============== 开始实体抽取 #{call_id}")
    print(f"【说明】从文本chunk中识别和提取实体及其关系，构建知识图谱")
    print(f"待处理chunk数量: {chunk_count:,} 个")

    if chunk_count > 0:
        # Length statistics only; chunk contents are never printed.
        chunk_lengths = []
        for chunk_data in chunks.values():
            if hasattr(chunk_data, 'content'):
                chunk_lengths.append(len(chunk_data.content))
            elif isinstance(chunk_data, dict) and 'content' in chunk_data:
                chunk_lengths.append(len(chunk_data['content']))

        if chunk_lengths:
            avg_length = sum(chunk_lengths) / len(chunk_lengths)
            print(f"平均chunk长度: {avg_length:.0f} 字符")
            print(f"最长chunk: {max(chunk_lengths)} 字符，最短chunk: {min(chunk_lengths)} 字符")

    max_gleaning = global_config.get('entity_extract_max_gleaning', 1)
    if max_gleaning > 1:
        print(f"多轮抽取设置: 最多 {max_gleaning} 轮补充抽取")

    print(f"【实体抽取 #{call_id}】开始处理...")

    # Delegate the actual extraction to the library implementation.
    result = await extract_entities(
        chunks=chunks,
        knwoledge_graph_inst=knwoledge_graph_inst,
        entity_vdb=entity_vdb,
        tokenizer_wrapper=tokenizer_wrapper,
        global_config=global_config,
        using_amazon_bedrock=using_amazon_bedrock
    )

    # Best-effort result statistics; failures here must not break the pipeline.
    if result is not None:
        try:
            nodes = result.nodes() if hasattr(result, 'nodes') else []
            edges = result.edges() if hasattr(result, 'edges') else []
            entity_count = len(list(nodes)) if nodes else 0
            relation_count = len(list(edges)) if edges else 0

            print(f"【实体抽取 #{call_id} 完成】")
            print(f"提取到实体数量: {entity_count:,} 个")
            print(f"提取到关系数量: {relation_count:,} 个")
        except Exception:
            print(f"【实体抽取 #{call_id} 完成】无法统计详细结果")
    else:
        print(f"【实体抽取 #{call_id} 完成】未产生结果")

    return result

def create_adaptive_embedding_func():
    """Build an embedding function whose ``embedding_dim`` matches the model.

    Synchronously runs the async dimension probe — even when called from inside
    a running event loop, by delegating to a worker thread — and then wraps
    ``qwen_embedding`` with the detected attributes via
    ``wrap_embedding_func_with_attrs``.

    Returns:
        An async embedding function carrying ``embedding_dim`` and
        ``max_token_size`` attributes, as GraphRAG expects.
    """
    import asyncio
    import concurrent.futures

    try:
        asyncio.get_running_loop()
    except RuntimeError:
        # No loop running in this thread: drive the coroutine directly.
        detected_dim = asyncio.run(_detect_embedding_dimension())
    else:
        # A loop is already running; asyncio.run would raise here, so run the
        # detection on a fresh loop in a worker thread and block on the result.
        with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
            future = executor.submit(asyncio.run, _detect_embedding_dimension())
            detected_dim = future.result()

    # Wrap qwen_embedding with the correct dimension attributes for GraphRAG.
    @wrap_embedding_func_with_attrs(embedding_dim=detected_dim, max_token_size=8192)
    async def adaptive_qwen_embedding(texts: list[str]) -> np.ndarray:
        return await qwen_embedding(texts)

    return adaptive_qwen_embedding


if __name__ == "__main__":
    # Noise reduction: silence INFO logs from the HTTP stack and OpenAI SDK.
    logging.getLogger("httpx").setLevel(logging.WARNING)
    logging.getLogger("httpcore").setLevel(logging.WARNING)
    logging.getLogger("openai").setLevel(logging.WARNING)
    parser = argparse.ArgumentParser(description="Qwen GraphRAG for Reactor domain: prep (build) or qa (ask)")
    parser.add_argument("--mode", choices=["prep", "qa"], default="qa", help="prep: 仅构建图谱与索引；qa: 仅问答（复用缓存）")
    parser.add_argument("--md", type=str, default=None, help="Markdown 文档路径（prep 模式可指定）")
    parser.add_argument("--question", type=str, default=None, help="qa 模式问题文本；未指定时使用示例问题")
    parser.add_argument("--query-mode", choices=["local", "global", "naive"], default="local", help="问答检索模式")
    parser.add_argument("--response-type", type=str, default="要点式，尽量精炼", help="生成目标长度/格式提示")
    parser.add_argument("--only-context", action="store_true", help="仅返回检索上下文，不生成答案")
    parser.add_argument("--workdir", type=str, default=None, help="工作目录（缓存路径）。qa 默认 ./chem_reactor_cache；prep 默认基于文件名+时间戳自动生成")
    args = parser.parse_args()

    # ============ Choose the working directory (prep: fresh dir; qa: default or user-specified) ============
    chosen_workdir = None
    md_path_for_prep = None
    if args.mode == "prep":
        md_path_for_prep = args.md
        if md_path_for_prep is None:
            # No --md given: look for the default document in the parent dir or CWD.
            md_candidates = [
                os.path.join(os.path.dirname(__file__), "..", "反应器选型及设计说明书_MinerU__20250921073803.md"),
                os.path.join(os.getcwd(), "反应器选型及设计说明书_MinerU__20250921073803.md"),
            ]
            md_path_for_prep = next((p for p in md_candidates if os.path.exists(p)), None)
        if md_path_for_prep is None or not os.path.exists(md_path_for_prep):
            raise FileNotFoundError("未找到 Markdown 文件：请通过 --md 指定或将文件放在项目根目录")

        if args.workdir:
            chosen_workdir = args.workdir
        else:
            # Derive a unique cache dir from the document stem plus a timestamp.
            stem = os.path.splitext(os.path.basename(md_path_for_prep))[0]
            ts = datetime.now().strftime("%Y%m%d-%H%M%S")
            chosen_workdir = os.path.join(".", f"chem_reactor_cache__{stem}_{ts}")
    else:
        chosen_workdir = args.workdir or "./chem_reactor_cache"

    # ============ Build the dimension-adaptive embedding function ============
    print("【准备】创建自适应维度的嵌入函数...")
    adaptive_embedding_func = create_adaptive_embedding_func()

    # ============ Build GraphRAG ============
    rag = GraphRAG(
        working_dir=chosen_workdir,
        # Token-size chunking with progress reporting
        # (NOTE: chunking_by_seperators is imported but not used here)
        chunk_func=chunking_with_progress,
        chunk_token_size=1000,
        chunk_overlap_token_size=120,

        # Multiple gleaning rounds during extraction to improve recall
        entity_extract_max_gleaning=2,

        # Hook up Qwen: separate "best" and "cheap" models
        best_model_func=partial(qwen_complete, model=BEST_MODEL),
        cheap_model_func=partial(qwen_complete, model=CHEAP_MODEL),
        # Rate limiting: keep concurrency low to avoid HTTP 429s
        best_model_max_async=2,
        cheap_model_max_async=2,

        # Clustering algorithm that does not depend on graspologic,
        # avoiding gensim/numpy ABI conflicts
        graph_cluster_algorithm="labelprop",

        # Text embeddings (dimension-adaptive function)
        embedding_func=adaptive_embedding_func,
        embedding_batch_num=EMBEDDING_BATCH_SIZE,
        embedding_func_max_async=8,

        # Entity extraction (progress-reporting wrapper)
        entity_extraction_func=extract_entities_with_progress,

        # Enable plain vector RAG for comparison
        enable_naive_rag=True,
    )

    if args.mode == "prep":
        # ============ Read the Markdown document and insert it ============
        print(f"[Prep] 读取文档: {os.path.abspath(md_path_for_prep)}")
        print(f"[Prep] 工作目录: {os.path.abspath(rag.working_dir)}")
        with open(md_path_for_prep, "r", encoding="utf-8") as f:
            md_content = f.read()
        rag.insert([md_content])
        print("[Prep] 已完成构建，工作目录：", os.path.abspath(rag.working_dir))
        raise SystemExit(0)

    # ============ Question answering (reuses the cache) ============
    if args.mode == "qa":
        print(f"[QA] 工作目录: {os.path.abspath(rag.working_dir)} | 检索模式: {args.query_mode}")
        if args.question:
            print(rag.query(
                args.question,
                param=QueryParam(mode=args.query_mode, response_type=args.response_type, only_need_context=args.only_context)
            ))
        else:
            # Demo: run sample queries when no question is provided.
            print(rag.query(
                "这些文本涉及哪些反应器类型、关键设备与运行参数？",
                param=QueryParam(mode="local", response_type=args.response_type)
            ))
            print(rag.query(
                "概括主要反应器配置、热量管理策略与潜在安全风险",
                param=QueryParam(mode="global", response_type="50-100字要点")
            ))
            print(rag.query(
                "示例文本中涉及哪些常见的控制策略或引发剂？",
                param=QueryParam(mode="naive", response_type="简答")
            ))

