from llama_index.core.graph_stores import SimpleGraphStore
from llama_index.core.tools import FunctionTool
from model_config import dashscope_embed_model
import os, traceback
from utils.error_handler import ErrorHandler
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext, \
    Settings, load_index_from_storage

from llama_index.core.node_parser import SemanticSplitterNodeParser

Settings.embed_model = dashscope_embed_model()
# Semantic chunking configuration (applies globally via llama_index Settings).
Settings.node_parser = SemanticSplitterNodeParser(
    buffer_size=128, # NOTE(review): per llama_index docs buffer_size is the number of sentences grouped when evaluating semantic similarity, not a token-overlap window — the original comment claimed "128-token overlap"; confirm intent
    breakpoint_percentile_threshold=95, # split where embedding distance between groups exceeds the 95th percentile
    embed_model = dashscope_embed_model()
)

BATCH_SIZE = 10  # number of nodes inserted/persisted per batch in build_index

def get_storage_context():
    """Return a StorageContext backed by a SimpleGraphStore.

    If the persistence directory (env var PERSIST_DIR, default "./storage")
    already exists, the context is loaded from it; otherwise a fresh,
    not-yet-persisted context is created.

    Returns:
        StorageContext on success, None on any failure (error is logged).
    """
    try:
        storage_args = {
            # "vector_store": get_chroma_vector_store(),
            "graph_store": SimpleGraphStore()
        }
        # Read PERSIST_DIR once instead of twice (original duplicated the getenv call).
        persist_dir = os.getenv("PERSIST_DIR", "./storage")
        # Load from the persisted directory when it exists; otherwise start fresh.
        if os.path.exists(persist_dir):
            storage_args["persist_dir"] = persist_dir
        return StorageContext.from_defaults(**storage_args)
    except Exception as e:
        # Broad catch is deliberate: callers treat None as "no context available".
        ErrorHandler().handle_error("存储上下文获取失败", e)
        traceback.print_exc()
        return None

def load_index(index_id: str):
    """Load a single index by id from the persisted storage context.

    Args:
        index_id: id previously assigned via ``index.set_index_id()``.

    Returns:
        The loaded index, or None when the index does not exist or
        loading failed (failures are logged).
    """
    try:
        storage_context = get_storage_context()
        return load_index_from_storage(index_id=index_id, storage_context=storage_context)
    except ValueError:
        # llama_index raises ValueError for an unknown index id —
        # "not found" is a normal outcome, not an error.
        return None
    except Exception as e:
        ErrorHandler().handle_error("索引加载失败", e)
        traceback.print_exc()
        return None

def query_tool(index_id: str, description: str):
    """Wrap the index identified by *index_id* as a FunctionTool for an agent.

    Args:
        index_id: id of the index to expose; also used as the tool name.
        description: human-readable tool description shown to the agent.

    Returns:
        A FunctionTool that answers questions against the index, or None
        when the index cannot be loaded.
    """
    index = load_index(index_id)
    if not index:
        return None

    def _run_query(question):
        # A fresh query engine is built per call, matching the original behavior.
        return index.as_query_engine().query(question)

    return FunctionTool.from_defaults(
        name=index_id,
        fn=_run_query,
        description=description,
    )

def build_index(index_id: str, file_path: str):
    """Load the document at *file_path*, split it semantically, and create or
    update the index identified by *index_id*.

    Nodes are inserted in batches of BATCH_SIZE and the storage context is
    persisted after each batch (embeddings included), so a partial run still
    leaves a usable index on disk.

    Args:
        index_id: id of the index to create or extend.
        file_path: path of the file to ingest; stamped into node metadata
            so delete_index can later remove this file's nodes.
    """
    try:
        docs = SimpleDirectoryReader(input_files=[file_path]).load_data()
        print(f"已经读取的文档数：{len(docs)}")
        # Semantic chunking via the globally configured node parser.
        nodes = Settings.node_parser.get_nodes_from_documents(docs)
        print(f"语义分块后的节点数：{len(nodes)}")

        for node in nodes:
            node.metadata["file_path"] = file_path

        # Load the existing index once and reuse the handle; the original
        # reloaded the whole storage context on every batch iteration.
        index = load_index(index_id)
        for i in range(0, len(nodes), BATCH_SIZE):
            batch_nodes = nodes[i:i + BATCH_SIZE]
            if index:
                index.insert_nodes(batch_nodes)
            else:
                # First batch with no pre-existing index: create it.
                index = VectorStoreIndex(nodes=batch_nodes,
                                         storage_context=get_storage_context())
                index.set_index_id(index_id)
            # Persist after each batch so progress survives a crash.
            index.storage_context.persist(persist_dir=os.getenv("PERSIST_DIR", "./storage"))
    except Exception as e:
        ErrorHandler().handle_error("索引构建失败", e)
        traceback.print_exc()


def delete_index(index_id: str, file_path: str):
    """Remove from the index every document that originated from *file_path*.

    Scans all nodes in the index, collects the source-document ids whose
    nodes were stamped with this file_path by build_index, deletes them from
    both the vector store and the docstore, then persists the result.

    Args:
        index_id: id of the index to prune.
        file_path: ingestion path whose documents should be removed.
    """
    try:
        index = load_index(index_id)
        if index is None:
            # Index does not exist — nothing to delete. The original fell
            # through to an AttributeError swallowed by the broad except.
            return
        node_ids = index.index_struct.nodes_dict.keys()
        nodes = [
            index.storage_context.docstore.get_node(node_id)
            for node_id in node_ids
        ]
        # Deduplicate: several nodes share one ref_doc_id, and the original
        # issued a delete per node. Use .get so nodes lacking the
        # "file_path" key (other ingestion paths) are skipped, not fatal.
        doc_ids = {
            node.ref_doc_id
            for node in nodes
            if node.metadata.get("file_path") == file_path
        }
        for doc_id in doc_ids:
            index.storage_context.vector_store.delete(doc_id)
            index.delete_ref_doc(doc_id, delete_from_docstore=True)
        index.storage_context.persist(persist_dir=os.getenv("PERSIST_DIR", "./storage"))
    except Exception as e:
        ErrorHandler().handle_error("删除索引或文档失败", e)
        traceback.print_exc()


