import logging
import os
import sys
import uuid
from typing import Dict

from llama_index.core import StorageContext, SimpleDirectoryReader, VectorStoreIndex, Settings
from llama_index.core.callbacks import LlamaDebugHandler, CallbackManager
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.openai.utils import ALL_AVAILABLE_MODELS, CHAT_MODELS
from llama_index.vector_stores.milvus import MilvusVectorStore

# --- Logging ---------------------------------------------------------------
# NOTE(fix): the original assigned logging.DEBUG and immediately overwrote it
# with logging.ERROR; the dead assignment has been removed.
logger_level = logging.ERROR
logging.basicConfig(stream=sys.stdout, level=logger_level,
                    format='%(asctime)s %(name)s [%(pathname)s line:%(lineno)d] %(levelname)s %(message)s')
# NOTE(fix): no extra StreamHandler is added here -- basicConfig already
# installs one on the root logger, so the original second handler made every
# record print twice.

# --- Tracing / observability ----------------------------------------------
# Prints a call trace for each LlamaIndex operation when it completes.
llama_debug = LlamaDebugHandler(print_trace_on_end=True)
from langfuse.llama_index import LlamaIndexCallbackHandler

# Sends traces to Langfuse; credentials come from the environment and each
# run gets its own session id.
langfuse_callback_handler = LlamaIndexCallbackHandler(
    trace_name='demo',
    user_id='sks',
    session_id=str(uuid.uuid1()),
    public_key=os.getenv('LANGFUSE_PUBLIC_KEY'),
    secret_key=os.getenv('LANGFUSE_SECRET_KEY'),
    host="https://us.cloud.langfuse.com"
)

# Route all LlamaIndex callbacks through both handlers.
Settings.callback_manager = CallbackManager([langfuse_callback_handler, llama_debug])

# Map each custom model name to its context-window size (tokens).
LLM_MODELS_CONF: Dict[str, int] = {
    "ishumilin/deepseek-r1-coder-tools:14b": 64000,  # context window of 64000
    "qwen2.5-coder:7b": 131072,
    "qwen-max-latest": 64000,
}
# Register the custom models with the OpenAI wrapper's model registry so it
# accepts them as valid model names.
ALL_AVAILABLE_MODELS.update(LLM_MODELS_CONF)
# Also register them as chat-capable models.
CHAT_MODELS.update(LLM_MODELS_CONF)

# Dense embedding model; its output dimension must match the vector
# dimension used when the Milvus collection was created.
embedding = HuggingFaceEmbedding(
    # model_name="BAAI/bge-small-en-v1.5" # 384 dimensions
    model_name="BAAI/bge-base-en-v1.5"  # 768 dimensions
)
Settings.embed_model = embedding

from llama_index.llms.openai import OpenAI

# Local Ollama endpoint (disabled: running the model locally consumed too
# many resources and could even take down the Milvus database):
# llm_url = "http://localhost:11434/v1"
# llm_model = "qwen2.5-coder:7b"

# Aliyun DashScope OpenAI-compatible endpoint.
llm_url = "https://dashscope.aliyuncs.com/compatible-mode/v1"
llm_model = "qwen-max-latest"
# Aliyun API key, read from the ALIYUN-API-KEY environment variable.
# NOTE(fix): os.environ['ALIYUN-API-KEY'] raised KeyError when the variable
# was unset, so the intended `or 'aa'` fallback could never apply;
# os.getenv returns None instead, letting the fallback take effect.
llm_api_key = os.getenv('ALIYUN-API-KEY') or 'aa'

Settings.llm = OpenAI(
    model=llm_model,
    api_base=llm_url,
    api_key=llm_api_key,
    temperature=0.7)


def init_vector_store(collection_name, milvus_url, milvus_token, dim):
    """Create a fresh Milvus-backed storage context for hybrid retrieval.

    Drops any existing collection with the same name (overwrite=True) and
    enables both dense and sparse vectors, fused by an RRF ranker (k=60).
    """
    # Default sparse-embedding function shipped with the Milvus integration.
    from llama_index.vector_stores.milvus.utils import get_default_sparse_embedding_function

    store = MilvusVectorStore(
        dim=dim,
        uri=milvus_url,
        token=milvus_token,
        collection_name=collection_name,
        overwrite=True,
        hybrid_ranker="RRFRanker",
        hybrid_ranker_params={"k": 60},
        enable_sparse=True,
        sparse_embedding_function=get_default_sparse_embedding_function(),
    )
    return StorageContext.from_defaults(vector_store=store)

def add_file_to_index(input_files, storage_context):
    """Load the given files, index them into the vector store, and persist."""
    documents = SimpleDirectoryReader(input_files=input_files).load_data()
    new_index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
    new_index.storage_context.persist()

def delete_file_from_index(file_path, storage_context):
    """Remove every indexed document whose 'file_path' metadata matches."""
    # Rebuild an index view over the existing vector store.
    index = VectorStoreIndex.from_vector_store(storage_context.vector_store)
    # Collect the ids first so deletion does not mutate the dict we iterate.
    matching_ids = [
        node.doc_id
        for node in index.docstore.docs.values()
        if node.metadata.get('file_path') == file_path
    ]
    for node_id in matching_ids:
        index.delete(node_id)
    # Persist the updated storage context.
    index.storage_context.persist()

def update_file_in_index(file_path, storage_context):
    """Refresh one file in the index: drop its old entries, then re-ingest it.

    Placeholder strategy -- a real implementation might update in place.
    """
    delete_file_from_index(file_path, storage_context)
    add_file_to_index([file_path], storage_context)

def delete_folder_from_index(folder_path, storage_context):
    """Remove every file under *folder_path* (recursively) from the index.

    NOTE(fix): os.walk already yields every subdirectory of *folder_path*,
    so the original extra recursive call per directory re-walked each
    subtree once per ancestor and re-issued deletes for files that were
    already removed. The single walk below covers the whole tree exactly once.
    """
    for root, _dirs, files in os.walk(folder_path):
        for name in files:
            delete_file_from_index(os.path.join(root, name), storage_context)

def add_folder_to_index(folder_path, storage_context):
    """Recursively index every file found under *folder_path*."""
    for current_dir, _subdirs, filenames in os.walk(folder_path):
        for filename in filenames:
            add_file_to_index([os.path.join(current_dir, filename)], storage_context)

def query_index(query_text, storage_context):
    """Run *query_text* against the index using hybrid (dense + sparse) search."""
    index = VectorStoreIndex.from_vector_store(storage_context.vector_store)
    engine = index.as_query_engine(vector_store_query_mode="hybrid")
    return engine.query(query_text)

if __name__ == '__main__':
    collection_name = 'knowledge_base'
    milvus_url = "http://localhost:19530"
    milvus_token = 'root:Milvus'
    dim = 768  # must match the embedding model's output dimension
    input_dir = 'Z:/pywork/python-demo/knowledge_files'
    input_files = [input_dir + '/knowledge.txt', input_dir + '/paul_graham_essay.txt', ]

    # Build a fresh Milvus collection (overwrite=True) and ingest the files.
    storage_context = init_vector_store(collection_name, milvus_url, milvus_token, dim)

    add_file_to_index(input_files, storage_context)

    print(f'{"-" * 30} 开始查询 {"-" * 30} ')
    # NOTE(fix): the original set query = '"你都学到了什么?"' and immediately
    # overwrote it on the next line; the dead assignment has been removed.
    query = '广州大学'
    res = query_index(query, storage_context)

    print(f'{"-" * 30} 答案 {"-" * 30} ')
    print(res)

    print(f'{"-" * 30} aaa1 {"-" * 30} ')
    # Rebuild an index view over the store and dump the docstore contents.
    index = VectorStoreIndex.from_vector_store(storage_context.vector_store)
    print(f'{"-" * 30} aaa2 {"-" * 30} ')
    for doc in index.docstore.docs.values():
        print(doc.doc_id)
    print(f'{"-" * 30} index.docstore.docs {"-" * 30} ')
    print(index.docstore.docs)
    # Example operations (note: add_file_to_index takes a LIST of paths):
    # add_file_to_index(['path/to/your/file.txt'], storage_context)
    # delete_file_from_index('path/to/your/file.txt', storage_context)
    # update_file_in_index('path/to/your/file.txt', storage_context)
    # delete_folder_from_index('path/to/your/folder', storage_context)
    # add_folder_to_index('path/to/your/folder', storage_context)