import logging
import os
import sys
import uuid
from typing import Dict

from llama_index.vector_stores.milvus import MilvusVectorStore
from llama_index.core import Settings
from llama_index.embeddings.huggingface import HuggingFaceEmbedding

from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, StorageContext, load_index_from_storage
from llama_index.core.tools import QueryEngineTool, ToolMetadata

from llama_index.llms.openai.utils import ALL_AVAILABLE_MODELS, CHAT_MODELS

from llama_index.core.callbacks import CallbackManager, LlamaDebugHandler

from limma_index_milvus_demo.sks_lamma_index_openai_embedding import SksOllamaEmbedding

# --- Logging ---------------------------------------------------------------
# basicConfig(stream=sys.stdout, ...) already installs a StreamHandler on
# stdout; adding a second one by hand would make every record print twice,
# so we deliberately do NOT call logging.getLogger().addHandler(...) here.
logger_level = logging.DEBUG
logging.basicConfig(stream=sys.stdout, level=logger_level,
                    format='%(asctime)s %(name)s [%(pathname)s line:%(lineno)d] %(levelname)s %(message)s')

# Debug handler: prints a trace of LlamaIndex events when each run ends.
llama_debug = LlamaDebugHandler(print_trace_on_end=True)
from langfuse.llama_index import LlamaIndexCallbackHandler

# Langfuse tracing handler. Credentials come from the environment; a fresh
# session id (uuid1) is generated per process so traces group per run.
langfuse_callback_handler = LlamaIndexCallbackHandler(
    trace_name='demo',
    user_id='sks',
    session_id=str(uuid.uuid1()),
    public_key=os.getenv('LANGFUSE_PUBLIC_KEY'),
    secret_key=os.getenv('LANGFUSE_SECRET_KEY'),
    host="https://us.cloud.langfuse.com"
)

Settings.callback_manager = CallbackManager([langfuse_callback_handler, llama_debug])

# Models and their context-window sizes.
LLM_MODELS_CONF: Dict[str, int] = {
    "ishumilin/deepseek-r1-coder-tools:14b": 64000,  # context window = 64000
    "qwen2.5-coder:7b": 131072,
    "qwen-max-latest": 64000,
}
# Register the extra (DeepSeek/Qwen) models so llama_index's OpenAI wrapper
# recognises them as valid model names.
ALL_AVAILABLE_MODELS.update(LLM_MODELS_CONF)
CHAT_MODELS.update(LLM_MODELS_CONF)


def test01():
    """Basic RAG demo: 384-dim BGE embeddings + Zilliz-hosted Milvus + Qwen LLM.

    Loads two local text files, indexes them into a Milvus collection and
    runs a single query through a top-3 similarity query engine.
    """
    # bge-small-en-v1.5 is a small embedding model (384 dims), good for local use.
    Settings.embed_model = HuggingFaceEmbedding(
        model_name="BAAI/bge-small-en-v1.5"
    )
    from llama_index.llms.openai import OpenAI

    # Local ollama alternative:
    # llm_url = "http://192.168.56.1:11434/v1"
    # llm_model = "qwen2.5-coder:7b"
    llm_url = "https://dashscope.aliyuncs.com/compatible-mode/v1"
    llm_model = "qwen-max-latest"
    # Use getenv so the 'aa' fallback actually applies when the variable is
    # missing — os.environ['...'] would raise KeyError before the `or` ran.
    llm_api_key = os.getenv('ALIYUN-API-KEY') or 'aa'

    Settings.llm = OpenAI(model=llm_model,
                          api_base=llm_url,
                          api_key=llm_api_key, temperature=0.7)

    collection_name = 'advance_rag_docs'
    input_dir = 'Z:/pywork/python-demo/knowledge_files'
    input_files = [input_dir + '/knowledge.txt', input_dir + '/paul_graham_essay.txt']

    # Local Milvus alternative:
    # milvus_url = "http://localhost:19530"
    # milvus_token = 'root:Milvus'
    # Milvus database hosted at https://cloud.zilliz.com.cn/ — both variables
    # are required here, so a missing one should fail loudly (KeyError).
    milvus_url = os.environ['ZILLIZ-MILVUS-URL']
    milvus_token = os.environ['ZILLIZ-MILVUS-TOKEN']

    # Create a single Milvus vector store; dim must match the embedding model (384).
    vector_store = MilvusVectorStore(
        uri=milvus_url,
        # uri="F:/Dockerwork/DockerDesktopWSL/milvus/locdb/milvus_demo_metadata.db",
        token=milvus_token,
        collection_name=collection_name,
        dim=384,
        overwrite=False,  # keep any existing collection contents
    )
    # Storage context backed by the Milvus vector store.
    storage_context = StorageContext.from_defaults(vector_store=vector_store)

    # Load the documents and build the vector index.
    docs = SimpleDirectoryReader(input_files=input_files).load_data()
    index = VectorStoreIndex.from_documents(docs, storage_context=storage_context)
    # Query engine returning the 3 most similar chunks.
    company_engine = index.as_query_engine(similarity_top_k=3)

    print(f'{"-"*30} 开始查询 {"-"*30} ')
    query = '"你都学到了什么?"'
    res = company_engine.query(query)

    print(f'{"-"*30} 答案 {"-"*30} ')
    print(res)


def test02():
    """RAG demo with 768-dim BGE embeddings + local Milvus using an RRF ranker.

    Rebuilds the collection on every run (overwrite=True), indexes two local
    text files and runs one query through a top-3 similarity query engine.
    """
    from llama_index.llms.openai import OpenAI

    # Local ollama alternative:
    # llm_url = "http://192.168.56.1:11434/v1"
    # llm_model = "qwen2.5-coder:7b"
    llm_url = "https://dashscope.aliyuncs.com/compatible-mode/v1"
    llm_model = "qwen-max-latest"
    # getenv lets the 'aa' fallback apply when the variable is missing;
    # os.environ['...'] would raise KeyError before the `or` ran.
    llm_api_key = os.getenv('ALIYUN-API-KEY') or 'aa'

    Settings.llm = OpenAI(
        model=llm_model,
        api_base=llm_url,
        api_key=llm_api_key,
        temperature=0.7)

    # Embedding dimension must match the Milvus collection dimension.
    # milvus_dim = 384  # for BAAI/bge-small-en-v1.5
    milvus_dim = 768
    Settings.embed_model = HuggingFaceEmbedding(
        # model_name="BAAI/bge-small-en-v1.5"  # 384 dimensions
        model_name="BAAI/bge-base-en-v1.5"  # 768 dimensions
    )

    collection_name = 'advance_rag_docs_768'
    input_dir = 'Z:/pywork/python-demo/knowledge_files'
    input_files = [input_dir + '/knowledge.txt', input_dir + '/paul_graham_essay.txt']

    milvus_url = "http://localhost:19530"
    milvus_token = 'root:Milvus'

    # Zilliz cloud alternative (https://cloud.zilliz.com.cn/):
    # milvus_url = os.environ['ZILLIZ-MILVUS-URL']
    # milvus_token = os.environ['ZILLIZ-MILVUS-TOKEN']

    # Create a single Milvus vector store.
    vector_store = MilvusVectorStore(
        dim=milvus_dim,
        uri=milvus_url,
        # uri="F:/Dockerwork/DockerDesktopWSL/milvus/locdb/milvus_demo_metadata.db",
        token=milvus_token,
        collection_name=collection_name,
        overwrite=True,  # drop and recreate the collection on every run
        hybrid_ranker="RRFRanker",
        hybrid_ranker_params={"k": 60},
    )
    # Storage context backed by the Milvus vector store.
    storage_context = StorageContext.from_defaults(vector_store=vector_store)

    # Load the documents and build the vector index.
    docs = SimpleDirectoryReader(input_files=input_files).load_data()
    index = VectorStoreIndex.from_documents(docs, storage_context=storage_context)
    # Query engine returning the 3 most similar chunks.
    company_engine = index.as_query_engine(similarity_top_k=3)

    print(f'{"-" * 30} 开始查询 {"-" * 30} ')
    query = '"你都学到了什么?"'
    res = company_engine.query(query)

    print(f'{"-" * 30} 答案 {"-" * 30} ')
    print(res)


def test03():
    """Hybrid dense + sparse retrieval demo against a local Milvus instance.

    Uses 768-dim BGE dense embeddings plus Milvus' default sparse embedding
    function, combined with an RRF ranker, and queries in hybrid mode.
    """
    from llama_index.llms.openai import OpenAI

    # Local ollama alternative — too resource-hungry on this machine: it
    # tends to stall the box and has crashed the Milvus database.
    # llm_url = "http://localhost:11434/v1"
    # llm_model = "qwen2.5-coder:7b"
    llm_url = "https://dashscope.aliyuncs.com/compatible-mode/v1"
    llm_model = "qwen-max-latest"
    # Aliyun API key from the ALIYUN-API-KEY environment variable. getenv
    # lets the 'aa' fallback apply when the variable is missing;
    # os.environ['...'] would raise KeyError before the `or` ran.
    llm_api_key = os.getenv('ALIYUN-API-KEY') or 'aa'

    Settings.llm = OpenAI(
        model=llm_model,
        api_base=llm_url,
        api_key=llm_api_key,
        temperature=0.7)

    milvus_dim = 768
    # Dense embedding model — its dimension must match the Milvus collection.
    embedding = HuggingFaceEmbedding(
        # model_name="BAAI/bge-small-en-v1.5"  # 384 dimensions
        model_name="BAAI/bge-base-en-v1.5"  # 768 dimensions
    )
    Settings.embed_model = embedding

    collection_name = 'advance_rag_docs_test3'
    input_dir = 'Z:/pywork/python-demo/knowledge_files'
    input_files = [input_dir + '/knowledge.txt', input_dir + '/paul_graham_essay.txt']

    milvus_url = "http://localhost:19530"
    milvus_token = 'root:Milvus'

    # Zilliz cloud alternative (https://cloud.zilliz.com.cn/):
    # milvus_url = os.environ['ZILLIZ-MILVUS-URL']
    # milvus_token = os.environ['ZILLIZ-MILVUS-TOKEN']

    # Default sparse-embedding function shipped with the Milvus integration.
    from llama_index.vector_stores.milvus.utils import get_default_sparse_embedding_function
    sparse_embedding_function = get_default_sparse_embedding_function()

    # Create a single Milvus vector store.
    vector_store = MilvusVectorStore(
        # If a collection was previously created with a different dimension,
        # it must be dropped before this works (overwrite=True handles that).
        dim=milvus_dim,
        uri=milvus_url,
        # uri="F:/Dockerwork/DockerDesktopWSL/milvus/locdb/milvus_demo_metadata.db",
        token=milvus_token,
        collection_name=collection_name,
        overwrite=True,
        hybrid_ranker="RRFRanker",
        hybrid_ranker_params={"k": 60},
        # enable_sparse=True turns on sparse retrieval and requires
        # sparse_embedding_function; False means dense-only retrieval.
        enable_sparse=True,
        sparse_embedding_function=sparse_embedding_function,
    )
    # Storage context backed by the Milvus vector store.
    storage_context = StorageContext.from_defaults(vector_store=vector_store)

    # Load the documents and build the vector index.
    docs = SimpleDirectoryReader(input_files=input_files).load_data()
    index = VectorStoreIndex.from_documents(docs, storage_context=storage_context)
    # A plain dense query engine found no useful hits here:
    # company_engine = index.as_query_engine(
    #     enable_sparse=True,
    #     sparse_embedding_function=sparse_embedding_function,
    #     similarity_top_k=3)
    # With a sparse model configured, hybrid mode is needed to find the answer.
    company_engine = index.as_query_engine(vector_store_query_mode="hybrid")

    print(f'{"-" * 30} 开始查询 {"-" * 30} ')
    # query = '"你都学到了什么?"'
    query = '广州大学'
    res = company_engine.query(query)

    print(f'{"-" * 30} 答案 {"-" * 30} ')
    print(res)
if __name__ == '__main__':
    # Uncomment the demo you want to run; test03 (hybrid retrieval) is active.
    # test01()
    # test02()
    test03()
