from llama_index.core import GPTVectorStoreIndex
import logging
import os
import sys
import uuid
from typing import Dict

from llama_index.core import StorageContext, SimpleDirectoryReader, VectorStoreIndex, Settings
from llama_index.core.callbacks import LlamaDebugHandler, CallbackManager
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.openai.utils import ALL_AVAILABLE_MODELS, CHAT_MODELS
from llama_index.vector_stores.milvus import MilvusVectorStore

# Root-logger configuration, writing to stdout.
# NOTE: logging.basicConfig(stream=...) already attaches a StreamHandler to the
# root logger; the original code added a second StreamHandler on top, which
# duplicated every log record on stdout. The dead `logger_level = logging.DEBUG`
# assignment (immediately overwritten) is also removed.
logger_level = logging.ERROR  # switch to logging.DEBUG for verbose tracing
logging.basicConfig(stream=sys.stdout, level=logger_level,
                    format='%(asctime)s %(name)s [%(pathname)s line:%(lineno)d] %(levelname)s %(message)s')

# LlamaIndex debug handler: prints a call trace when each operation finishes.
llama_debug = LlamaDebugHandler(print_trace_on_end=True)
from langfuse.llama_index import LlamaIndexCallbackHandler

# Langfuse observability handler. Credentials come from the
# LANGFUSE_PUBLIC_KEY / LANGFUSE_SECRET_KEY environment variables;
# uuid1() gives each script run its own trace session id.
langfuse_callback_handler = LlamaIndexCallbackHandler(
    trace_name='demo',
    user_id='sks',
    session_id=str(uuid.uuid1()),
    public_key=os.getenv('LANGFUSE_PUBLIC_KEY'),
    secret_key=os.getenv('LANGFUSE_SECRET_KEY'),
    host="https://us.cloud.langfuse.com"
)

# Register both handlers globally so every LlamaIndex operation is traced.
Settings.callback_manager = CallbackManager([langfuse_callback_handler, llama_debug])
# Settings.callback_manager = CallbackManager([langfuse_callback_handler])

# Model name -> context-window size (tokens) for the custom models used here.
LLM_MODELS_CONF: Dict[str, int] = {
    "ishumilin/deepseek-r1-coder-tools:14b": 64000,  # context size 64000
    "qwen2.5-coder:7b": 131072,
    "qwen-max-latest": 64000,
}
# Register these names in llama_index's OpenAI model table so the
# OpenAI-compatible client recognizes them (including the DeepSeek model).
ALL_AVAILABLE_MODELS.update(LLM_MODELS_CONF)
# Also register them as chat-capable models.
CHAT_MODELS.update(LLM_MODELS_CONF)

# Dense-embedding model. Its output dimension must match the vector dimension
# used when the Milvus collection was created.
embedding = HuggingFaceEmbedding(
    # model_name="BAAI/bge-small-en-v1.5" #384 dimensions
    model_name="BAAI/bge-base-en-v1.5"  # 768 dimensions
)
dim = 768  # must stay in sync with the embedding model chosen above
Settings.embed_model = embedding

from llama_index.llms.openai import OpenAI

# LLM backend toggle: local Ollama (commented out) vs. Aliyun DashScope's
# OpenAI-compatible endpoint. The local model consumed too many resources,
# lagged, and could even take down the Milvus server, so the cloud endpoint
# is the active choice.
# llm_url = "http://localhost:11434/v1"
# llm_model = "qwen2.5-coder:7b"
llm_url = "https://dashscope.aliyuncs.com/compatible-mode/v1"
llm_model = "qwen-max-latest"

# Aliyun API key from the ALIYUN-API-KEY environment variable.
# BUG FIX: os.environ['ALIYUN-API-KEY'] raises KeyError when the variable is
# unset, so the original `or 'aa'` fallback was unreachable; os.getenv returns
# None instead, letting the placeholder default actually take effect.
llm_api_key = os.getenv('ALIYUN-API-KEY') or 'aa'

Settings.llm = OpenAI(
    model=llm_model,
    api_base=llm_url,
    api_key=llm_api_key,
    temperature=0.7)
# Default sparse-vector embedding function shipped with the Milvus integration
# (used for the sparse half of hybrid retrieval).
from llama_index.vector_stores.milvus.utils import get_default_sparse_embedding_function

sparse_embedding_function = get_default_sparse_embedding_function()

collection_name = 'test5c'
# collection_name = 'limd_li'
milvus_url = "http://localhost:19530"
milvus_token = 'root:Milvus'  # NOTE(review): Milvus default credentials — change for production
input_dir = 'Z:/pywork/python-demo/knowledge_files'
input_files = [
    input_dir + '/knowledge.txt',
    input_dir + '/paul_graham_essay.txt',
]

# Milvus-backed vector store. overwrite=False reuses the existing collection;
# RRFRanker fuses the dense and sparse result lists (k=60 is the RRF constant).
vector_store = MilvusVectorStore(
    dim=dim,
    uri=milvus_url,
    token=milvus_token,
    collection_name=collection_name,
    overwrite=False,
    hybrid_ranker="RRFRanker",
    hybrid_ranker_params={"k": 60},
    # enable_sparse=True,
    # sparse_embedding_function=sparse_embedding_function
)



def init_index():
    """Build the index from scratch: read the knowledge files and ingest
    their embeddings into the Milvus-backed vector store."""
    documents = SimpleDirectoryReader(input_files=input_files).load_data()
    ctx = StorageContext.from_defaults(vector_store=vector_store)
    return VectorStoreIndex.from_documents(documents=documents, storage_context=ctx)

def init_index2():
    """Attach to the already-populated Milvus collection without re-ingesting."""
    return VectorStoreIndex.from_vector_store(vector_store=vector_store)

#test start
from llama_index.core.vector_stores import VectorStoreQuery
from llama_index.core.vector_stores.types import VectorStoreQueryMode



def to_query(index, query):
    """Run a raw hybrid (dense + sparse) query directly against the Milvus
    vector store, bypassing the query engine.

    `index` is unused; it is kept only for signature parity with to_query2.
    """
    dense_vector = embedding.get_query_embedding(query)
    request = VectorStoreQuery(
        query_str=query,
        query_embedding=dense_vector,
        mode=VectorStoreQueryMode.HYBRID,
    )
    return vector_store.query(request)

def to_query2(index, query):
    """Answer `query` through the index's query engine (default dense retrieval)."""
    engine = index.as_query_engine(vector_store_query_mode=VectorStoreQueryMode.DEFAULT)
    return engine.query(query)

if __name__ == '__main__':
    # query = '"你都学到了什么?"'
    query = '广州大学'
    # index = init_index()   # first run: ingest the documents
    index = init_index2()    # later runs: reuse the existing collection
    separator = "-" * 30
    print(f'{separator} 开始查询 {separator} ')
    # res = to_query(index, query)
    res = to_query2(index, query)
    print(f'{separator} 问题 {separator} ')
    print(query)
    print(f'{separator} 答案 {separator} ')
    print(res)
