import logging
import sys
from typing import Dict

from pymilvus.model.hybrid import BGEM3EmbeddingFunction

from sks_lamma.sparse_embedding import SksBGEM3SparseEmbeddingFunction

# from llama_index.embeddings import HuggingFaceEmbedding
# Configure root logging to stdout at DEBUG level.
# FIX: the original first set WARN and immediately overwrote it with DEBUG
# (dead assignment), and also called addHandler(StreamHandler(stdout)) on top
# of basicConfig(stream=stdout), which installed a second stdout handler and
# duplicated every log line. Both issues are corrected here.
logger_level = logging.DEBUG
logging.basicConfig(stream=sys.stdout, level=logger_level,
                    format='%(asctime)s %(name)s [%(pathname)s line:%(lineno)d] %(levelname)s %(message)s')

from pymilvus import MilvusClient
from llama_index.embeddings.huggingface import HuggingFaceEmbedding

from llama_index.llms.openai.utils import ALL_AVAILABLE_MODELS, CHAT_MODELS

def aaa():
    """Build a Milvus-backed hybrid (dense + sparse) RAG index over two local
    knowledge files and answer one sample query.

    Returns:
        The llama_index response object for the query
        "What did the author learn?".

    Side effects:
        Recreates the Milvus collection (``overwrite=True``) on a local Milvus
        server, and sends the query to a local Ollama server through its
        OpenAI-compatible API.
    """
    # Map model name -> context-window size for locally served models that
    # llama_index's OpenAI wrapper would otherwise reject as unknown.
    LLM_MODELS_CONF: Dict[str, int] = {
        "ishumilin/deepseek-r1-coder-tools:14b": 64000,  # context window 64000
        "qwen2.5-coder:7b": 131072,
    }
    # Register the custom models so OpenAI(model=...) validation accepts them.
    ALL_AVAILABLE_MODELS.update(LLM_MODELS_CONF)
    CHAT_MODELS.update(LLM_MODELS_CONF)

    # bge-base-en-v1.5 produces 768-dim dense vectors; `dim` in the vector
    # store below must match. (FIX: the original first assigned "BAAI/bge-m3"
    # and immediately overwrote it — dead assignment removed. To switch to
    # bge-m3, change this name AND dim=1024 together; see the model card.)
    embed_model_name = "BAAI/bge-base-en-v1.5"
    milvus_username = 'root'
    milvus_password = 'Milvus'
    fmt = f'\n{"-" * 10} ' + '{:30}' + f' {"-" * 10}\n'

    milvus_collection_name = 'advance_rag'

    from llama_index.core import StorageContext, SimpleDirectoryReader
    from llama_index.vector_stores.milvus import MilvusVectorStore
    from llama_index.core import VectorStoreIndex

    print('test2')
    kpath = 'Z:/pywork/python-demo/knowledge_files'
    kfiles = [kpath + '/knowledge.txt', kpath + '/paul_graham_essay.txt', ]
    documents = SimpleDirectoryReader(
        input_files=kfiles,
    ).load_data()

    print("Document ID:", documents[0].doc_id)
    print('embedding_fn start')
    # Sparse embedding function feeding Milvus hybrid (sparse+dense) search.
    embedding_fn = SksBGEM3SparseEmbeddingFunction(embed_model_name)
    print('embedding_fn end')

    from llama_index.core import Settings
    from llama_index.llms.openai import OpenAI

    print('Settings.llm start')
    # Ollama's OpenAI-compatible endpoint; an api_key is required by the
    # client but not actually validated by Ollama.
    Settings.llm = OpenAI(model="qwen2.5-coder:7b",
                          api_base="http://192.168.56.1:11434/v1",
                          api_key="aa", temperature=0.7)
    print('Settings.llm end')

    print('Settings.embed_model start')
    # Dense embedding model used for both ingestion and query embedding.
    Settings.embed_model = HuggingFaceEmbedding(model_name=embed_model_name)
    print('Settings.embed_model end')

    print('vector_store start')
    # Vector dimension must match the dense embedding model above (768 for
    # bge-base-en-v1.5; bge-m3 would need 1024 — see
    # https://hf-mirror.com/BAAI/bge-m3/blob/main/README.md).
    vector_store = MilvusVectorStore(
        dim=768,
        uri="http://localhost:19530",
        token=f"{milvus_username}:{milvus_password}",
        collection_name=milvus_collection_name,
        overwrite=True,
        hybrid_ranker="RRFRanker",
        hybrid_ranker_params={"k": 60},
        # enable_sparse=True turns on hybrid retrieval and requires
        # sparse_embedding_function; False would mean dense-only retrieval.
        enable_sparse=True,
        sparse_embedding_function=embedding_fn,
    )
    print('vector_store end')

    # FIX: the original also instantiated an unused BGEM3EmbeddingFunction and
    # an unused second HuggingFaceEmbedding here — both are expensive model
    # loads with no consumers, so they have been removed.

    storage_context = StorageContext.from_defaults(vector_store=vector_store)

    index = VectorStoreIndex.from_documents(
        documents=documents, storage_context=storage_context,
    )

    print(fmt.format(" to query"))
    query_engine = index.as_query_engine()
    res = query_engine.query("What did the author learn?")
    return res


# Section-banner template: fmt.format(title) renders the title left-justified
# in a 30-character field between two dashed rules, framed by blank lines.
fmt = '\n' + '-' * 10 + ' {:30} ' + '-' * 10 + '\n'
def test1():
    """Call aaa() repeatedly, printing each result under a banner, until it
    returns something other than the 'Empty Response' placeholder."""
    while True:
        res = aaa()
        print(fmt.format("print(res)"))
        print(res)
        if str(res) != 'Empty Response':
            break

def test2():
    """Run the RAG pipeline once via aaa() and print its response."""
    result = aaa()
    banner = fmt.format("print(res)")
    print(banner)
    print(result)

if __name__ == '__main__':
    # Entry point: run the single-shot demo. test1 (retry until a non-empty
    # response) is left disabled.
    # test1()
    test2()
