import logging
import os
import uuid
from typing import Dict

from pymilvus import MilvusClient, model
from llama_index.core import StorageContext, SimpleDirectoryReader
from llama_index.core import VectorStoreIndex

logger = logging.getLogger(__name__)

# NOTE(review): leftover debug prints emitted at import time — consider removing.
print('TestMilvus1')
print('TestMilvus2')

from llama_index.core import Settings
from llama_index.core.callbacks import CallbackManager, LlamaDebugHandler

# Debug handler that prints a trace at the end of each LlamaIndex run.
# NOTE(review): created but not registered below — only the commented-out
# CallbackManager line would use it.
llama_debug = LlamaDebugHandler(print_trace_on_end=True)
from langfuse.llama_index import LlamaIndexCallbackHandler

# Langfuse tracing; credentials come from the LANGFUSE_PUBLIC_KEY /
# LANGFUSE_SECRET_KEY environment variables (os.getenv returns None if unset).
# NOTE(review): uuid1() embeds the host MAC address and a timestamp in the
# session id — use uuid4() if that leak matters.
langfuse_callback_handler = LlamaIndexCallbackHandler(
    trace_name='demo',
    user_id='sks',
    session_id=str(uuid.uuid1()),
    public_key=os.getenv('LANGFUSE_PUBLIC_KEY'),
    secret_key=os.getenv('LANGFUSE_SECRET_KEY'),
    host="https://us.cloud.langfuse.com"
)

# Settings.callback_manager = CallbackManager([langfuse_callback_handler,llama_debug])
Settings.callback_manager = CallbackManager([langfuse_callback_handler])

from llama_index.llms.openai.utils import ALL_AVAILABLE_MODELS, CHAT_MODELS

# Model names mapped to their context-window sizes (tokens).
LLM_MODELS_CONF: Dict[str, int] = {
    "ishumilin/deepseek-r1-coder-tools:14b": 32768,  # this version supports a 32k-token context
    "qwen2.5-coder:7b": 131072,
    "qwen-max-latest": 131072,  # the latest version supports 128k tokens
}
# Register these models so llama_index's OpenAI wrapper accepts them by name.
ALL_AVAILABLE_MODELS.update(LLM_MODELS_CONF)
# Also register them as chat-capable models.
CHAT_MODELS.update(LLM_MODELS_CONF)


def stdout_fun(*args):
    """Default output sink: write all arguments to stdout, space-separated."""
    print(" ".join(map(str, args)))


class LimmaIndexMilvusDemoRemote:
    """Demo of LlamaIndex RAG backed by a remote Milvus instance.

    Supports several interchangeable embedding back-ends (OpenAI-compatible /
    Ollama, BGE-M3, ONNX default, SentenceTransformer), builds dense and
    hybrid (dense + sparse) vector indexes, and answers queries either through
    LlamaIndex query engines or raw pymilvus hybrid search.
    """
    # Callback used for user-facing status messages (defaults to stdout_fun).
    stdout: callable = None
    # Documents loaded by load_data_to_documents1; None until loaded.
    documents =None

    def __init__(self, stdout=stdout_fun, embedding_model_function="OpenAIEmbedding",
                 milvus_url="http://localhost:19530", milvus_token="root:Milvus",
                 collection_name='advance_rag'):
        """Connect to Milvus and remember which embedding back-end to use.

        :param stdout: callable for status output; falls back to stdout_fun.
        :param embedding_model_function: one of get_embedding_model_names().
        :param milvus_url: Milvus server URI.
        :param milvus_token: credentials in "user:password" form — the
            split(':') below raises ValueError if the token has no colon.
        :param collection_name: base name for the demo collections.
        """
        self.milvus_url=milvus_url
        self.milvus_collection_name = collection_name
        self.milvus_username, self.milvus_password = milvus_token.split(':')
        self.milvus_token=milvus_token
        self.client = MilvusClient(uri=milvus_url, token=milvus_token)
        self.model_dimension = 0
        self.embedding_model = None
        self.embedding_model_function = embedding_model_function
        self.stdout = stdout or stdout_fun

    def init(self):
        """Select the embedding back-end and configure the global chat LLM.

        Raises KeyError if the ALIYUN-API-KEY environment variable is unset.
        """
        self.select_embedding_model(self.embedding_model_function)

        llm_url = "https://dashscope.aliyuncs.com/compatible-mode/v1"
        llm_model = "qwen-max-latest"
        llm_api_key = os.environ['ALIYUN-API-KEY']

        from llama_index.llms.openai import OpenAI
        Settings.llm = OpenAI(model=llm_model,
                              api_base=llm_url,
                              api_key=llm_api_key, temperature=0.7)

    def get_embedding_model_BGEM3EmbeddingFunction(self):
        """Build a BGE-M3 embedding function (requires the FlagEmbedding module).

        Installing FlagEmbedding via pip pulls in many dependencies and often
        fails; instead copy the FlagEmbedding directory from
        https://gitcode.com/gh_mirrors/fl/FlagEmbedding into this project.
        """
        from limma_index_milvus_demo.SksBGEM3EmbeddingFunction2 import SksBGEM3SparseEmbeddingFunction
        # Only classes under pymilvus.model.dense can be used directly.
        #from pymilvus.model.hybrid import BGEM3EmbeddingFunction
        # SksBGEM3SparseEmbeddingFunction is adapted from BGEM3EmbeddingFunction,
        # also referencing OpenAIEmbeddingFunction.

        embedding_model = SksBGEM3SparseEmbeddingFunction(
            # 1024 dimensions
            model_name='BAAI/bge-m3',  # Specify the model name
            # 768 dimensions
            # model_name='BAAI/bge-base-en-v1.5', # Specify the model name
            device='cpu',  # Specify the device to use, e.g., 'cpu' or 'cuda:0'
            use_fp16=False,  # Specify whether to use fp16. Set to `False` if `device` is `cpu`.
            return_sparse=False,

        )
        # Output dimensions: {'dense': 1024, 'colbert_vecs': 1024, 'sparse': 250002}
        # print('model.DefaultEmbeddingFunction().dim=', embedding_model.dim)
        return {"embedding_model": embedding_model, "dimension": 1024}

    def get_embedding_model_OpenAIFunction(self):
        """OpenAI-compatible embeddings served by a local Ollama endpoint (verified working)."""
        EMBEDDING_MODE_API_KEY = 'ollama'
        EMBEDDING_MODE_NAME = 'bge-m3:latest'
        EMBEDDING_MODE_DIMENSION = 1024
        EMBEDDING_MODE_BASE_URL = "http://192.168.56.1:11434/v1"
        # embedding_mode=OpenAI( base_url=EMBEDDING_MODE_BASE_URL, api_key=EMBEDDING_MODE_API_KEY, )

        from pymilvus.model.dense import OpenAIEmbeddingFunction
        embedding_model = OpenAIEmbeddingFunction(
            base_url=EMBEDDING_MODE_BASE_URL,
            model_name=EMBEDDING_MODE_NAME,
            api_key=EMBEDDING_MODE_API_KEY,
            dimensions=EMBEDDING_MODE_DIMENSION,  # ef.dim is taken from this value; the OpenAI API is not queried for the model's real dimension
        )
        return {"embedding_model": embedding_model, "dimension": EMBEDDING_MODE_DIMENSION}

    def get_embedding_model_OnnxEmbeddingFunction(self):
        """Default ONNX embedding function shipped with pymilvus (verified working)."""
        # pip install -U huggingface_hub
        # set "HF_ENDPOINT=https://hf-mirror.com"
        # huggingface-cli download --resume-download GPTCache/paraphrase-albert-onnx --local-dir F:\HuggingFaceModels\GPTCache\paraphrase-albert-onnx
        # Set HF_ENDPOINT=https://hf-mirror.com in the environment when running python.
        # Downloaded models are cached under C:\Users\Administrator\.cache\huggingface\hub;
        # set HF_HOME=F:\HuggingFaceModels to change the default model cache directory.
        embedding_model = model.DefaultEmbeddingFunction()
        print('model.DefaultEmbeddingFunction().dim=', embedding_model.dim)
        return {"embedding_model": embedding_model, "dimension": 768}

    def get_embedding_model_SentenceTransformerEmbeddingFunction(self):
        """SentenceTransformer embeddings (verified working)."""
        embedding_mode = model.dense.SentenceTransformerEmbeddingFunction(
            # 384 dimensions
            model_name='all-MiniLM-L6-v2',
            device='cpu',
        )
        print('model.dense.SentenceTransformerEmbeddingFunction().dim=', embedding_mode.dim)
        return {"embedding_model": embedding_mode, "dimension": embedding_mode.dim}

    def get_embedding_model_function(self):
        """Return the name of the currently selected embedding back-end."""
        return self.embedding_model_function

    def get_embedding_model_names(self):
        """Return the back-end names accepted by select_embedding_model."""
        return [
            "OpenAIEmbedding",
            "BGEM3Embedding",
            "OnnxEmbedding",
            "SentenceTransformerEmbedding",
        ]

    def select_embedding_model(self, embedding_model_name):
        """Select the embedding back-end by name.

        Sets self.embedding_model / self.model_dimension, installs the matching
        LlamaIndex Settings.embed_model, and prepares the default sparse
        embedding function used for hybrid search.

        :raises Exception: if embedding_model_name is not a supported name.
        """
        print('select_embedding_model')
        self.embedding_model_function = embedding_model_name
        if embedding_model_name == "OpenAIEmbedding":
            _tmp = self.get_embedding_model_OpenAIFunction()
            logger.info('向量模型切换为 OpenAIEmbedding')

            from llama_index.embeddings.openai import (
                OpenAIEmbedding,
            )  # pants: no-infer-dep
            print('select_embedding_model 11')
            # Settings.embed_model = OpenAIEmbedding(
            #     api_base="http://192.168.56.1:11434/v1",
            #     api_key="aa",
            #     model="bge-m3:latest",
            #     dimensions=self.model_dimension,
            # )
            from limma_index_milvus_demo.sks_lamma_index_openai_embedding import SksOllamaEmbedding
            Settings.embed_model=SksOllamaEmbedding()
            print('select_embedding_model 22')

        elif embedding_model_name == "SentenceTransformerEmbedding":
            _tmp = self.get_embedding_model_SentenceTransformerEmbeddingFunction()
            logger.info('向量模型切换为 SentenceTransformerEmbedding')
            from limma_index_milvus_demo.sks_lamma_index_openai_embedding import SksOllamaEmbedding
            # Settings.embed_model=SksOllamaEmbedding(model='qllama/bge-small-en-v1.5:latest')
            from llama_index.embeddings.huggingface import HuggingFaceEmbedding
            Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
        elif embedding_model_name == "BGEM3Embedding":
            _tmp = self.get_embedding_model_BGEM3EmbeddingFunction()
            from limma_index_milvus_demo.sks_lamma_index_openai_embedding import SksOllamaEmbedding
            Settings.embed_model=SksOllamaEmbedding()
            logger.info('向量模型切换为 BGEM3Embedding')
        elif embedding_model_name == "OnnxEmbedding":
            _tmp = self.get_embedding_model_OnnxEmbeddingFunction()
            logger.info('向量模型切换为 OnnxEmbedding')
            from llama_index.embeddings.huggingface import HuggingFaceEmbedding
            # Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")

            Settings.embed_model = HuggingFaceEmbedding(
                # model_name="BAAI/bge-small-en-v1.5" # 384 dimensions
                model_name = "BAAI/bge-base-en-v1.5"  # 768 dimensions
            )
        else:
            raise Exception("embedding_model_name is not support")
        self.embedding_model = _tmp["embedding_model"]
        self.model_dimension = _tmp["dimension"]
        # Settings.llm = OpenAI(model="qwen2.5-coder:7b",
        #                       api_base="http://192.168.56.1:11434/v1",
        #                       api_key="aa", temperature=0.7)

        if self.stdout :
            self.stdout(f"切换模型成功,{embedding_model_name} {self.model_dimension} 维度\n")

        # Default sparse embedding function used by MilvusVectorStore hybrid search.
        from llama_index.vector_stores.milvus.utils import get_default_sparse_embedding_function
        self.sparse_embedding_function = get_default_sparse_embedding_function()

    def data1(self):
        """Embed three sample English sentences and return Milvus insert rows.

        Ids start at 3, presumably to avoid colliding with insert_data's
        ids 0-2 — TODO confirm.
        """
        docs = [
            "Artificial intelligence was founded as an academic discipline in 1956.",
            "Alan Turing was the first person to conduct substantial research in AI.",
            "Born in Maida Vale, London, Turing was raised in southern England.",
        ]
        embedding_model = self.embedding_model
        vectors = embedding_model.encode_documents(docs)
        data = [{"id": i + 3, "vector": vectors[i], "text": docs[i]} for i in range(len(vectors))]
        return data


    def milvus_collection_name_by_li(self):
        # Collection used by the LlamaIndex-built knowledge base.
        return self.milvus_collection_name+'_li'
    def milvus_collection_name_by_OnnxEmbedding(self):
        # Collection used by the dense-only OnnxEmbedding knowledge base.
        return self.milvus_collection_name+'_onnx'

    def milvus_collection_name_by_sparse_OnnxEmbedding(self):
        # Collection used by the hybrid (dense + sparse) knowledge base.
        return self.milvus_collection_name+'_sparse'
    def drop_connection(self,milvus_collection_name=None):
        """Drop the given collection (defaults to the base demo collection)."""
        milvus_collection_name=milvus_collection_name or self.milvus_collection_name
        self.client.drop_collection(milvus_collection_name)
        self.stdout(f"删除集合{milvus_collection_name}成功\n")

    def has_connection(self):
        """Return True if the base demo collection exists."""
        return self.client.has_collection(self.milvus_collection_name)

    def create_connection(self):
        """Create the base demo collection with the current model dimension."""
        self.client.create_collection(
            collection_name=self.milvus_collection_name,
            dimension=self.model_dimension,
        )

    def clear_data(self):
        """Delete every entity in the base demo collection, then flush."""
        # Delete all vectors.
        # client.delete(milvus_collection_name, filter="id > 0")  # the filter parameter must be a valid boolean expression string
        self.client.delete(self.milvus_collection_name, filter="id is not null")
        self.client.flush(collection_name=self.milvus_collection_name)

    def insert_data(self):
        """Embed three sample Chinese sentences and insert them (ids 0-2)."""
        embedding_fn = self.embedding_model

        docs = [
            "人工智能作为一门学科在1956年成立。",
            "艾伦·图灵是最早进行实质性人工智能研究的人之一。",
            "图灵出生在伦敦的梅达维尔，并在英格兰南部长大。",
        ]
        vectors = embedding_fn.encode_documents(docs)
        # NOTE(review): despite the "len(vectors)=" label this prints the vectors themselves.
        print("len(vectors)=", vectors)

        print("维度:", embedding_fn.dim)

        data = [
            {"id": i, "vector": vectors[i], "text": docs[i], "subject": "历史"}
            for i in range(len(vectors))
        ]

        print("数据包含", len(data), "个实体，每个实体包含的字段为：", data[0].keys())
        print("向量维度：", len(data[0]["vector"]))

        self.client.insert(collection_name=self.milvus_collection_name, data=data)
        self.client.flush(collection_name=self.milvus_collection_name)

    def insert_data2(self):
        """Insert the sample rows produced by data1() into the base collection."""
        self.client.insert(collection_name=self.milvus_collection_name, data=self.data1())
        self.client.flush(collection_name=self.milvus_collection_name)

    def embedding_query_str(self, query_str):
        """Encode a single query string with the current embedding back-end."""
        queries = [query_str]
        query_embeddings = self.embedding_model.encode_queries(queries)
        return query_embeddings

    def load_data_to_documents1(self,input_dir=None, input_files=None):
        """Load documents from a directory or explicit file list into self.documents."""
        print(f'input_dir={input_dir}')
        print(f'input_files={input_files}')
        documents = SimpleDirectoryReader(
            input_files=input_files,
            input_dir=input_dir,
        ).load_data()
        self.stdout(documents[0].doc_id)
        self.documents=documents

    def get_documents_inspect(self):
        """Return the loaded documents; raise if none have been loaded yet."""
        if not self.documents:
             raise Exception("请先 读取文档")
        return self.documents

    def create_knowledge(self):
        """Build a hybrid (dense + sparse) LlamaIndex index in the *_li collection."""
        documents=self.get_documents_inspect()
        from llama_index.vector_stores.milvus import MilvusVectorStore
        vector_store = MilvusVectorStore(
            dim=self.model_dimension,
            uri=self.milvus_url,
            token=f"{self.milvus_token}",
            collection_name=self.milvus_collection_name_by_li(),
            overwrite=True,
            hybrid_ranker="RRFRanker",
            hybrid_ranker_params={"k": 60},
            enable_sparse=True,# True enables sparse (hybrid) retrieval and requires sparse_embedding_function; False uses dense-only retrieval
            sparse_embedding_function=self.sparse_embedding_function,
        )
        storage_context = StorageContext.from_defaults(vector_store=vector_store)
        from sks_lamma.sparse_embedding import SksBGEM3SparseEmbeddingFunction
        embed_model_name = "BAAI/bge-m3"
        # embedding_fn = SksBGEM3SparseEmbeddingFunction(embed_model_name)
        # from llama_index.embeddings.openai import (
        #     OpenAIEmbedding,
        # )  # pants: no-infer-dep
        # embedding_fn = OpenAIEmbedding(
        #     api_base="http://192.168.56.1:11434/v1",
        #     api_key="aa",
        #     model="bge-m3:latest",
        #     dimensions=self.model_dimension,
        # )
        self.vectorStoreIndex = VectorStoreIndex.from_documents(
            documents=documents, storage_context=storage_context,
            # enable_sparse=True,
            # embed_model= embedding_fn,
            # embed_model=Settings.embed_model ,
        )

    def limma_index_query(self, query):
        """Run a hybrid query against the index built by create_knowledge."""
        query_engine = self.vectorStoreIndex.as_query_engine(similarity_top_k=3,vector_store_query_mode="hybrid")
        res = query_engine.query(query)
        return res

    def set_to_OnnxEmbedding(self):
        """Switch to the OnnxEmbedding back-end."""
        _tmp = self.get_embedding_model_OnnxEmbeddingFunction()
        logger.info('向量模型切换为 OnnxEmbedding')
        self.select_embedding_model('OnnxEmbedding')
        # from llama_index.embeddings.huggingface import HuggingFaceEmbedding
        # Settings.embed_model = HuggingFaceEmbedding(
        #     # model_name="BAAI/bge-small-en-v1.5" # 384 dimensions
        #     model_name = "BAAI/bge-base-en-v1.5"  # 768 dimensions
        # )
        # self.embedding_model = _tmp["embedding_model"]
        # self.model_dimension = _tmp["dimension"]

    def create_knowledge_by_OnnxEmbedding(self):
        """Build a dense-only LlamaIndex index in the *_onnx collection."""
        documents=self.get_documents_inspect()

        from llama_index.vector_stores.milvus import MilvusVectorStore
        vector_store = MilvusVectorStore(
            dim=self.model_dimension,
            uri=self.milvus_url,
            token=self.milvus_token,
            collection_name=self.milvus_collection_name_by_OnnxEmbedding(),
            overwrite=True,
            hybrid_ranker="RRFRanker",
            hybrid_ranker_params={"k": 60},
        )
        storage_context = StorageContext.from_defaults(vector_store=vector_store)
        from sks_lamma.sparse_embedding import SksBGEM3SparseEmbeddingFunction
        embed_model_name = "BAAI/bge-m3"
        # embedding_fn = SksBGEM3SparseEmbeddingFunction(embed_model_name)
        # from llama_index.embeddings.openai import (
        #     OpenAIEmbedding,
        # )  # pants: no-infer-dep
        # embedding_fn = OpenAIEmbedding(
        #     api_base="http://192.168.56.1:11434/v1",
        #     api_key="aa",
        #     model="bge-m3:latest",
        #     dimensions=self.model_dimension,
        # )
        self.vectorStoreIndex = VectorStoreIndex.from_documents(
            documents=documents, storage_context=storage_context,
            # enable_sparse=True,
            # embed_model= embedding_fn,
            # embed_model=Settings.embed_model ,
        )
    def query_milvus_by_OnnxEmbedding(self, query):
        """Dense-only query against the index built by create_knowledge_by_OnnxEmbedding."""
        company_engine = self.vectorStoreIndex.as_query_engine(similarity_top_k=3)
        res = company_engine.query(query)
        return res

    def set_to_sparse_OnnxEmbedding(self):
        """Switch to the OnnxEmbedding back-end (variant used with the sparse/hybrid index)."""
        _tmp = self.get_embedding_model_OnnxEmbeddingFunction()
        logger.info('向量模型切换为 OnnxEmbedding')
        from llama_index.embeddings.huggingface import HuggingFaceEmbedding
        Settings.embed_model = HuggingFaceEmbedding(
            # model_name="BAAI/bge-small-en-v1.5" # 384 dimensions
            model_name = "BAAI/bge-base-en-v1.5"  # 768 dimensions
        )
        self.embedding_model = _tmp["embedding_model"]
        self.model_dimension = _tmp["dimension"]

    def create_knowledge_by_sparse_OnnxEmbedding(self):
        """Build a hybrid (dense + sparse) index in the *_sparse collection."""
        documents=self.get_documents_inspect()

        # Default sparse embedding function from the official examples.

        from llama_index.vector_stores.milvus import MilvusVectorStore
        vector_store = MilvusVectorStore(
            dim=self.model_dimension,
            uri=self.milvus_url,
            token=self.milvus_token,
            collection_name=self.milvus_collection_name_by_sparse_OnnxEmbedding(),
            overwrite=True,
            hybrid_ranker="RRFRanker",
            hybrid_ranker_params={"k": 60},
            enable_sparse=True,  # True enables sparse (hybrid) retrieval and requires sparse_embedding_function; False uses dense-only retrieval
            sparse_embedding_function=self.sparse_embedding_function,
        )
        storage_context = StorageContext.from_defaults(vector_store=vector_store)
        # NOTE(review): the index created on the next line is immediately replaced
        # by the from_documents() call below, so this first assignment is discarded.
        self.vectorStoreIndex = VectorStoreIndex.from_vector_store(vector_store=vector_store, storage_context=storage_context)
        self.vectorStoreIndex = VectorStoreIndex.from_documents(
            documents=documents, storage_context=storage_context,
        )
        # self.vectorStoreIndex.update_ref_doc(documents=documents)
    def query_milvus_by_sparse_OnnxEmbedding(self, query):
        """Hybrid query against the index built by create_knowledge_by_sparse_OnnxEmbedding."""
        company_engine = self.vectorStoreIndex.as_query_engine(similarity_top_k=3,vector_store_query_mode="hybrid")
        res = company_engine.query(query)
        return res

    def query_milvus(self, query):
        """Raw pymilvus hybrid search (dense + sparse, RRF-fused) on the *_sparse collection."""
        query_str = self.embedding_query_str(query)

        # print(str(query_str))
        # query_vector = query_str
        # print(str(query_vector))
        # res = self.client.search(
        #     collection_name=self.milvus_collection_name,  # Collection name
        #     data=query_vector,  # Replace with your query vector
        #     search_params={
        #         "metric_type": "COSINE",
        #         "params": {"level": 1},  # Search parameters
        #     },  # Search parameters
        #     limit=15,  # Max. number of search results to return
        #     output_fields=["id", "text"],  # Fields to return in the search results
        #     consistency_level="Bounded"
        # )
        dense_emb = query_str
        # Settings.embed_model.get_query_embedding(query_str)
        from pymilvus import AnnSearchRequest
        dense_req = AnnSearchRequest(
            data=[dense_emb[0]],
            anns_field='embedding',
            param={'metric_type': 'IP', 'params': {}},
            limit=2,
            expr='',  # Apply metadata filters to dense search
        )
        # Default sparse embedding function from the official examples.
        sparse_emb=self.sparse_embedding_function.encode_queries([query])[0]
        sparse_search_params = {"metric_type": "IP"}
        sparse_req = AnnSearchRequest(
            data=[sparse_emb],
            anns_field="sparse_embedding",
            param=sparse_search_params,
            limit=2,
            expr='',  # Apply metadata filters to sparse search
        )
        from pymilvus import RRFRanker
        # Reciprocal-rank fusion of the dense and sparse result lists.
        ranker = RRFRanker(k=60)
        res = self.client.hybrid_search(
            collection_name= self.milvus_collection_name_by_sparse_OnnxEmbedding(),
            reqs=[dense_req, sparse_req],
            ranker=ranker,
            limit=2,
            output_fields=['*'],
        )
        return res

    def test(self):
        """Smoke-test the OpenAI-compatible (Ollama) embedding endpoint with one query."""
        EMBEDDING_MODE_API_KEY = 'ollama'
        EMBEDDING_MODE_NAME = 'bge-m3:latest'
        EMBEDDING_MODE_BASE_URL = "http://192.168.56.1:11434/v1"
        from openai import OpenAI

        ef = model.dense.OpenAIEmbeddingFunction(
            base_url=EMBEDDING_MODE_BASE_URL,
            model_name=EMBEDDING_MODE_NAME,
            api_key=EMBEDDING_MODE_API_KEY,
            dimensions=1024,  # ef.dim is taken from this value; the OpenAI API is not queried for the model's real dimension
        )
        print(f"ef.dim={ef.dim}")
        query_embeddings = ef.encode_queries(["你好"])
        print(query_embeddings)


if __name__ == '__main__':
    # Ad-hoc smoke run: connect with defaults and wipe the demo collection.
    _demo = LimmaIndexMilvusDemoRemote()
    # _demo.create_connection()
    # _demo.test()
    _demo.clear_data()
