import logging
import os
from pymilvus import MilvusClient, model
logger = logging.getLogger(__name__)

# NOTE(review): import-time debug prints — presumably leftover from module-load
# troubleshooting; consider removing or downgrading to logger.debug so that
# importing this module stays side-effect free.
print('TestMilvus1')
print('TestMilvus2')

class MilvusDemoRemote:
    """Demo client for a remote Milvus server with pluggable embedding backends.

    Wraps a :class:`MilvusClient` bound to a single collection and exposes
    helpers to create/drop the collection, insert demo documents and run
    vector similarity searches.  The text-embedding backend is selected by
    name via :meth:`select_embedding_model`.
    """

    def __init__(self, embedding_model_function="OpenAIEmbedding", milvus_url="http://localhost:19530", milvus_token="root:Milvus", collection_name='sks_demo_collection'):
        """Connect to Milvus and activate the requested embedding backend.

        :param embedding_model_function: backend name; one of
            :meth:`get_embedding_model_names`.
        :param milvus_url: URI of the Milvus server.
        :param milvus_token: credentials in ``user:password`` form.
        :param collection_name: name of the collection this demo operates on.
        :raises ValueError: if ``embedding_model_function`` is not supported.
        """
        self.milvus_collection_name = collection_name
        # Token is "user:password"; keep the split parts around for reference
        # even though only the raw token is handed to the client below.
        self.milvus_username, self.milvus_password = milvus_token.split(':')
        self.client = MilvusClient(uri=milvus_url, token=milvus_token)
        # Both fields are (re)populated by select_embedding_model() below.
        self.model_dimension = 0
        self.embedding_model = None
        self.embedding_model_function = embedding_model_function
        self.select_embedding_model(embedding_model_function)

    def get_embedding_model_BGEM3EmbeddingFunction(self):
        """Build the local BGE-M3 embedding backend (1024-dim dense vectors).

        Verified working.  Requires the FlagEmbedding package; installing it
        via pip pulls many dependencies and often fails, so an alternative is
        to copy the FlagEmbedding directory from
        https://gitcode.com/gh_mirrors/fl/FlagEmbedding into the project.

        :return: dict with ``embedding_model`` and its ``dimension``.
        """
        from milvus_demo.SksBGEM3EmbeddingFunction import SksBGEM3SparseEmbeddingFunction
        # Only classes under pymilvus.model.dense can be used directly.
        # SksBGEM3SparseEmbeddingFunction is adapted from
        # pymilvus.model.hybrid.BGEM3EmbeddingFunction, with
        # OpenAIEmbeddingFunction used as a reference.
        embedding_model = SksBGEM3SparseEmbeddingFunction(
            model_name='BAAI/bge-m3',  # 1024-dim model
            # model_name='BAAI/bge-base-en-v1.5',  # 768-dim alternative
            device='cpu',    # e.g. 'cpu' or 'cuda:0'
            use_fp16=False,  # must be False when device is 'cpu'
            return_sparse=False,
        )
        # BGE-M3 output dims: {'dense': 1024, 'colbert_vecs': 1024, 'sparse': 250002}
        return {"embedding_model": embedding_model, "dimension": 1024}

    def get_embedding_model_OpenAIFunction(self):
        """Build an OpenAI-protocol embedding backend (here a local Ollama).

        Verified working.  ``ef.dim`` is taken from the ``dimensions``
        argument below — the OpenAI API is NOT queried for the served
        model's real dimension, so the value must match the model.

        :return: dict with ``embedding_model`` and its ``dimension``.
        """
        EMBEDDING_MODE_API_KEY = 'ollama'
        EMBEDDING_MODE_NAME = 'bge-m3:latest'
        EMBEDDING_MODE_DIMENSION = 1024
        EMBEDDING_MODE_BASE_URL = "http://192.168.56.1:11434/v1"
        from pymilvus.model.dense import OpenAIEmbeddingFunction
        embedding_model = OpenAIEmbeddingFunction(
            base_url=EMBEDDING_MODE_BASE_URL,
            model_name=EMBEDDING_MODE_NAME,
            api_key=EMBEDDING_MODE_API_KEY,
            dimensions=EMBEDDING_MODE_DIMENSION,
        )
        return {"embedding_model": embedding_model, "dimension": EMBEDDING_MODE_DIMENSION}

    def get_embedding_model_OnnxEmbeddingFunction(self):
        """Build the pymilvus default ONNX embedding backend (768-dim).

        Verified working.

        :return: dict with ``embedding_model`` and its ``dimension``.
        """
        # Setup notes (Windows, mirrored Hugging Face hub):
        #   pip install -U huggingface_hub
        #   set "HF_ENDPOINT=https://hf-mirror.com"
        #   huggingface-cli download --resume-download GPTCache/paraphrase-albert-onnx --local-dir F:\HuggingFaceModels\GPTCache\paraphrase-albert-onnx
        # Run python with HF_ENDPOINT=https://hf-mirror.com set.  Downloaded
        # models live under C:\Users\Administrator\.cache\huggingface\hub;
        # set HF_HOME=F:\HuggingFaceModels to relocate the cache.
        embedding_model = model.DefaultEmbeddingFunction()
        print('model.DefaultEmbeddingFunction().dim=', embedding_model.dim)
        return {"embedding_model": embedding_model, "dimension": 768}

    def get_embedding_model_SentenceTransformerEmbeddingFunction(self):
        """Build a SentenceTransformer embedding backend (384-dim MiniLM).

        Verified working.

        :return: dict with ``embedding_model`` and its ``dimension``.
        """
        embedding_model = model.dense.SentenceTransformerEmbeddingFunction(
            model_name='all-MiniLM-L6-v2',  # 384-dim model
            device='cpu',
        )
        print('model.dense.SentenceTransformerEmbeddingFunction().dim=', embedding_model.dim)
        return {"embedding_model": embedding_model, "dimension": embedding_model.dim}

    def get_embedding_model_function(self):
        """Return the name of the currently selected embedding backend."""
        return self.embedding_model_function

    def get_embedding_model_names(self):
        """Return the backend names accepted by :meth:`select_embedding_model`."""
        return [
            "OpenAIEmbedding",
            "BGEM3Embedding",
            "OnnxEmbedding",
            "SentenceTransformerEmbedding",
        ]

    def select_embedding_model(self, embedding_model_name):
        """Switch the active embedding backend by name.

        On success, updates ``embedding_model``, ``model_dimension`` and
        ``embedding_model_function``.  Unlike the original if/elif ladder,
        the name is validated *before* any state is mutated, so a bad name
        leaves the instance unchanged.

        :param embedding_model_name: one of :meth:`get_embedding_model_names`.
        :raises ValueError: if the name is not a supported backend.
        """
        factories = {
            "OpenAIEmbedding": self.get_embedding_model_OpenAIFunction,
            "SentenceTransformerEmbedding": self.get_embedding_model_SentenceTransformerEmbeddingFunction,
            "BGEM3Embedding": self.get_embedding_model_BGEM3EmbeddingFunction,
            "OnnxEmbedding": self.get_embedding_model_OnnxEmbeddingFunction,
        }
        try:
            factory = factories[embedding_model_name]
        except KeyError:
            # ValueError is backward-compatible for callers catching Exception.
            raise ValueError(
                f"embedding model {embedding_model_name!r} is not supported"
            ) from None
        result = factory()
        self.embedding_model_function = embedding_model_name
        self.embedding_model = result["embedding_model"]
        self.model_dimension = result["dimension"]
        logger.info('向量模型切换为 %s', embedding_model_name)

    def data1(self):
        """Encode three English demo sentences into insert-ready rows.

        Ids start at 3 so these rows do not collide with the ids 0-2 used
        by :meth:`insert_data`.

        :return: list of ``{"id", "vector", "text"}`` dicts.
        """
        docs = [
            "Artificial intelligence was founded as an academic discipline in 1956.",
            "Alan Turing was the first person to conduct substantial research in AI.",
            "Born in Maida Vale, London, Turing was raised in southern England.",
        ]
        vectors = self.embedding_model.encode_documents(docs)
        return [
            {"id": i + 3, "vector": vec, "text": doc}
            for i, (vec, doc) in enumerate(zip(vectors, docs))
        ]

    def drop_connection(self):
        """Drop the demo collection (misnamed, kept for backward compatibility)."""
        self.client.drop_collection(self.milvus_collection_name)

    def has_connection(self):
        """Return True if the demo collection exists (misnamed, kept for compat)."""
        return self.client.has_collection(self.milvus_collection_name)

    def create_connection(self):
        """Create the demo collection sized to the active embedding model."""
        self.client.create_collection(
            collection_name=self.milvus_collection_name,
            dimension=self.model_dimension,
        )

    def clear_data(self):
        """Delete every entity in the collection, then flush.

        ``filter`` must be a valid boolean expression string; "id is not null"
        matches all entities.
        """
        self.client.delete(self.milvus_collection_name, filter="id is not null")
        self.client.flush(collection_name=self.milvus_collection_name)

    def insert_data(self):
        """Encode three Chinese demo sentences and insert them (ids 0-2)."""
        embedding_fn = self.embedding_model

        docs = [
            "人工智能作为一门学科在1956年成立。",
            "艾伦·图灵是最早进行实质性人工智能研究的人之一。",
            "图灵出生在伦敦的梅达维尔，并在英格兰南部长大。",
        ]
        vectors = embedding_fn.encode_documents(docs)
        # BUG FIX: previously printed the vectors themselves under this label.
        print("len(vectors)=", len(vectors))
        print("维度:", embedding_fn.dim)

        data = [
            {"id": i, "vector": vectors[i], "text": docs[i], "subject": "历史"}
            for i in range(len(vectors))
        ]

        print("数据包含", len(data), "个实体，每个实体包含的字段为：", data[0].keys())
        print("向量维度：", len(data[0]["vector"]))

        self.client.insert(collection_name=self.milvus_collection_name, data=data)
        self.client.flush(collection_name=self.milvus_collection_name)

    def insert_data2(self):
        """Insert the English demo rows from :meth:`data1`, then flush."""
        self.client.insert(collection_name=self.milvus_collection_name, data=self.data1())
        self.client.flush(collection_name=self.milvus_collection_name)

    def embedding_query_str(self, query_str):
        """Encode a single query string.

        :param query_str: natural-language query text.
        :return: list containing one query vector (backend-specific type).
        """
        return self.embedding_model.encode_queries([query_str])

    def query_milvus(self, query):
        """Search the collection for the 15 nearest neighbours of ``query``.

        :param query: natural-language query text.
        :return: Milvus search results with ``id`` and ``text`` fields.
        """
        query_vectors = self.embedding_query_str(query)
        # Debug output (the original printed the same object twice).
        print(str(query_vectors))
        res = self.client.search(
            collection_name=self.milvus_collection_name,
            data=query_vectors,
            search_params={
                "metric_type": "COSINE",  # must match the collection's metric
                "params": {"level": 1},
            },
            limit=15,                         # max number of results
            output_fields=["id", "text"],     # fields returned per hit
            consistency_level="Bounded"
        )
        return res

    def test(self):
        """Smoke-test the OpenAI-protocol embedding backend directly."""
        EMBEDDING_MODE_API_KEY = 'ollama'
        EMBEDDING_MODE_NAME = 'bge-m3:latest'
        EMBEDDING_MODE_BASE_URL = "http://192.168.56.1:11434/v1"

        ef = model.dense.OpenAIEmbeddingFunction(
            base_url=EMBEDDING_MODE_BASE_URL,
            model_name=EMBEDDING_MODE_NAME,
            api_key=EMBEDDING_MODE_API_KEY,
            # ef.dim comes from this value; the API is not queried for it.
            dimensions=1024,
        )
        print(f"ef.dim={ef.dim}")
        query_embeddings = ef.encode_queries(["你好"])
        print(query_embeddings)

if __name__ == '__main__':
    # Script entry point: connect to the local Milvus instance and empty
    # the demo collection.
    remote_demo = MilvusDemoRemote()
    # remote_demo.create_connection()
    # remote_demo.test()
    remote_demo.clear_data()