"""
milvus数据入库及混合检索
"""
from typing import List
from pymilvus import MilvusClient, MilvusException
from pymilvus import AnnSearchRequest, RRFRanker
from pymilvus import DataType, Function, FunctionType
from core.document import Document
from core.embedding import Embeddings
from threading import RLock
from core.config import settings
import re
from loguru import logger


class MilvusVector(object):
    """Singleton vector store backed by Milvus.

    Provides collection creation with a dense-embedding (IVF_FLAT/IP) index
    plus a server-side BM25 sparse index, batched inserts, and
    full-text / hybrid retrieval that returns ``Document`` objects.
    """

    lock = RLock()
    # Milvus collection names may only contain letters, digits and '_';
    # _normalize_collection_name() strips everything else.
    _INVALID_NAME_CHARS = re.compile(r'[^\dA-Za-z]')

    def __new__(cls, *args, **kwargs):
        # Lock-guarded singleton: the Milvus client, database creation and
        # database selection run exactly once, on first construction.
        with MilvusVector.lock:
            if not hasattr(MilvusVector, "_instance"):
                MilvusVector._instance = object.__new__(cls)
                client = MilvusClient(uri=settings.MILVUS_URI)
                if settings.MILVUS_DBNAME not in client.list_databases():
                    client.create_database(settings.MILVUS_DBNAME)
                client.using_database(settings.MILVUS_DBNAME)
                cls.client = client
        return MilvusVector._instance

    def __init__(self):
        # __init__ runs on EVERY MilvusVector() call even though __new__
        # always returns the shared instance; only build the embedding
        # model once instead of recreating it per call.
        if not hasattr(self, "_embeddings"):
            self._embeddings = Embeddings()

    @staticmethod
    def _normalize_collection_name(collection_name: str) -> str:
        """Return a Milvus-safe collection name.

        Strips non-alphanumeric characters and prefixes '_'. The transform
        is idempotent, so an already-normalized name passes through unchanged.
        """
        return '_' + MilvusVector._INVALID_NAME_CHARS.sub('', collection_name)

    def create(self, collection_name: str, documents: "List[Document]", clean: bool = True, **kwargs):
        """Embed *documents* and store them in *collection_name*.

        Args:
            collection_name: Raw name; normalized before use.
            documents: Documents to embed and insert.
            clean: Drop any existing collection of the same name first.
        """
        collection_name = self._normalize_collection_name(collection_name)
        embeddings = self._embeddings.embed_documents(documents)
        self.create_collection(collection_name, vector_dim=len(embeddings[0]), clean=clean)
        self.add_texts(collection_name, documents, embeddings)

    def add_texts(self, collection_name, documents: "list[Document]", embeddings: "list[list[float]]",
                  batch_size: int = 128, **kwargs) -> "list[str]":
        """Insert documents and their embeddings in batches.

        Args:
            collection_name: Normalized collection name.
            documents: Documents whose content/metadata are stored.
            embeddings: Dense vectors, parallel to *documents*.
            batch_size: Rows per insert request.

        Returns:
            Primary keys of the inserted rows.

        Raises:
            MilvusException: re-raised after logging when a batch fails.
        """
        rows = [
            {
                'content': doc.content,
                'embedding': emb,
                'metadata': doc.metadata,
            }
            for doc, emb in zip(documents, embeddings)
        ]
        total_count = len(rows)

        pks: list[str] = []
        for start in range(0, total_count, batch_size):
            batch = rows[start: start + batch_size]
            try:
                # MilvusClient.insert returns a dict such as
                # {'insert_count': n, 'ids': [...]}; collect only the ids
                # (extending with the dict would add its string keys).
                result = self.client.insert(collection_name=collection_name, data=batch)
                pks.extend(result['ids'])
            except MilvusException:
                # loguru uses {}-style placeholders; %-style args are ignored.
                logger.exception("milvus插入数据失败: {}/{}", start, total_count)
                raise
        return pks

    def fulltext_search(self, collection_name: str, query: str, top_k: int = 10) -> "list[Document]":
        """BM25 full-text search over *collection_name*.

        Args:
            collection_name: Raw name; normalized before use.
            query: Query text, tokenized server-side by the BM25 function.
            top_k: Maximum number of hits to return.
        """
        collection_name = self._normalize_collection_name(collection_name)

        fulltext_search_result = self.client.search(
            collection_name=collection_name,
            data=[query],
            anns_field='sparse_bm25',
            search_params={
                # drop_ratio_search: ignore the lowest-weighted fraction of
                # query terms to speed up the sparse search.
                'params': {'drop_ratio_search': 0.2},
            },
            limit=top_k,
            output_fields=['content', 'metadata']
        )
        return self._hits_to_documents(fulltext_search_result)

    @staticmethod
    def _hits_to_documents(search_result) -> "list[Document]":
        """Flatten a Milvus search result into Documents, keeping each
        hit's relevance score on ``document.distance``."""
        documents = []
        for hits in search_result:
            for hit in hits:
                document = Document(content=hit['entity']['content'], metadata=hit['entity']['metadata'])
                document.distance = hit['distance']
                documents.append(document)
        return documents

    def hybrid_search(self, collection_name: str, query: str, top_k: int = 10, **kwargs) -> "list[Document]":
        """Hybrid retrieval: BM25 + dense vector search fused with RRF.

        Args:
            collection_name: Raw name; normalized before use.
            query: Query text; also embedded for the dense request.
            top_k: Maximum number of fused hits to return.
        """
        collection_name = self._normalize_collection_name(collection_name)

        # Sparse/BM25 request over the server-generated sparse field.
        request_bm25 = AnnSearchRequest(
            data=[query],
            anns_field="sparse_bm25",
            param={"metric_type": "BM25"},
            limit=top_k
        )

        # Dense request over the embedding field (inner-product metric,
        # matching the index created in create_collection).
        query_embedding = self._embeddings.embed_query(query)
        request_dense = AnnSearchRequest(
            data=[query_embedding],
            anns_field="embedding",
            param={"metric_type": "IP", "params": {"nprobe": 2}},
            limit=top_k
        )

        # Reciprocal Rank Fusion merges the two ranked lists; k=100 smooths
        # the contribution of lower-ranked hits.
        ranker = RRFRanker(100)
        hybrid_search_result = self.client.hybrid_search(
            collection_name=collection_name,
            reqs=[request_dense, request_bm25],
            ranker=ranker,
            limit=top_k,
            output_fields=['content', 'metadata']
        )
        return self._hits_to_documents(hybrid_search_result)

    def fetch_documents(self, collection_name: str, batch_size: int = 128) -> "list[Document]":
        """Return every document in the collection, ordered by
        ``metadata['block_index']``.

        NOTE(review): assumes every row's metadata carries 'block_index';
        a row without it raises KeyError — confirm against the writer.
        """
        # Normalize for consistency with create()/search(); the transform is
        # idempotent, so already-normalized names pass through unchanged.
        collection_name = self._normalize_collection_name(collection_name)

        iterator = self.client.query_iterator(
            collection_name=collection_name,
            batch_size=batch_size,
            output_fields=['content', 'metadata']
        )
        documents = []
        while True:
            batch = iterator.next()
            if not batch:
                iterator.close()
                break
            documents.extend(
                Document(content=item['content'], metadata=item['metadata'])
                for item in batch
            )
        documents.sort(key=lambda doc: doc.metadata['block_index'])
        return documents

    def create_collection(self, collection_name, vector_dim: int, clean=True):
        """Create the collection schema and indexes if missing.

        Builds a schema with an auto-id primary key, analyzed VARCHAR
        content, JSON metadata, a server-side BM25 sparse field derived
        from content, and a dense embedding field with an IVF_FLAT/IP index.

        Args:
            collection_name: Normalized collection name.
            vector_dim: Dimension of the dense embedding vectors.
            clean: Drop an existing collection of the same name first.
        """
        if clean:
            self.client.drop_collection(collection_name)

        if self.client.has_collection(collection_name):
            return

        schema = MilvusClient.create_schema(enable_dynamic_field=True)
        schema.add_field(field_name="id", datatype=DataType.INT64, is_primary=True, auto_id=True)
        # Chinese jieba analyzer, see https://milvus.io/docs/zh/chinese-analyzer.md
        # NOTE(review): mixes built-in "type": "chinese" with a custom
        # tokenizer/filter — verify the server accepts this combination.
        schema.add_field(field_name="content", datatype=DataType.VARCHAR, max_length=5000, enable_analyzer=True,
                         analyzer_params={
                            "tokenizer": "jieba",
                            "filter": ["cnalphanumonly"],
                            "type": "chinese"
                         }, enable_match=True)
        schema.add_field(field_name="metadata", datatype=DataType.JSON, max_length=20000)
        schema.add_field(field_name="sparse_bm25", datatype=DataType.SPARSE_FLOAT_VECTOR)
        schema.add_field(field_name="embedding", datatype=DataType.FLOAT_VECTOR, dim=vector_dim)
        # Server-side BM25: sparse vectors are computed from 'content'.
        bm25_function = Function(
            name="bm25",
            function_type=FunctionType.BM25,
            input_field_names=["content"],
            output_field_names=["sparse_bm25"],
        )
        schema.add_function(bm25_function)

        index_params = self.client.prepare_index_params()
        index_params.add_index(
            field_name="embedding",
            index_name="embedding_index",
            index_type="IVF_FLAT",
            metric_type="IP",
            params={"nlist": 128},
        )
        index_params.add_index(
            field_name="sparse_bm25",
            index_name="sparse_bm25_index",
            index_type="SPARSE_WAND",
            metric_type="BM25"
        )
        self.client.create_collection(
            collection_name=collection_name,
            schema=schema,
            index_params=index_params
        )