import pickle
from milvus_model.sparse.bm25 import build_default_analyzer, BM25EmbeddingFunction
from pymilvus import MilvusClient
from pymilvus import connections, db, Collection, FieldSchema, CollectionSchema, DataType
from milvus.config import VEC_DIM, DENSE_INDEX_TYPE, METRIC_TYPE, EFC, M, SPARSE_INDEX_TYPE
from preprocess.data_utils import get_bge_embedding, get_costumed_features


# 创建Milvus集合
# Create the Milvus collection and its indexes
def create_collection(collection_name):
    """Create the Milvus collection used by MilvusOP and build all indexes.

    The schema carries dense and sparse vector fields for both the document
    text and the document name, plus scalar VARCHAR fields.
    """
    print('Creating Collection ...')
    dim = VEC_DIM
    long_text_len = 50000  # upper bound for the long VARCHAR fields
    schema = CollectionSchema(
        fields=[
            FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True),
            FieldSchema(name="text_dense_vectors", dtype=DataType.FLOAT_VECTOR, dim=dim),
            FieldSchema(name="name_dense_vectors", dtype=DataType.FLOAT_VECTOR, dim=dim),
            FieldSchema(name="text_sparse_vectors", dtype=DataType.SPARSE_FLOAT_VECTOR),
            FieldSchema(name="name_sparse_vectors", dtype=DataType.SPARSE_FLOAT_VECTOR),
            FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=long_text_len),
            FieldSchema(name="name", dtype=DataType.VARCHAR, max_length=20000),
            FieldSchema(name="added_value", dtype=DataType.VARCHAR, max_length=long_text_len),
            FieldSchema(name="image_path", dtype=DataType.VARCHAR, max_length=25000),
            FieldSchema(name="type", dtype=DataType.VARCHAR, max_length=20),
        ],
        enable_dynamic_field=False,
    )
    collection = Collection(name=collection_name, schema=schema)

    # Scalar index for fast retrieval/filtering on the VARCHAR field.
    collection.create_index("added_value", {'index_type': 'INVERTED'})

    # Dense-vector indexes. A larger efConstruction may improve index quality
    # at the cost of build time; M caps the max outgoing graph connections —
    # higher M raises accuracy and runtime for a fixed ef/efConstruction.
    for field_name in ("text_dense_vectors", "name_dense_vectors"):
        collection.create_index(field_name, {
            'index_type': DENSE_INDEX_TYPE,
            'metric_type': METRIC_TYPE,
            'params': {'efConstruction': EFC, 'M': M},
        })

    # Sparse-vector indexes. drop_ratio_build is the ratio of small vector
    # values to be dropped during indexing.
    for field_name in ("text_sparse_vectors", "name_sparse_vectors"):
        collection.create_index(field_name, {
            'index_type': SPARSE_INDEX_TYPE,
            'metric_type': METRIC_TYPE,
            'params': {"drop_ratio_build": 0.2},
        })

    print('Completed!.')
    return collection


# 获取查询表达式
# Build the scalar-filter expression for a query
def get_expr(queries):
    """Build a Milvus scalar-filter expression from extracted query features.

    Returns None when feature extraction yields nothing; otherwise an
    AND-joined chain of `added_value like "%...%"` clauses, one per
    newline-separated feature token.
    """
    features = get_costumed_features(queries)[0]
    if not features:
        return None
    clauses = [f'added_value like "%{token}%"' for token in features.split('\n')]
    return " and ".join(clauses)


# Milvus操作类
# Milvus operations wrapper
class MilvusOP:
    """Wrapper around one Milvus collection offering scalar, BM25-sparse,
    dense and weighted hybrid search over text/name vector fields."""

    def __init__(self, db_name='state_vector_db', collection_name="state_grid_db"):
        super().__init__()
        host = "127.0.0.1"
        port = "19530"
        uri = f'http://{host}:{port}'
        # Connect to Milvus and make sure the target database exists.
        connections.connect(host=host, port=port)
        if db_name not in db.list_database():
            db.create_database(db_name)
        db.using_database(db_name)
        self.client = MilvusClient(uri=uri, db_name=db_name)
        # Create the collection on first use, otherwise attach to it.
        if collection_name not in self.client.list_collections():
            self.collection = create_collection(collection_name)
        else:
            self.collection = Collection(name=collection_name)
        self.collection.load()  # load segments into memory so searches work
        self.output_fields = ["name", 'text', 'type']
        # BM25 sparse embedding functions (Chinese analyzer); the throwaway
        # analyzer call below appears to be a warm-up / init check.
        analyzer = build_default_analyzer(language="zh")  # en zh de jp
        analyzer('稀疏向量函数初始化...')
        # NOTE(review): absolute Windows paths — consider moving into config.
        self.text_bm25 = BM25EmbeddingFunction(analyzer)
        self.text_bm25.load(r'D:\Users\JHC258\projects\文本检索\vec_db_data\bm25_emf_path\text_bm25_ef.json')
        self.name_bm25 = BM25EmbeddingFunction(analyzer)
        self.name_bm25.load(r'D:\Users\JHC258\projects\文本检索\vec_db_data\bm25_emf_path\name_bm25_ef.json')

    # Scalar (substring-filter) search
    def scalar_search(self, queries, limit=10):
        """Query by AND-joined `added_value like "%...%"` clauses built from
        the extracted query features.

        Returns {} when no features could be extracted from the query.
        """
        features = get_costumed_features(queries)[0]
        if len(features) == 0:
            return {}
        expr = " and ".join([f'added_value like "%{q}%"' for q in features.split('\n')])
        return self.collection.query(expr, output_fields=self.output_fields, limit=limit)

    # Bulk insert from a pickle dump
    def insert_data(self, db_data_path):
        """Insert pre-built records loaded from a pickle file.

        SECURITY: pickle.load executes arbitrary code on load — only feed
        this trusted, locally produced files.
        """
        with open(db_data_path, 'rb') as file:
            vb_data = pickle.load(file)
        self.collection.insert(vb_data)

    def _sparse_search(self, bm25, anns_field, queries, limit):
        """Run a BM25 sparse-vector search on `anns_field`; returns the hit
        list for the first query. Note: no expr filter is applied here (the
        original deliberately kept it disabled for sparse search)."""
        search_params = {"metric_type": "IP", "params": {}}
        hit_res = self.collection.search(
            bm25.encode_queries(queries),
            anns_field=anns_field,
            limit=limit,
            output_fields=['name', 'type', 'text', 'image_path'],
            param=search_params,
        )
        return hit_res[0]

    def _dense_search(self, anns_field, queries, limit, expr):
        """Run a dense-vector (BGE embedding) search on `anns_field` with an
        optional scalar filter expression; returns hits for the first query."""
        search_params = {"metric_type": "IP", "params": {}}
        res = self.collection.search(
            get_bge_embedding(queries),
            anns_field=anns_field,
            limit=limit,
            output_fields=['name', 'type', 'text', 'image_path'],
            param=search_params,
            expr=expr
        )
        return res[0]

    # Sparse-vector search over document text
    def text_sparse_search(self, queries, limit=10, expr=None):
        """BM25 sparse search on the text field (expr currently unused)."""
        return self._sparse_search(self.text_bm25, "text_sparse_vectors", queries, limit)

    # Sparse-vector search over document name
    def name_sparse_search(self, queries, limit=10, expr=None):
        """BM25 sparse search on the name field (expr currently unused)."""
        return self._sparse_search(self.name_bm25, "name_sparse_vectors", queries, limit)

    # Dense-vector search over document name
    def name_dense_search(self, queries, limit=10, expr=None):
        """Dense search on the name field, optionally filtered by expr."""
        return self._dense_search("name_dense_vectors", queries, limit, expr)

    # Dense-vector search over document text
    def text_dense_search(self, queries, limit=10, expr=None):
        """Dense search on the text field, optionally filtered by expr."""
        return self._dense_search("text_dense_vectors", queries, limit, expr)

    @staticmethod
    def _merge_hits(res, hits, weight, tag):
        """Fold one channel's hits into the shared score dict `res`.

        Scores are max-normalized within the result set, scaled by the
        channel weight, and summed per document id; 'remark' records which
        channels contributed (e.g. 'NS+TD').
        """
        if len(hits) == 0:
            return
        max_score = max(hits.distances)
        for item in hits:
            mid = item.id
            score = item.score / max_score * weight
            if mid in res:
                res[mid]['score'] = res[mid]['score'] + score
                res[mid]['remark'] = res[mid]['remark'] + '+' + tag
            else:
                data_item = item.fields
                data_item['id'] = item.id
                data_item['remark'] = tag
                data_item['score'] = score
                res[mid] = data_item

    # Hybrid (multi-channel fused) search
    def hybrid_search(self, queries, limit=10, config=None):
        """Weighted fusion of name-sparse ('ns'), text-sparse ('ts') and
        text-dense ('td') search channels; 'e' > 0 enables the scalar filter
        expression for the dense channel.

        config maps channel key -> weight; a weight of 0 disables the
        channel. Defaults to all channels enabled with weight 1.
        Returns the top `limit` merged results sorted by fused score.
        """
        ctx_limit = 50  # per-channel candidate pool size before fusion
        res = {}
        # BUG FIX: apply the None default BEFORE indexing config — the
        # original read config['e'] first and crashed on config=None.
        if config is None:
            config = {'ns': 1, 'ts': 1, 'e': 1, 'td': 1}
        expr = get_expr(queries) if config['e'] > 0 else None
        if config['ns'] > 0:
            self._merge_hits(res, self.name_sparse_search(queries, ctx_limit, expr), config['ns'], 'NS')
        if config['ts'] > 0:
            self._merge_hits(res, self.text_sparse_search(queries, ctx_limit, expr), config['ts'], 'TS')
        if config['td'] > 0:
            self._merge_hits(res, self.text_dense_search(queries, ctx_limit, expr), config['td'], 'TD')
        rerank_score = sorted(res.values(), key=lambda x: x['score'], reverse=True)
        return rerank_score[0:limit]
