# app/services/search_service.py
import math
import pickle
from typing import List, Tuple

import torch
from sklearn.metrics.pairwise import cosine_similarity

from app.core.database import redis_client, chromadb_client
from app.utils.decorators import timing_decorator

# Module-level SearchService singleton; populated by init_search_service().
search_service = None


class SearchService:
    """Hybrid dense/sparse retrieval over a ChromaDB collection.

    Dense vectors live in ChromaDB; sparse (lexical) vectors are stored in
    Redis as pickled ``token -> weight`` dicts under the key
    ``sparse_vector:{collection_name}:{doc_id}``.  Hybrid results can be
    reranked by weighted blend ("default"), a BGE cross-encoder ("bge"),
    or reciprocal rank fusion ("rrf").
    """

    def __init__(self, model, bge_rerank_model, bge_rerank_tokenizer, device):
        # Embedding model; must support encode(..., return_dense=True,
        # return_sparse=True, return_colbert_vecs=True) — presumably a
        # BGE-M3-style model (TODO confirm with caller).
        self.model = model
        # Cross-encoder reranker and its tokenizer, run on `device`.
        self.bge_rerank_model = bge_rerank_model
        self.bge_rerank_tokenizer = bge_rerank_tokenizer
        self.device = device

    def normalize(self, score, min_val, max_val):
        """Min-max normalize ``score`` from [min_val, max_val] into ~[0, 1].

        The 1e-8 epsilon prevents division by zero when min_val == max_val.
        """
        return (score - min_val) / (max_val - min_val + 1e-8)

    @staticmethod
    def get_or_create_collection_with_metadata(client, name):
        """Return collection ``name``, creating it (with a ``created_at``
        timestamp in its metadata) if it does not already exist."""
        try:
            return client.get_collection(name=name, embedding_function=None)
        except Exception:
            # get_collection raises when the collection is missing (or on any
            # other failure); fall back to creating it with metadata.
            from datetime import datetime
            return client.create_collection(
                name=name,
                embedding_function=None,
                metadata={"created_at": datetime.now().isoformat()},
            )

    @staticmethod
    @timing_decorator
    def get_all_doc_ids_optimized(collection, batch_size=1000, max_docs=100000):
        """Page through ``collection`` and return every document id.

        Args:
            collection: ChromaDB collection to scan.
            batch_size: page size per ``collection.get`` call.
            max_docs: safety cap — stop paging once roughly this many ids
                have been fetched.  Generalized from the former hard-coded
                100000 limit; the default preserves the old behavior.

        Returns:
            List of document id strings (may slightly exceed ``max_docs``
            by up to one batch, matching the original cap semantics).
        """
        doc_ids = []
        offset = 0
        while True:
            batch = collection.get(limit=batch_size, offset=offset, include=[])
            if not batch['ids']:
                break
            doc_ids.extend(batch['ids'])
            offset += batch_size
            if offset > max_docs:  # guard against runaway scans of huge collections
                break
        return doc_ids

    def _load_sparse_vector(self, collection_name, doc_id):
        """Fetch and unpickle the sparse vector for ``doc_id`` from Redis.

        Returns the token->weight dict, or ``None`` when the key is missing
        or any error occurs (error is printed, matching prior behavior).

        SECURITY NOTE: ``pickle.loads`` executes arbitrary code if the data
        is attacker-controlled; this is only safe while Redis content is
        written exclusively by this application.
        """
        try:
            redis_key = f"sparse_vector:{collection_name}:{doc_id}"
            sparse_vec_data = redis_client.get(redis_key)
            if not sparse_vec_data:
                return None
            return pickle.loads(sparse_vec_data)
        except Exception as e:
            print(f"获取文档 {doc_id} 的稀疏向量时出错: {str(e)}")
            return None

    @timing_decorator
    def compute_similarity_for_candidates(self, collection, doc_ids, dense_vecs, docs_text,
                                          query_dense, query_sparse, collection_name, alpha):
        """Score hybrid candidates against the query.

        Returns three parallel lists:
            top_results: (blended_score, doc_id, doc_text)
            top_dense:   (raw_dense_cosine, doc_id)
            top_sparse:  (raw_sparse_dot, doc_id)
        Documents whose sparse vector cannot be loaded are skipped entirely.
        """
        if not doc_ids:
            return [], [], []

        # One vectorized cosine_similarity call over the whole candidate
        # matrix instead of one sklearn call per document (same values).
        dense_scores = cosine_similarity([query_dense], dense_vecs)[0]

        top_results, top_dense, top_sparse = [], [], []

        for idx, doc_id in enumerate(doc_ids):
            sparse_vec = self._load_sparse_vector(collection_name, doc_id)
            if sparse_vec is None:
                continue

            score_dense = dense_scores[idx]
            # Sparse score: dot product over the tokens shared with the query.
            score_sparse = sum(query_sparse.get(k, 0.0) * v for k, v in sparse_vec.items())

            # Normalize both onto ~[0, 1] before blending: cosine lives in
            # [-1, 1]; the sparse dot product is assumed in [0, 1] — TODO
            # confirm against the indexing pipeline.
            score_dense_n = self.normalize(score_dense, -1, 1)
            score_sparse_n = self.normalize(score_sparse, 0, 1)
            # Blend with the caller-supplied weight (alpha = dense weight).
            final_score = alpha * score_dense_n + (1 - alpha) * score_sparse_n

            top_results.append((final_score, doc_id, docs_text[idx]))
            top_dense.append((score_dense, doc_id))
            top_sparse.append((score_sparse, doc_id))

        return top_results, top_dense, top_sparse

    @timing_decorator
    def compute_similarity_for_docs(self, collection, doc_ids, query_dense, query_sparse, collection_name, search_type,
                                    alpha):
        """Sparse-only scoring used by the "sparse" search path.

        Returns a list of ``(sparse_score, doc_id, doc_text)`` tuples.

        BUG FIX: this previously returned ``(score, doc_id)`` 2-tuples, which
        the sparse branch of hybrid_search unpacked as 3-tuples — raising
        ValueError — and the document text was never returned at all even
        though it was fetched.  Now the text rides along in the tuple.
        (``query_dense``, ``search_type`` and ``alpha`` are unused but kept
        for signature stability.)
        """
        if not doc_ids:
            return []

        batch_size = 100  # tune to available memory
        top_results = []

        for i in range(0, len(doc_ids), batch_size):
            batch_ids = doc_ids[i:i + batch_size]
            # Only the document text is needed here; embeddings/metadatas
            # were previously fetched but never used.
            batch = collection.get(ids=batch_ids, include=["documents"])
            docs_text = batch["documents"]

            for idx, doc_id in enumerate(batch_ids):
                sparse_vec = self._load_sparse_vector(collection_name, doc_id)
                if sparse_vec is None:
                    continue

                score_sparse = sum(query_sparse.get(k, 0.0) * v for k, v in sparse_vec.items())
                top_results.append((score_sparse, doc_id, docs_text[idx]))

        return top_results

    @timing_decorator
    def hybrid_search(self, query, top_k=5, collection_name="my_knowledge", search_type="hybrid", alpha=0.7,
                      score_threshold=-100.0, rerank_method="default", fallback_strategy=True) -> List[
        Tuple[str, float]]:
        """Search ``collection_name`` for ``query``.

        Args:
            query: query text.
            top_k: number of results to return.
            collection_name: ChromaDB collection to search.
            search_type: "dense", "sparse", or "hybrid".
            alpha: dense weight in the hybrid blend (sparse gets 1 - alpha).
            score_threshold: drop results scoring below this; None disables
                filtering.
            rerank_method: "default" (blended score), "bge" (cross-encoder),
                or "rrf" (reciprocal rank fusion); hybrid mode only.
            fallback_strategy: if thresholding leaves fewer than min(2, top_k)
                hits, fall back to the best few results when the top score
                clears a per-method floor.

        Returns:
            List of (document_text, score) pairs, best first.

        Raises:
            ValueError: on invalid ``search_type`` or ``rerank_method``
                (previously ``assert``, which disappears under ``python -O``).
        """
        if search_type not in ("dense", "sparse", "hybrid"):
            raise ValueError("search_type 不合法")
        if rerank_method not in ("default", "bge", "rrf"):
            raise ValueError("rerank_method 不合法")

        result = self.model.encode([query], return_dense=True, return_sparse=True, return_colbert_vecs=True)

        query_dense = result["dense_vecs"][0]
        colbert_vecs = result["colbert_vecs"][0]
        lexical_weights = result["lexical_weights"][0]
        # NOTE(review): pairing colbert vectors with lexical_weights keys and
        # using row.tobytes() as the token key looks suspect — for BGE-M3,
        # lexical_weights is already a token-id -> weight mapping.  Left
        # unchanged because the sparse vectors indexed in Redis must use the
        # same key scheme; verify against the indexing pipeline.
        query_sparse = {token.tobytes(): weight for token, weight in zip(colbert_vecs, lexical_weights)}
        collection = chromadb_client.get_collection(name=collection_name)

        total_count = collection.count()
        if total_count == 0:
            # Empty collection: ChromaDB rejects n_results == 0, so bail early.
            return []
        # Candidate pool: 10x the requested size, capped by the corpus size.
        candidate_size = min(top_k * 10, total_count)

        if search_type == "sparse":
            return self._search_sparse(collection, query_dense, query_sparse, collection_name, alpha, top_k)
        elif search_type == "dense":
            return self._search_dense(collection, query_dense, top_k)
        else:
            return self._search_hybrid(query, collection, query_dense, query_sparse, collection_name,
                                       alpha, top_k, candidate_size, rerank_method,
                                       score_threshold, fallback_strategy)

    def _search_sparse(self, collection, query_dense, query_sparse, collection_name, alpha, top_k):
        """Full-scan sparse retrieval: score every doc, return top_k (text, score)."""
        doc_ids = self.get_all_doc_ids_optimized(collection)
        scored = self.compute_similarity_for_docs(
            collection, doc_ids, query_dense, query_sparse, collection_name, "sparse", alpha
        )
        # Sort by sparse score only (no tuple comparison on doc ids/text).
        scored.sort(key=lambda t: t[0], reverse=True)
        return [(doc, score) for score, _, doc in scored[:top_k]]

    def _search_dense(self, collection, query_dense, top_k):
        """Pure dense retrieval via ChromaDB's native vector query."""
        dense_results = collection.query(
            query_embeddings=[query_dense.tolist()],
            n_results=top_k,
            # Embeddings are not needed on the dense-only path.
            include=["documents", "metadatas", "distances"],
        )

        doc_ids = dense_results["ids"][0]
        docs_text = dense_results["documents"][0]
        distances = dense_results.get("distances")
        distances = distances[0] if distances else [0] * len(doc_ids)

        # Convert distance to similarity (smaller distance = more similar);
        # similarity = 1 - distance assumes cosine space — TODO confirm the
        # collection's distance metric.
        scores = [1 - dist for dist in distances]
        return list(zip(docs_text, scores))

    def _search_hybrid(self, query, collection, query_dense, query_sparse, collection_name,
                       alpha, top_k, candidate_size, rerank_method, score_threshold, fallback_strategy):
        """Hybrid retrieval: dense recall of a candidate pool, then rerank."""
        dense_results = collection.query(
            query_embeddings=[query_dense.tolist()],
            n_results=candidate_size,
            include=["embeddings", "documents", "metadatas", "distances"],
        )

        doc_ids = dense_results["ids"][0]
        dense_vecs = dense_results["embeddings"][0]
        docs_text = dense_results["documents"][0]

        top_results, top_dense, top_sparse = self.compute_similarity_for_candidates(
            collection, doc_ids, dense_vecs, docs_text, query_dense, query_sparse, collection_name, alpha
        )

        reranked_results = self._rerank(query, top_results, top_dense, top_sparse, rerank_method)
        filtered_results = self._apply_threshold(reranked_results, score_threshold, top_k,
                                                 rerank_method, fallback_strategy)

        print("最终结果：", filtered_results[:top_k])
        return filtered_results[:top_k]

    def _rerank(self, query, top_results, top_dense, top_sparse, rerank_method):
        """Order hybrid candidates, returning (doc_text, score) pairs."""
        if rerank_method == "default":
            # Blended-score order; sort by score alone to avoid comparing
            # doc ids/text on ties.
            ordered = sorted(top_results, key=lambda t: t[0], reverse=True)
            return [(doc, score) for score, _, doc in ordered]

        if rerank_method == "bge":
            docs = [doc for _, _, doc in top_results]
            scores = self.bge_rerank_batch(query, docs)
            ranked = sorted(zip(scores, docs), key=lambda p: p[0], reverse=True)
            return [(doc, score) for score, doc in ranked]

        # "rrf": reciprocal rank fusion of the dense and sparse rankings.
        def to_rank_dict(pairs):  # pairs of (score, doc_id)
            ordered_ids = [doc_id for _, doc_id in sorted(pairs, key=lambda p: p[0], reverse=True)]
            return {doc_id: rank for rank, doc_id in enumerate(ordered_ids)}

        rank_dense = to_rank_dict(top_dense)
        rank_sparse = to_rank_dict(top_sparse)

        rrf_scores = {}
        for doc_id in set(rank_dense) | set(rank_sparse):
            r1 = rank_dense.get(doc_id, 10000)  # huge rank when absent from a list
            r2 = rank_sparse.get(doc_id, 10000)
            rrf_scores[doc_id] = 1 / (60 + r1) + 1 / (60 + r2)  # k=60: standard RRF constant

        id_to_text = {doc_id: text for _, doc_id, text in top_results}
        return [(id_to_text[doc_id], score) for doc_id, score in
                sorted(rrf_scores.items(), key=lambda kv: kv[1], reverse=True)]

    def _apply_threshold(self, reranked_results, score_threshold, top_k, rerank_method, fallback_strategy):
        """Filter (doc, score) pairs by ``score_threshold`` with optional fallback."""
        if score_threshold is None:
            # No threshold: caller slices to top_k.
            return reranked_results[:top_k]

        filtered_results = [(doc, score) for doc, score in reranked_results if score >= score_threshold]

        # Fallback: too few survivors, but the best candidate may still be usable.
        if fallback_strategy and len(filtered_results) < min(2, top_k) and reranked_results:
            best_score = reranked_results[0][1]
            # Per-method "good enough" floor: bge scores are sigmoid outputs,
            # rrf scores are on a ~1/60 scale, blended scores are ~[0, 1].
            min_acceptable = 0.01 if rerank_method == "bge" else 0.003 if rerank_method == "rrf" else 0.3
            if best_score >= min_acceptable:
                # Return the best few results instead of nothing.
                filtered_results = reranked_results[:min(top_k, 3)]
            else:
                # Even the best score is too weak — return nothing.
                filtered_results = []
        return filtered_results

    @timing_decorator
    def bge_rerank_batch(self, query: str, documents: List[str]) -> List[float]:
        """Score (query, doc) pairs with the BGE cross-encoder.

        Returns sigmoid-normalized relevance scores in (0, 1), one per
        document, in input order.  An empty document list returns [] rather
        than crashing the tokenizer.
        """
        if not documents:
            return []

        inputs = self.bge_rerank_tokenizer(
            [query] * len(documents),
            documents,
            return_tensors="pt",
            padding=True,
            truncation=True,
        ).to(self.device)

        # Inference only: skip autograd bookkeeping.
        with torch.no_grad():
            logits = self.bge_rerank_model(**inputs).logits.squeeze(-1)
        raw_scores = logits.cpu().tolist()

        # Map logits into (0, 1) with a sigmoid.
        return [1 / (1 + math.exp(-s)) for s in raw_scores]


def init_search_service(model, rerank_model, rerank_tokenizer, redis_client, chromadb_client):
    """Create the module-level SearchService singleton.

    Picks the first CUDA device when available, otherwise the CPU.
    Note: the ``redis_client`` and ``chromadb_client`` parameters are not
    referenced in this body — the service uses the module-level clients.
    """
    global search_service
    if torch.cuda.is_available():
        device = torch.device("cuda:0")
    else:
        device = torch.device("cpu")
    search_service = SearchService(model, rerank_model, rerank_tokenizer, device)
    print(search_service)
    print("搜索服务初始化完成")
