from typing import List, Dict, Optional, Any, Union
import chromadb
from chromadb.api.models.Collection import Collection

import torch
from transformers import AutoModel, AutoTokenizer
from FlagEmbedding import FlagReranker

from config import CHROMA_HOST, CHROMA_PORT, COLLECTION_NAME, BGE_LARGE_EMBEDDING_MODEL, BGE_RERANKER_EMBEDDING_MODEL


class BGEEncoder:
    """BGE-Large encoder producing L2-normalized [CLS] embeddings for retrieval."""

    # BGE retrieval instruction (Chinese: "Generate a representation for this
    # sentence for retrieving related articles:"). The BGE model card requires
    # prepending it to inputs for retrieval tasks; it was previously duplicated
    # in both encode() and encode_query().
    _RETRIEVAL_INSTRUCTION = "为这个句子生成表示以用于检索相关文章："

    def __init__(self, model_name: str = "BAAI/bge-large-zh"):
        self.model_name = model_name
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModel.from_pretrained(model_name)
        self.model.eval()  # inference only: disable dropout etc.

    def encode(self, texts: List[str], batch_size: int = 32) -> List[List[float]]:
        """Encode texts into unit-length embedding vectors, in batches.

        Args:
            texts: documents (or queries) to embed.
            batch_size: number of texts tokenized and forwarded per batch.

        Returns:
            One embedding (list of floats) per input text, in input order.
        """
        all_embeddings: List[List[float]] = []

        for start in range(0, len(texts), batch_size):
            batch_texts = texts[start:start + batch_size]

            inputs = self.tokenizer(
                [self._RETRIEVAL_INSTRUCTION + text for text in batch_texts],
                padding=True,
                truncation=True,
                return_tensors="pt",
                max_length=512,
            )

            with torch.no_grad():
                outputs = self.model(**inputs)
                # Take the [CLS] token embedding and L2-normalize it so that
                # cosine similarity reduces to a dot product downstream.
                embeddings = outputs.last_hidden_state[:, 0]
                embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
                all_embeddings.extend(embeddings.cpu().numpy().tolist())

        return all_embeddings

    def encode_query(self, query: str) -> List[float]:
        """Encode a single query string into a unit-length embedding vector.

        Delegates to encode() so the tokenization/normalization pipeline is
        defined in exactly one place.
        """
        return self.encode([query])[0]


class BGEReranker:
    """BGE cross-encoder reranker for precision re-ordering of recalled documents."""

    def __init__(self, model_name: str = "BAAI/bge-reranker-base"):
        # fp16 halves memory use and speeds up inference.
        self.reranker = FlagReranker(model_name, use_fp16=True)

    def rerank(self, query: str, documents: List[str], top_k: int = 5) -> List[Dict[str, Any]]:
        """Score each document against the query and return the top_k best.

        Args:
            query: the user query text.
            documents: candidate document texts to re-rank.
            top_k: maximum number of results to return.

        Returns:
            Dicts with keys "document", "score" (normalized relevance) and
            "original_rank" (the document's index in the input list), sorted
            by score descending.
        """
        if not documents:
            return []

        # Build (query, document) pairs for the cross-encoder.
        pairs = [[query, doc] for doc in documents]

        with torch.no_grad():
            scores = self.reranker.compute_score(pairs, normalize=True)

        # BUGFIX: FlagReranker.compute_score returns a bare float when given a
        # single pair; wrap scalars so zip() below works for any input size.
        if not hasattr(scores, "__len__"):
            scores = [scores]

        scored_docs = [
            {"document": doc, "score": float(score), "original_rank": rank}
            for rank, (doc, score) in enumerate(zip(documents, scores))
        ]

        # Highest relevance first.
        scored_docs.sort(key=lambda item: item["score"], reverse=True)

        return scored_docs[:top_k]


class ChromaClient:
    """Chroma-backed retrieval client.

    Combines three collaborators:
      * a remote Chroma collection for vector storage,
      * a BGEEncoder for embedding documents and queries,
      * a BGEReranker for cross-encoder re-ranking of recalled candidates.

    Retrieval is two-stage: a coarse vector recall of up to
    ``_COARSE_RECALL_SIZE`` candidates, then a precision rerank down to the
    requested ``n_results``.
    """

    # Number of candidates fetched in the coarse-recall stage before reranking.
    # Previously hard-coded as 100 in two separate methods.
    _COARSE_RECALL_SIZE = 100

    def __init__(self):
        # NOTE: eagerly connects to the Chroma server and loads both BGE
        # models — construction is slow and has network side effects.
        self.client = chromadb.HttpClient(host=CHROMA_HOST, port=CHROMA_PORT)
        self.collection: Collection = self.client.get_or_create_collection(COLLECTION_NAME)

        self.encoder = BGEEncoder(BGE_LARGE_EMBEDDING_MODEL)
        self.reranker = BGEReranker(BGE_RERANKER_EMBEDDING_MODEL)

    # ------------------------------------------------------------ helpers

    @staticmethod
    def _rows_to_items(results: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Convert a raw collection.get() payload into a list of item dicts."""
        metadatas = results["metadatas"] if results.get("metadatas") else None
        return [
            {
                "id": results["ids"][i],
                "content": results["documents"][i],
                "metadata": metadatas[i] if metadatas else None,
            }
            for i in range(len(results["ids"]))
        ]

    @staticmethod
    def _build_query_kwargs(query_embedding: List[float],
                            n_results: int,
                            metadata_filter: Optional[Dict[str, Any]],
                            document_filter: Optional[str],
                            include: List[str]) -> Dict[str, Any]:
        """Assemble keyword arguments for collection.query().

        Filters are only added when provided, matching Chroma's expectation
        that absent filters are omitted rather than passed as None.
        """
        query_kwargs: Dict[str, Any] = {
            "query_embeddings": [query_embedding],
            "n_results": n_results,
            "include": include,
        }
        if metadata_filter:
            query_kwargs["where"] = metadata_filter
        if document_filter:
            query_kwargs["where_document"] = {"$contains": document_filter}
        return query_kwargs

    def _coarse_recall(self,
                       query_text: str,
                       metadata_filter: Optional[Dict[str, Any]],
                       document_filter: Optional[str]):
        """Encode the query and fetch coarse-recall candidates.

        Returns:
            (documents, distances) — both empty lists when nothing matched.
            Distances fall back to 1.0 per document if Chroma omits them.
        """
        query_embedding = self.encoder.encode_query(query_text)
        query_kwargs = self._build_query_kwargs(
            query_embedding,
            self._COARSE_RECALL_SIZE,
            metadata_filter,
            document_filter,
            include=["documents", "metadatas", "distances"],
        )
        results = self.collection.query(**query_kwargs)

        if not results["documents"] or not results["documents"][0]:
            return [], []

        documents = results["documents"][0]
        distances = results["distances"][0] if results.get("distances") else [1.0] * len(documents)
        return documents, distances

    # --------------------------------------------------------------- CRUD

    def add_item(self, item_id: str, content: str, metadata: Optional[Dict[str, Any]] = None) -> None:
        """Upsert a single document, embedding its content with BGE-Large."""
        embedding = self.encoder.encode([content])[0]

        self.collection.upsert(
            ids=[item_id],
            documents=[content],
            metadatas=[metadata] if metadata else None,
            embeddings=[embedding],
        )

    def batch_add_items(self, items: List[Dict[str, Any]]) -> None:
        """Upsert many items at once.

        Each item is a dict with keys "id", "content" and optionally
        "metadata". Embeddings are generated in one batched encoder call
        instead of per item.
        """
        if not items:
            return

        ids = [item["id"] for item in items]
        documents = [item["content"] for item in items]
        metadatas = [item.get("metadata") for item in items]

        embeddings = self.encoder.encode(documents)

        self.collection.upsert(
            ids=ids,
            documents=documents,
            metadatas=metadatas,
            embeddings=embeddings,
        )

    def get_item(self, item_id: str) -> Optional[Dict[str, Any]]:
        """Fetch one item by id, or None if it does not exist."""
        items = self._rows_to_items(self.collection.get(ids=[item_id]))
        return items[0] if items else None

    def get_items_by_metadata(self, metadata_filter: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Fetch every item whose metadata matches the given filter."""
        return self._rows_to_items(self.collection.get(where=metadata_filter))

    def get_all_items(self) -> List[Dict[str, Any]]:
        """Fetch every item in the collection."""
        return self._rows_to_items(self.collection.get())

    def get_items_by_type(self, type_name: str) -> List[Dict[str, Any]]:
        """Fetch every item whose metadata "type" equals type_name."""
        return self.get_items_by_metadata({"type": type_name})

    def delete_item(self, item_id: str) -> None:
        """Delete one item by id."""
        self.collection.delete(ids=[item_id])

    # ------------------------------------------------------------ queries

    def query(self,
              query_embedding: List[float],
              n_results: int = 5,
              metadata_filter: Optional[Dict[str, Any]] = None,
              document_filter: Optional[str] = None) -> List[str]:
        """Vector-only similarity search (legacy interface, no reranking).

        Args:
            query_embedding: pre-computed query vector.
            n_results: number of results to return.
            metadata_filter: Chroma metadata `where` filter.
            document_filter: substring the document text must contain.

        Returns:
            List[str]: matching document contents.
        """
        query_kwargs = self._build_query_kwargs(
            query_embedding, n_results, metadata_filter, document_filter,
            include=["documents"],
        )
        results = self.collection.query(**query_kwargs)

        # Preserve the original return shape: a flat list of documents.
        return results["documents"][0] if results["documents"] else []

    def query_with_reranker(self,
                            query_text: str,
                            n_results: int = 5,
                            metadata_filter: Optional[Dict[str, Any]] = None,
                            document_filter: Optional[str] = None) -> List[str]:
        """Two-stage search: BGE-Large coarse recall + BGE-Reranker precision rank.

        Args:
            query_text: raw query text (embedded internally).
            n_results: number of results to return.
            metadata_filter: Chroma metadata `where` filter.
            document_filter: substring the document text must contain.

        Returns:
            List[str]: document contents, best first.
        """
        documents, _ = self._coarse_recall(query_text, metadata_filter, document_filter)
        if not documents:
            return []

        # Too few candidates to benefit from reranking — return them as-is.
        if len(documents) <= n_results:
            return documents[:n_results]

        reranked_results = self.reranker.rerank(query_text, documents, top_k=n_results)
        return [result["document"] for result in reranked_results]

    def query_with_scores(self,
                          query_text: str,
                          n_results: int = 5,
                          metadata_filter: Optional[Dict[str, Any]] = None,
                          document_filter: Optional[str] = None) -> List[Dict[str, Any]]:
        """Two-stage search that also reports scores.

        Returns:
            List[Dict]: per result — "content", "reranker_score",
            "original_rank" (position in the coarse recall) and
            "vector_distance" (coarse-recall distance, 1.0 if unavailable).
        """
        documents, distances = self._coarse_recall(query_text, metadata_filter, document_filter)
        if not documents:
            return []

        reranked_results = self.reranker.rerank(query_text, documents, top_k=n_results)

        scored_results = []
        for result in reranked_results:
            rank = result["original_rank"]
            scored_results.append({
                "content": result["document"],
                "reranker_score": result["score"],
                "original_rank": rank,
                # Guard against a rank beyond the distances list.
                "vector_distance": distances[rank] if rank < len(distances) else 1.0,
            })

        return scored_results

    def query_by_type(self,
                      query_embedding: List[float],
                      type_name: str,
                      n_results: int = 5,
                      title_contains: Optional[str] = None,
                      related_contains: Optional[str] = None) -> List[str]:
        """Type-filtered vector search (legacy interface).

        Args:
            query_embedding: pre-computed query vector.
            type_name: required metadata "type" value.
            n_results: number of results to return.
            title_contains: keyword the "title" metadata should contain.
            related_contains: keyword the "related" metadata should contain.

        Returns:
            List[str]: matching document contents.
        """
        # NOTE(review): Chroma's metadata `where` filters document `$contains`
        # only for `where_document`, not metadata fields, and multiple
        # top-level keys may require an explicit `$and` in newer versions —
        # confirm against the deployed Chroma release.
        metadata_filter: Dict[str, Any] = {"type": type_name}

        if title_contains:
            metadata_filter["title"] = {"$contains": title_contains}

        if related_contains:
            metadata_filter["related"] = {"$contains": related_contains}

        return self.query(
            query_embedding=query_embedding,
            n_results=n_results,
            metadata_filter=metadata_filter,
        )

    def query_by_type_with_reranker(self,
                                    query_text: str,
                                    type_name: str,
                                    n_results: int = 5,
                                    title_contains: Optional[str] = None,
                                    related_contains: Optional[str] = None) -> List[str]:
        """Type-filtered two-stage search (coarse recall + rerank).

        Args:
            query_text: raw query text (embedded internally).
            type_name: required metadata "type" value.
            n_results: number of results to return.
            title_contains: keyword the "title" metadata should contain.
            related_contains: keyword the "related" metadata should contain.

        Returns:
            List[str]: document contents, best first.
        """
        # NOTE(review): same `$contains`-in-metadata caveat as query_by_type.
        metadata_filter: Dict[str, Any] = {"type": type_name}

        if title_contains:
            metadata_filter["title"] = {"$contains": title_contains}

        if related_contains:
            metadata_filter["related"] = {"$contains": related_contains}

        return self.query_with_reranker(
            query_text=query_text,
            n_results=n_results,
            metadata_filter=metadata_filter,
        )


# Module-level singleton, constructed at import time. NOTE: this connects to
# the Chroma server and loads both BGE models as an import side effect.
chroma_client = ChromaClient()