import datetime
import math
import redis
import os
import sqlite3
import chromadb
import uuid
import pickle
import docx2txt
import fitz
import torch
import json
import contextlib
import time

from functools import wraps
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from fastapi import FastAPI, UploadFile, Depends, File, HTTPException, Form
from fastapi.middleware.cors import CORSMiddleware
from fastapi.security import APIKeyHeader
from fastapi.responses import JSONResponse, StreamingResponse
from pydantic import BaseModel
from typing import List, Optional, Dict, Tuple, Literal, Set
from FlagEmbedding import BGEM3FlagModel
from sklearn.metrics.pairwise import cosine_similarity
from openai import OpenAI

API_ERROR_OUTPUT = "API 调用失败"  # generic error text for failed external API calls
API_MAX_RETRY = 3  # maximum retries for external API calls
API_KEY = "your_shared_secret_key"  # shared secret; should move to env/config centre (see note below)
api_key_header = APIKeyHeader(name="X-API-KEY", auto_error=True)
# Redis connection — synchronous client, raw bytes (sparse vectors are pickled)
redis_client = redis.Redis(host='localhost', port=6379, db=0, decode_responses=False)


# 鉴权函数
# 可以在接口中如此使用：async def query(query: Query, api_key: str = Depends(verify_api_key)):
# 即增加Depends(function_name)参数，来进行鉴权
# 进一步，可以将API_KEY定义为较复杂的，并且放在环境变量或者配置中心中动态获取，避免直接写在代码中造成安全问题
async def verify_api_key(api_key: str = Depends(api_key_header)):
    """Validate the X-API-KEY header against the shared secret.

    Raises:
        HTTPException: 403 when the supplied key does not match API_KEY.
    """
    if api_key == API_KEY:
        return
    raise HTTPException(
        status_code=403,
        detail="Invalid API key",
        headers={"WWW-Authenticate": "Bearer"},
    )


@contextlib.contextmanager
def get_db_connection(db_path, timeout=20.0):
    """Yield a SQLite connection with transactional semantics.

    Commits when the managed block completes normally, rolls back when it
    raises, and always closes the connection afterwards.

    Args:
        db_path: path to the SQLite database file.
        timeout: lock-wait timeout in seconds passed to sqlite3.connect.
    """
    connection = sqlite3.connect(db_path, timeout=timeout)
    try:
        yield connection
    except Exception:
        # Undo any partial writes before propagating the error.
        connection.rollback()
        raise
    else:
        connection.commit()
    finally:
        connection.close()


app = FastAPI()

# CORS: wide open for development; tighten allow_origins before production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
    expose_headers=["*"]  # important: expose all response headers to the browser
)

# Persistent ChromaDB client (stores dense vectors, documents and metadata).
chromadb_client = chromadb.PersistentClient(path="./chroma_db")

# Embedding model (BGE-M3).
model = BGEM3FlagModel("../models/bge-m3", use_fp16=True)  # fp16 supports GPU inference
# Globals for the rerank model, populated by init_rerank_model() at startup.
bge_rerank_tokenizer = None
bge_rerank_model = None
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def init_rerank_model():
    """Load the BGE reranker tokenizer and cross-encoder into the module globals."""
    global bge_rerank_tokenizer, bge_rerank_model
    path = "../models/bge-reranker-v2-m3"
    bge_rerank_tokenizer = AutoTokenizer.from_pretrained(path)
    reranker = AutoModelForSequenceClassification.from_pretrained(path)
    # Inference-only: switch to eval mode and move onto the selected device.
    bge_rerank_model = reranker.eval().to(device)


def timing_decorator(func):
    """Decorator that prints the wall-clock execution time of a sync function."""
    @wraps(func)
    def timed(*args, **kwargs):
        started = time.perf_counter()
        value = func(*args, **kwargs)
        elapsed_time = time.perf_counter() - started
        print(f"{func.__name__} 执行时间: {elapsed_time:.4f} 秒")
        return value

    return timed


# 异步函数的计时装饰器
# Timing decorator for coroutine functions.
def async_timing_decorator(func):
    """Decorator that prints the wall-clock execution time of an async function."""
    @wraps(func)
    async def timed(*args, **kwargs):
        started = time.perf_counter()
        value = await func(*args, **kwargs)
        elapsed_time = time.perf_counter() - started
        print(f"{func.__name__} 执行时间: {elapsed_time:.4f} 秒")
        return value

    return timed


# Initialise the rerank model once at import/startup time.
init_rerank_model()


class SimpleQuery(BaseModel):
    """Request body for /simple_query."""
    keyword: str  # free-text search query
    topK: Optional[int] = 3  # number of results to return
    collectionName: Optional[str] = "my_knowledge"  # target ChromaDB collection


class Query(BaseModel):
    """Request body for /query, /generate and /stream_generate."""
    keyword: str  # user query text
    topK: int  # number of results to return
    collectionName: str  # ChromaDB collection to search
    searchMethod: Literal["dense", "sparse", "hybrid"] = "hybrid"  # retrieval mode
    rerankWeight: Optional[float] = 0.7  # dense weight (alpha) in hybrid scoring
    rerankMethod: Literal["default", "bge", "rrf"] = "bge"  # rerank strategy
    scoreThreshold: Optional[float] = 0.5  # drop results scoring below this
    thinking: Optional[bool] = False  # enable model "thinking" mode on LLM endpoints


class Rerank(BaseModel):
    """Request body for the /rerank debug endpoint."""
    query: str  # query to score documents against
    documents: List[str]  # candidate documents to rerank


class Document(BaseModel):
    """A single document to add to the knowledge base."""
    text: str  # raw document text
    metadata: Optional[Dict] = None  # arbitrary metadata; enriched server-side
    collection_name: Optional[str] = "my_knowledge"  # target collection


class DocumentBatch(BaseModel):
    """A batch of documents (bulk-ingest payload)."""
    documents: List[Document]


@app.get("/")
async def root(api_key: str = Depends(verify_api_key)):
    """Liveness probe: returns a fixed greeting when the server is up."""
    payload = {"message": "Hello World"}
    return payload


@app.post("/add_document")
@async_timing_decorator
async def add_document(doc: Document, api_key: str = Depends(verify_api_key)):
    """Add a single document to the knowledge base.

    Encodes the text with BGE-M3 (dense + sparse), stores the dense vector
    and metadata in ChromaDB and the pickled sparse vector in Redis. If the
    Redis write fails, the ChromaDB entry is rolled back.
    """
    # encode() returns batched outputs (first axis = sentences); [0] selects
    # the single input text. .tolist() converts numpy arrays to list[float].
    result = model.encode([doc.text], return_dense=True, return_sparse=True, return_colbert_vecs=True)
    dense_vec = result["dense_vecs"][0].tolist()
    colbert_vecs_batch = result["colbert_vecs"]
    lexical_weights_batch = result["lexical_weights"]

    # NOTE(review): keys are the raw bytes of colbert token vectors zipped with
    # lexical weights — this mirrors the query-side construction in
    # hybrid_search so lookups stay consistent; confirm it matches the intended
    # BGE-M3 sparse format.
    sparse_vecs = [
        {token.tobytes(): float(weight) for token, weight in zip(colbert_vecs, lexical_weights)}
        for colbert_vecs, lexical_weights in zip(colbert_vecs_batch, lexical_weights_batch)
    ]

    sparse_vec = sparse_vecs[0]  # single-text batch

    collection = get_or_create_collection_with_metadata(chromadb_client, name=doc.collection_name)

    # Generate the document id.
    doc_id = str(uuid.uuid4())

    # Enrich metadata with source type and upload timestamp.
    metadata = doc.metadata or {}
    metadata["source_type"] = metadata.get("source_type", "manual_input")
    metadata["uploaded_at"] = datetime.datetime.now().isoformat()
    # File uploads carry a filename; manual input gets placeholder indices.
    if "filename" not in metadata and metadata["source_type"] == "manual_input":
        metadata["filename"] = "manual_input"
        metadata["doc_start_index"] = "0"
        metadata["doc_end_index"] = "0"

    collection.add(
        ids=[doc_id],
        embeddings=[dense_vec],
        documents=[doc.text],
        metadatas=[metadata]
    )
    # (Removed: leftover SQLite code that only created an unused
    # sparse_db/ directory — sparse vectors live in Redis.)

    # Store the sparse vector in Redis.
    # BUG FIX: redis_client is the synchronous redis.Redis client; set()
    # returns a bool, not an awaitable, so the previous `await` raised
    # TypeError at runtime.
    try:
        redis_key = f"sparse_vector:{doc.collection_name}:{doc_id}"
        redis_client.set(redis_key, pickle.dumps(sparse_vec))
    except Exception as e:
        # Roll back the dense entry so the two stores stay consistent.
        collection.delete(ids=[doc_id])
        raise HTTPException(status_code=500, detail=f"稀疏向量存储失败: {str(e)}")

    return {
        "status": "success",
        "id": doc_id,
        "sparse_vector_stored": True
    }


@app.delete("/delete_document")
async def delete_document(doc_id: str, collection_name: str = "my_knowledge", api_key: str = Depends(verify_api_key)):
    """Delete one document from ChromaDB and its sparse vector from Redis."""
    collection = chromadb_client.get_collection(name=collection_name)
    collection.delete(ids=[doc_id])
    # Remove the matching sparse vector.
    # BUG FIX: redis_client is synchronous; delete() returns an int, not an
    # awaitable, so the previous `await` raised TypeError at runtime.
    try:
        redis_key = f"sparse_vector:{collection_name}:{doc_id}"
        redis_client.delete(redis_key)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"删除稀疏向量失败: {str(e)}")

    return {"status": "success"}


@app.post("/simple_query")
async def simple_query(query: SimpleQuery, api_key: str = Depends(verify_api_key)):
    """Simple retrieval using defaults: hybrid search, default rerank, weight 0.7, threshold 0.5."""
    hits = hybrid_search(query.keyword, query.topK, query.collectionName)
    return {"results": hits}


@app.post("/query")
@async_timing_decorator
async def query(query: Query, api_key: str = Depends(verify_api_key)):
    """Advanced retrieval: dense/sparse/hybrid search with default/bge/rrf reranking."""
    print(query)
    hits = hybrid_search(
        query.keyword,
        query.topK,
        query.collectionName,
        query.searchMethod,
        query.rerankWeight,
        query.scoreThreshold,
        query.rerankMethod,
    )
    return {"results": hits}


@app.post("/rerank")
@async_timing_decorator
async def rerank(req: Rerank, api_key: str = Depends(verify_api_key)):
    """Debug endpoint: score documents against a query with the BGE reranker.

    BUG FIX: this handler must be `async def` — async_timing_decorator awaits
    the wrapped callable's result, and awaiting a plain function's list return
    value raised TypeError on every request.
    """
    scores = bge_rerank_batch(req.query, req.documents)
    # Sort (score, document) pairs best-first; ties fall back to document text.
    reranked = sorted(zip(scores, req.documents), reverse=True)
    return [{"document": doc, "score": score} for score, doc in reranked]


@app.get("/get_document")
async def get_document(doc_id: str, collection_name: str = "my_knowledge", api_key: str = Depends(verify_api_key)):
    """Fetch one or more documents by id (comma-separated) via GET."""
    requested_ids = doc_id.split(",")
    collection = chromadb_client.get_collection(name=collection_name)

    # Pull documents and metadata from ChromaDB in one call.
    result = collection.get(ids=requested_ids, include=["documents", "metadatas"])

    if not result['ids']:
        raise HTTPException(status_code=404, detail="未找到指定文档")

    documents = []
    for idx, found_id in enumerate(result['ids']):
        documents.append({
            "id": found_id,
            "text": result['documents'][idx],
            "collection_name": collection_name,
            "metadata": result['metadatas'][idx]
        })
    return documents


@app.post("/upload")
@async_timing_decorator
async def upload(
        file: List[UploadFile] = File(...),
        chunk_size: int = Form(...),
        chunk_overlap: int = Form(...),
        collection_name: str = Form(...),
        api_key: str = Depends(verify_api_key)
):
    """Upload multiple files (each < 10MB, at most 50 files, < 500MB total;
    doc/docx/txt/pdf only), chunk them, and index into the collection."""
    validation_error = await validate_files(file)
    if validation_error is not None:
        return validation_error
    try:
        await embed_and_store_files(file, chunk_size, chunk_overlap, collection_name)
    except Exception as e:
        # Log the full traceback for diagnosis (stdout; no logger configured here).
        import traceback
        error_detail = f"文件上传失败: {str(e)}\n{traceback.format_exc()}"
        print(error_detail)
        return JSONResponse(content={"status": "error", "message": str(e), "detail": error_detail}, status_code=500)
    return JSONResponse(content={"status": "success", "message": f"{len(file)}个文件上传成功"}, status_code=200)


@app.get("/collection_info")
async def collection_info(collection_name: Optional[str] = None, api_key: str = Depends(verify_api_key)):
    """Return knowledge-base details for one collection, or a summary of all.

    Single-collection fields:
    - name / count / dimension / distance_function / storage_path
    - sparse_db: Redis sparse-vector store info (with best-effort vector_count)
    - created_at / update_at / source_files
    """
    if not collection_name:
        # No name given: summarise every collection.
        collections = chromadb_client.list_collections()
        return [{"name": col.name, "count": col.count()} for col in collections]

    try:
        collection = chromadb_client.get_collection(name=collection_name)
    except Exception:
        # BUG FIX: chromadb may raise exceptions other than ValueError for a
        # missing collection; map any lookup failure to 404.
        raise HTTPException(status_code=404, detail="Collection not found")

    # Sparse-vector store (Redis) connection info.
    sparse_info = {
        "type": "Redis",
        "host": "localhost",
        "port": 6379,
        "db": 0
    }

    # Best-effort count of stored sparse vectors for this collection.
    try:
        pattern = f"sparse_vector:{collection_name}:*"
        sparse_info["vector_count"] = len(list(redis_client.scan_iter(match=pattern)))
    except Exception:
        sparse_info["vector_count"] = -1  # -1 marks a failed lookup

    # BUG FIX: collection.metadata can be None; guard before membership/.get().
    meta = collection.metadata or {}
    source_files = meta["source_files"].split(",") if "source_files" in meta else []

    return {
        "name": collection.name,
        "count": collection.count(),
        "dimension": 1024,  # BGE-M3 output dimension
        "distance_function": meta.get("distance_function_name", "cosine"),
        "storage_path": "./chroma_db",
        "sparse_db": sparse_info,
        "created_at": meta.get("created_at", "unknown"),
        "update_at": meta.get("update_at", "unknown"),
        "source_files": source_files
    }


@app.delete("/delete_collection")
async def delete_collection(collection_name: str, api_key: str = Depends(verify_api_key)):
    """Delete a whole collection and its Redis-stored sparse vectors."""
    try:
        chromadb_client.delete_collection(name=collection_name)

        # Best-effort cleanup of this collection's sparse vectors.
        try:
            pattern = f"sparse_vector:{collection_name}:*"
            keys = list(redis_client.scan_iter(match=pattern))
            if keys:
                # BUG FIX: redis_client is synchronous; delete() is not
                # awaitable, so the previous `await` raised TypeError.
                redis_client.delete(*keys)
        except Exception as e:
            print(f"警告：删除Redis中的稀疏向量时出错: {str(e)}")

        return {"status": "success", "message": f"集合 {collection_name} 已删除"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


@app.get("/list_documents")
async def list_documents(
        collection_name: str = "my_knowledge",
        start: Optional[str] = None,
        limit: int = 5,
        api_key: str = Depends(verify_api_key)
):
    """
    Sequentially page through the document chunks of a collection.

    Sort order:
    1. upload time (uploaded_at)
    2. filename
    3. index (global_index)

    `start` is a cursor: the id of the last document from the previous page;
    paging resumes just after it.
    """
    print(f"Listing documents for collection {collection_name} start {start}")
    try:
        collection = chromadb_client.get_collection(name=collection_name)
    except Exception as e:
        raise HTTPException(status_code=404, detail=f"Collection {collection_name} not found")

    # Fetch every document id with its metadata (texts are fetched later,
    # only for the requested page).
    all_docs = collection.get(include=["metadatas"], limit=None)
    doc_infos = []

    for i, doc_id in enumerate(all_docs['ids']):
        metadata = all_docs['metadatas'][i] if all_docs['metadatas'] else {}
        doc_infos.append({
            'id': doc_id,
            'metadata': metadata
        })

    # Sort key: upload time first, then filename, finally global index.
    def sort_key(doc_info):
        metadata = doc_info['metadata']
        # Upload timestamp; empty string when absent so such docs sort first.
        uploaded_at = metadata.get('uploaded_at') or ''
        # Filename, defaulting to empty string.
        filename = metadata.get('filename', '')
        # Global index is stored as a string; default '0'.
        global_index_str = metadata.get('global_index', '0')
        # Convert to int for numeric ordering; fall back to 0 on bad data.
        try:
            global_index = int(global_index_str)
        except (ValueError, TypeError):
            global_index = 0
        return uploaded_at, filename, global_index

    # Sort all documents and keep the ordered id list.
    sorted_doc_infos = sorted(doc_infos, key=sort_key)
    sorted_ids = [doc_info['id'] for doc_info in sorted_doc_infos]

    # Resolve the cursor into a start offset.
    start_index = 0
    if start:
        try:
            start_index = sorted_ids.index(start) + 1
        except ValueError:
            # Unknown start id: begin from the top.
            pass

    # Slice out the requested window.
    end_index = min(start_index + limit, len(sorted_ids))
    selected_ids = sorted_ids[start_index:end_index]

    # Fetch document contents for the window.
    if selected_ids:
        result = collection.get(
            ids=selected_ids,
            include=["documents", "metadatas"]
        )

        # Re-index the results so they can be re-ordered to match the request.
        id_to_result = {}
        for i in range(len(result['ids'])):
            id_to_result[result['ids'][i]] = {
                "id": result['ids'][i],
                "text": result['documents'][i],
                "metadata": result['metadatas'][i]
            }

        # Arrange documents in selected_ids order.
        documents = [id_to_result[doc_id] for doc_id in selected_ids]
    else:
        documents = []

    return {
        "documents": documents,
        "has_more": end_index < len(sorted_ids),
        "next_start": selected_ids[-1] if selected_ids else None,
        "total_count": len(sorted_ids)
    }


# Utility: split text into overlapping chunks.
def split_text(text, chunk_size=500, chunk_overlap=100):
    """Split text into chunks of chunk_size characters with chunk_overlap overlap.

    Args:
        text: input string (empty input yields an empty list).
        chunk_size: maximum characters per chunk; must be positive.
        chunk_overlap: characters shared between consecutive chunks; must be
            smaller than chunk_size.

    Returns:
        List of string chunks covering the whole text.

    Raises:
        ValueError: on non-positive chunk_size or chunk_overlap >= chunk_size.
            (BUG FIX: previously a step of <= 0 made the loop run forever.)
    """
    if chunk_size <= 0:
        raise ValueError("chunk_size must be positive")
    if chunk_overlap >= chunk_size:
        raise ValueError("chunk_overlap must be smaller than chunk_size")
    step = chunk_size - chunk_overlap
    return [text[start:start + chunk_size] for start in range(0, len(text), step)]


# Fetch a collection, creating it (stamped with created_at) when absent.
def get_or_create_collection_with_metadata(client, name):
    """Return the named collection, creating it with a created_at timestamp if missing."""
    try:
        return client.get_collection(name=name, embedding_function=None)
    except Exception:
        # Collection does not exist yet: create it and record creation time.
        return client.create_collection(
            name=name,
            embedding_function=None,
            metadata={
                "created_at": datetime.datetime.now().isoformat()
            }
        )


def read_file_content(upload_file: UploadFile) -> str:
    """Extract plain text from an uploaded txt/doc/docx/pdf file.

    Reads the underlying stream exactly once (the stream is consumed).

    Raises:
        ValueError: for unsupported file extensions.
    """
    filename = upload_file.filename.lower()
    file_bytes = upload_file.file.read()  # read once; stream is consumed

    if filename.endswith(".txt"):
        return file_bytes.decode("utf-8")

    elif filename.endswith((".doc", ".docx")):
        temp_path = f"/tmp/{uuid.uuid4()}.docx"
        with open(temp_path, "wb") as f:
            f.write(file_bytes)
        try:
            return docx2txt.process(temp_path)
        finally:
            # BUG FIX: remove the temp file even when docx2txt raises,
            # otherwise failed uploads leak files in /tmp.
            os.remove(temp_path)

    elif filename.endswith(".pdf"):
        doc = fitz.open(stream=file_bytes, filetype="pdf")
        try:
            return "".join(page.get_text() for page in doc)
        finally:
            # BUG FIX: release the PyMuPDF document handle.
            doc.close()

    else:
        raise ValueError("Unsupported file format")


async def validate_files(files: List[UploadFile]) -> JSONResponse | None:
    """Validate uploaded files; return a 400 JSONResponse on failure, None when OK.

    Rules: at most 50 files, each <= 10MB, <= 500MB combined, and only
    txt/doc/docx/pdf extensions. File pointers are rewound after size checks
    so downstream readers see the full content.
    """
    if len(files) > 50:
        return JSONResponse(content={"status": "error", "message": "最多支持上传50个文件"}, status_code=400)

    total_size = 0
    MAX_FILE_SIZE = 10 * 1024 * 1024
    MAX_TOTAL_SIZE = 500 * 1024 * 1024
    ALLOWED_EXTENSIONS = {".txt", ".doc", ".docx", ".pdf"}  # permitted extensions

    for f in files:
        # Reject files without a usable filename.
        filename = f.filename.lower() if f.filename else ""
        if not filename:
            return JSONResponse(
                content={"status": "error", "message": "文件名不能为空"},
                status_code=400
            )

        # Check the extension.
        ext = os.path.splitext(filename)[1]
        if ext not in ALLOWED_EXTENSIONS:
            # BUG FIX: the message previously contained a hard-coded
            # placeholder instead of the offending extension.
            return JSONResponse(
                content={
                    "status": "error",
                    "message": f"不支持的文件类型: {ext}。仅支持: txt, doc, docx, pdf"
                },
                status_code=400
            )

        contents = await f.read()
        file_size = len(contents)
        f.file.seek(0)  # rewind so the file can be read again downstream

        if file_size > MAX_FILE_SIZE:
            return JSONResponse(
                content={"status": "error", "message": f"单个文件大小不能超过10MB: {f.filename}"},
                status_code=400
            )
        total_size += file_size

    if total_size > MAX_TOTAL_SIZE:
        return JSONResponse(
            content={"status": "error", "message": f"总文件大小不能超过500MB，当前为{total_size / (1024 * 1024):.2f}MB"},
            status_code=400
        )

    return None  # all checks passed


# Document ingestion pipeline.
async def embed_and_store_files(files: List[UploadFile], chunk_size, chunk_overlap,
                                collection_name="my_knowledge"):
    """Chunk, embed (dense + sparse) and store uploaded files.

    Dense vectors and metadata go to ChromaDB; sparse vectors are pickled into
    Redis under sparse_vector:{collection}:{doc_id}. Work proceeds in small
    batches so no store is hammered with one huge write.
    """
    collection = get_or_create_collection_with_metadata(chromadb_client, name=collection_name)

    # Process in batches to avoid holding connections for long.
    batch_size = 10  # chunks per batch
    total_processed = 0

    for file_index, upload_file in enumerate(files):
        text = read_file_content(upload_file)
        chunks = split_text(text, chunk_size, chunk_overlap)

        # Record the first and last global chunk index of this file.
        doc_start_index = total_processed
        doc_end_index = total_processed + len(chunks) - 1

        # Per-file metadata shared by all of its chunks.
        file_metadata = {
            "source_type": "file_upload",
            "filename": upload_file.filename,
            "uploaded_at": datetime.datetime.now().isoformat(),
            "doc_start_index": str(doc_start_index),
            "doc_end_index": str(doc_end_index)
        }

        # Encode and store the chunks batch by batch.
        for i in range(0, len(chunks), batch_size):
            batch_chunks = chunks[i:i + batch_size]

            # Encode the batch (dense + sparse + colbert vectors).
            result = model.encode(batch_chunks, return_dense=True, return_sparse=True, return_colbert_vecs=True)
            dense_vecs = result["dense_vecs"].tolist()

            colbert_vecs_batch = result["colbert_vecs"]
            lexical_weights_batch = result["lexical_weights"]

            # NOTE(review): sparse keys are the raw bytes of colbert token
            # vectors zipped with lexical weights — matches the query-side
            # construction in hybrid_search; confirm this is the intended
            # BGE-M3 sparse format.
            sparse_vecs = [
                {token.tobytes(): float(weight) for token, weight in zip(colbert_vecs, lexical_weights)}
                for colbert_vecs, lexical_weights in zip(colbert_vecs_batch, lexical_weights_batch)
            ]

            if not dense_vecs or not sparse_vecs:
                raise ValueError("模型输出为空，请检查输入内容或模型状态")

            # Bulk store.
            try:
                # ChromaDB batch add: one fresh UUID per chunk.
                ids_batch = [str(uuid.uuid4()) for _ in range(len(batch_chunks))]
                # Build per-chunk metadata on top of the shared file metadata.
                metadata_batch = []
                for j in range(len(batch_chunks)):
                    chunk_metadata = file_metadata.copy()
                    # Index of the chunk within the current file.
                    chunk_metadata["chunk_index"] = str(i + j)
                    # Global index across every file in this upload.
                    chunk_metadata["global_index"] = str(total_processed + i + j)
                    metadata_batch.append(chunk_metadata)

                collection.add(
                    ids=ids_batch,
                    documents=batch_chunks,
                    embeddings=dense_vecs,
                    metadatas=metadata_batch
                )

                # Redis pipeline: batch-store the sparse vectors.
                pipe = redis_client.pipeline()
                for j, sparse_vec in enumerate(sparse_vecs):
                    redis_key = f"sparse_vector:{collection_name}:{ids_batch[j]}"
                    pipe.set(redis_key, pickle.dumps(sparse_vec))
                pipe.execute()

                total_processed += len(batch_chunks)
                print(f"已处理 {total_processed} 个文档块")

            except Exception as e:
                raise HTTPException(status_code=500, detail=f"处理文档块时出错: {str(e)}")

    print(f"✅ 文档入库成功：共处理 {total_processed} 个文档块")


def normalize(score, min_val, max_val):
    """Min-max normalize score into [0, 1]; the epsilon guards zero-width ranges."""
    span = max_val - min_val + 1e-8
    return (score - min_val) / span


# Retrieval entry point.
@timing_decorator
def hybrid_search(query, top_k=5, collection_name="knowledge_base", search_type="hybrid", alpha=0.7,
                  score_threshold=-100.0, rerank_method="default", fallback_strategy=True) -> List[Tuple[str, float]]:
    """Retrieve up to top_k (document, score) pairs for a query.

    search_type:
        dense  - ChromaDB dense-vector search only
        sparse - full scan scored with the Redis-stored sparse vectors
        hybrid - dense recall of a candidate set, then rerank
    rerank_method (hybrid only): "default" (weighted dense+sparse blend),
        "bge" (cross-encoder), or "rrf" (reciprocal rank fusion).
    alpha: dense weight in the hybrid blend.
    score_threshold: minimum score kept after rerank; when filtering leaves
        too few results, fallback_strategy may return the best few anyway.
    """
    assert search_type in ["dense", "sparse", "hybrid"], "search_type 不合法"
    assert rerank_method in ["default", "bge", "rrf"], "rerank_method 不合法"
    result = model.encode([query], return_dense=True, return_sparse=True, return_colbert_vecs=True)

    query_dense = result["dense_vecs"][0]
    colbert_vecs = result["colbert_vecs"][0]
    lexical_weights = result["lexical_weights"][0]
    query_sparse = {token.tobytes(): weight for token, weight in zip(colbert_vecs, lexical_weights)}
    collection = chromadb_client.get_collection(name=collection_name)

    # Candidate set size: 10x the requested top_k, capped at the total
    # document count of the collection.
    total_count = collection.count()
    candidate_size = min(top_k * 10, total_count)

    # Dispatch on the retrieval strategy.
    if search_type == "sparse":
        # Sparse retrieval: full scan over all document ids.
        doc_ids = get_all_doc_ids_optimized(collection)
        top_results = compute_similarity_for_docs(
            collection, doc_ids, query_dense, query_sparse, collection_name, search_type, alpha
        )
        # Return results ordered by sparse similarity.
        # NOTE(review): compute_similarity_for_docs returns (score, doc_id)
        # 2-tuples, but the unpack below expects 3-tuples including the text —
        # the sparse path looks broken; confirm and reconcile.
        top_results.sort(key=lambda x: x[0], reverse=True)  # sort by sparse score
        return [(doc, score) for score, _, doc in top_results[:top_k]]

    elif search_type == "dense":
        # Dense retrieval via ChromaDB's own vector index.
        dense_results = collection.query(
            query_embeddings=[query_dense.tolist()],
            n_results=top_k,  # return top_k directly
            include=["embeddings", "documents", "metadatas", "distances"]
        )

        # Unpack the result lists (first axis = the single query).
        doc_ids = dense_results["ids"][0]
        docs_text = dense_results["documents"][0]
        distances = dense_results["distances"][0] if "distances" in dense_results else [0] * len(doc_ids)

        # Convert distance to similarity (smaller distance = more similar).
        # For cosine distance: similarity = 1 - distance.
        scores = [1 - dist for dist in distances]

        # Return the dense results directly.
        return [(docs_text[i], scores[i]) for i in range(len(doc_ids))]

    else:  # hybrid
        # Hybrid: recall a candidate set with the dense index first.
        dense_results = collection.query(
            query_embeddings=[query_dense.tolist()],
            n_results=candidate_size,
            include=["embeddings", "documents", "metadatas", "distances"]
        )

        # Unpack the candidate lists.
        doc_ids = dense_results["ids"][0]
        dense_vecs = dense_results["embeddings"][0]
        docs_text = dense_results["documents"][0]

        # Score candidates with their sparse vectors and blend with alpha.
        top_results, top_dense, top_sparse = compute_similarity_for_candidates(
            collection, doc_ids, dense_vecs, docs_text, query_dense, query_sparse, collection_name, alpha
        )

        # === Rerank ===
        reranked_results = []

        if rerank_method == "default":
            # Sort by the blended (final) score.
            top_results.sort(reverse=True)
            reranked_results = [(doc, score) for score, _, doc in top_results]

        elif rerank_method == "bge":
            # Cross-encoder rerank over the candidate texts.
            docs = [doc for _, _, doc in top_results]
            scores = bge_rerank_batch(query, docs)
            reranked_results = [(doc, score) for score, doc in sorted(zip(scores, docs), reverse=True)]

        elif rerank_method == "rrf":
            # Reciprocal rank fusion of the dense and sparse rankings.
            def to_rank_dict(pairs):  # (score, id) pairs
                sorted_ids = [doc_id for _, doc_id in sorted(pairs, reverse=True)]
                return {doc_id: rank for rank, doc_id in enumerate(sorted_ids)}

            rank_dense = to_rank_dict(top_dense)
            rank_sparse = to_rank_dict(top_sparse)

            # RRF score with the conventional k=60 damping constant; missing
            # ranks are pushed far down with 10000.
            rrf_scores = {}
            for doc_id in set(rank_dense) | set(rank_sparse):
                r1 = rank_dense.get(doc_id, 10000)
                r2 = rank_sparse.get(doc_id, 10000)
                rrf_scores[doc_id] = 1 / (60 + r1) + 1 / (60 + r2)

            id_to_text = {doc_id: text for _, doc_id, text in top_results}
            reranked_results = [(id_to_text[doc_id], score) for doc_id, score in
                                sorted(rrf_scores.items(), key=lambda x: x[1], reverse=True)]

        # Threshold filtering.
        if score_threshold is not None:
            filtered_results = [(doc, score) for doc, score in reranked_results if score >= score_threshold]

            # Fallback when filtering leaves too few results.
            if fallback_strategy and len(filtered_results) < min(2, top_k) and reranked_results:
                # Is the best score good enough to keep anything at all?
                best_score = reranked_results[0][1]

                # Per-method minimum acceptable score.
                min_acceptable = 0.01 if rerank_method == "bge" else 0.003 if rerank_method == "rrf" else 0.3
                if best_score >= min_acceptable:
                    # Return the best few results rather than nothing.
                    filtered_results = reranked_results[:min(top_k, 3)]
                else:
                    # Scores too low: return an empty result set.
                    filtered_results = []
        else:
            # No threshold: take the top_k directly.
            filtered_results = reranked_results[:top_k]

        print("最终结果：", filtered_results[:top_k])
        return filtered_results[:top_k]


# Blend dense + sparse similarity for the candidate set (hybrid retrieval).
def compute_similarity_for_candidates(collection, doc_ids, dense_vecs, docs_text,
                                      query_dense, query_sparse, collection_name, alpha):
    """Score each candidate against the query.

    Returns three parallel lists:
        top_results: (final_blended_score, doc_id, doc_text)
        top_dense:   (raw_dense_score, doc_id)
        top_sparse:  (raw_sparse_score, doc_id)
    Candidates whose sparse vector is missing from Redis are skipped entirely.
    """
    if not doc_ids:
        return [], [], []

    top_results = []
    top_dense = []
    top_sparse = []

    for idx, doc_id in enumerate(doc_ids):
        # Fetch the pickled sparse vector from Redis.
        try:
            redis_key = f"sparse_vector:{collection_name}:{doc_id}"
            sparse_vec_data = redis_client.get(redis_key)
            if not sparse_vec_data:
                continue
            sparse_vec = pickle.loads(sparse_vec_data)
        except Exception as e:
            print(f"获取文档 {doc_id} 的稀疏向量时出错: {str(e)}")
            continue

        # Dense cosine similarity and sparse weighted-overlap score.
        score_dense = cosine_similarity([query_dense], [dense_vecs[idx]])[0][0]
        score_sparse = sum(query_sparse.get(k, 0.0) * v for k, v in sparse_vec.items())

        # Normalise both into [0, 1] before blending (cosine lives in [-1, 1];
        # the sparse overlap is assumed non-negative).
        score_dense_n = normalize(score_dense, -1, 1)
        score_sparse_n = normalize(score_sparse, 0, 1)
        # Weighted blend: alpha for dense, (1 - alpha) for sparse.
        final_score = alpha * score_dense_n + (1 - alpha) * score_sparse_n

        top_results.append((final_score, doc_id, docs_text[idx]))
        top_dense.append((score_dense, doc_id))
        top_sparse.append((score_sparse, doc_id))

    return top_results, top_dense, top_sparse


# Sparse-score every document (used by the sparse retrieval path).
def compute_similarity_for_docs(collection, doc_ids, query_dense, query_sparse, collection_name, search_type, alpha):
    """Score all documents against the query's sparse vector.

    Returns a list of (sparse_score, doc_id) 2-tuples. query_dense,
    search_type and alpha are currently unused; they are kept for signature
    symmetry with compute_similarity_for_candidates.
    NOTE(review): the sparse branch of hybrid_search unpacks these results as
    3-tuples — confirm and reconcile.
    """
    if not doc_ids:
        return []

    batch_size = 100  # adjust to available memory
    top_results = []

    for i in range(0, len(doc_ids), batch_size):
        batch_ids = doc_ids[i:i + batch_size]
        # Fetch dense vectors and texts for the batch (texts unused below).
        dense_results = collection.get(ids=batch_ids, include=["embeddings", "documents", "metadatas"])
        dense_vecs = dense_results["embeddings"]
        docs_text = dense_results["documents"]

        for idx, doc_id in enumerate(batch_ids):
            # Fetch the pickled sparse vector from Redis.
            try:
                redis_key = f"sparse_vector:{collection_name}:{doc_id}"
                sparse_vec_data = redis_client.get(redis_key)
                if not sparse_vec_data:
                    continue
                sparse_vec = pickle.loads(sparse_vec_data)
            except Exception as e:
                print(f"获取文档 {doc_id} 的稀疏向量时出错: {str(e)}")
                continue

            # Sparse weighted-overlap score with the query.
            score_sparse = sum(query_sparse.get(k, 0.0) * v for k, v in sparse_vec.items())

            top_results.append((score_sparse, doc_id))

    return top_results


# Paged retrieval of every document id in a collection.
def get_all_doc_ids_optimized(collection, batch_size=1000):
    """Collect all document ids by paging through the collection.

    Stops early once the offset passes 100000 as a safety cap against
    unboundedly large collections.
    """
    collected = []
    offset = 0
    while True:
        page = collection.get(limit=batch_size, offset=offset, include=[])
        page_ids = page['ids']
        if not page_ids:
            break
        collected.extend(page_ids)
        offset += batch_size
        if offset > 100000:  # safety cap: scan at most ~100k records
            break
    return collected


# BGE rerank batch scoring.
@timing_decorator
def bge_rerank_batch(query: str, documents: List[str]) -> List[float]:
    """Score query/document relevance with the BGE cross-encoder reranker.

    Builds one (query, doc) pair per document, tokenizes them as a single
    batch, runs the model under no_grad, and maps the raw logits through a
    sigmoid so callers receive scores in (0, 1).
    """
    global bge_rerank_tokenizer, bge_rerank_model
    # One query/document pair per candidate; truncate to the model's window.
    inputs = bge_rerank_tokenizer(
        [query] * len(documents),
        documents,
        return_tensors="pt",
        padding=True,
        truncation=True,
        max_length=512
    ).to(device)

    # Logits come back as [batch_size, 1]; squeeze to a 1-D score vector.
    with torch.no_grad():
        logits = bge_rerank_model(**inputs).logits.squeeze(-1)

    # BUG FIX: the previous math.exp(-score) overflowed (OverflowError) for
    # strongly negative logits (< ~-709); torch.sigmoid is numerically stable
    # and produces the same 1 / (1 + e^-x) normalisation.
    return torch.sigmoid(logits).cpu().tolist()


@app.post("/model")
@async_timing_decorator
async def qwen3_32b(prompt: str, thinking: bool = False, api_key: str = Depends(verify_api_key)):
    """
    Chat with the Qwen model, streaming the response.
    """
    # Read the Qwen key into its own name instead of clobbering the `api_key`
    # auth parameter, and fail fast when it is missing (consistent with
    # the /generate endpoint).
    qwen_api_key = os.getenv("QWEN_API_KEY")
    if not qwen_api_key:
        raise HTTPException(status_code=500, detail="QWEN_API_KEY 环境变量未设置")
    client = OpenAI(
        api_key=qwen_api_key,
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    )

    completion = client.chat.completions.create(
        model="qwen-plus",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt}
        ],
        stream=True,
        extra_body={"enable_thinking": thinking}
    )

    # NOTE(review): create_streaming_response is defined elsewhere in this file.
    return create_streaming_response(completion)


@app.post("/generate")
async def generate_text(query: Query, api_key: str = Depends(verify_api_key)):
    """
    RAG answer generation: retrieve context, then ask Qwen (non-streaming).

    NOTE(review): the "answer" field carries the full completion JSON dump,
    not just choices[0].message.content — confirm the client expects this.
    NOTE(review): the /stream_generate handler below reuses the name
    generate_text; both routes register, but the module-level name is shadowed.
    """
    query_start_time = time.perf_counter()
    # Run retrieval.
    query_result_rag = hybrid_search(query.keyword, query.topK, query.collectionName, query.searchMethod,
                                     query.rerankWeight, query.scoreThreshold, query.rerankMethod)
    query_end_time = time.perf_counter()
    print(f"hybrid_search 函数运行时间: {query_end_time - query_start_time:.4f} 秒")

    # Separate the retrieved texts from their scores.
    context_docs = [doc for doc, score in query_result_rag]
    context_scores = [score for doc, score in query_result_rag]
    context_str = "".join(context_docs) if context_docs else "无相关参考资料"

    # Build the prompt (reference material + user question).
    prompt = f"""
        ###参考资料###
        {context_str}
        ###用户问题###
        {query.keyword}
        """
    api_key = os.getenv("QWEN_API_KEY")
    if not api_key:
        raise HTTPException(status_code=500, detail="QWEN_API_KEY 环境变量未设置")
    # Call the LLM for the final answer.
    client = OpenAI(
        api_key=api_key,
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    )

    completion = client.chat.completions.create(
        model="qwen-max-latest",
        messages=[
            {"role": "system", "content": """
                【角色定义】
                你是一位基于给定参考资料的精准回答助手，必须严格遵循以下规则：
                【回答规则】
                **唯一依据**：仅使用<参考资料>中明确提及的信息
                **边界处理**：若参考资料未提及相关内容，必须回复："根据现有资料无法回答该问题"
                **格式要求**：段落间用空行分隔"""},
            {"role": "user", "content": prompt},
        ],
        stream=False,
    )
    print(completion.model_dump_json())
    return {
        "answer": completion.model_dump_json(),
        "contexts": context_docs,
        "context_scores": context_scores,
        "context_count": len(context_docs)
    }


@app.post("/stream_generate")
@async_timing_decorator
async def stream_generate_text(query: Query, api_key: str = Depends(verify_api_key)):
    """Retrieve context via hybrid search and stream the generated answer.

    Renamed from the duplicate ``generate_text`` — the previous name shadowed
    the /generate handler at module level and produced colliding operation
    IDs in the OpenAPI schema. The HTTP route is unchanged.

    Args:
        query: Retrieval parameters plus ``thinking`` for the model's
            thinking mode.
        api_key: Caller's credential, validated by ``verify_api_key``.

    Returns:
        A StreamingResponse emitting the model's chunks as SSE events.

    Raises:
        HTTPException: 500 when the QWEN_API_KEY env var is not set.
    """
    # 记录检索时间
    query_start_time = time.perf_counter()
    # 执行检索
    query_result_rag = hybrid_search(query.keyword, query.topK, query.collectionName, query.searchMethod,
                                     query.rerankWeight, query.scoreThreshold, query.rerankMethod)
    query_end_time = time.perf_counter()
    print(f"hybrid_search 函数运行时间: {query_end_time - query_start_time:.4f} 秒")
    context_docs = [doc for doc, score in query_result_rag]
    context_str = "".join(context_docs) if context_docs else "无相关参考资料"
    print(query)
    print(context_str)
    # 构建 prompt
    prompt = f"""
    ###参考资料###
    {context_str}
    ###用户问题###
    {query.keyword}
    """

    # Distinct name: the `api_key` parameter above is the caller's auth
    # credential; this is the upstream provider key.
    qwen_api_key = os.getenv("QWEN_API_KEY")
    if not qwen_api_key:
        raise HTTPException(status_code=500, detail="QWEN_API_KEY 环境变量未设置")
    # 调用大模型进行响应
    client = OpenAI(
        api_key=qwen_api_key,
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    )

    completion = client.chat.completions.create(
        model="qwen-max-latest",
        messages=[
            {"role": "system", "content": """
            【角色定义】
            你是一位基于给定参考资料的精准回答助手，必须严格遵循以下规则：
            【回答规则】
            **唯一依据**：仅使用<参考资料>中明确提及的信息
            **边界处理**：若参考资料未提及相关内容，必须回复："根据现有资料无法回答该问题"
            **格式要求**：段落间用空行分隔"""},
            {"role": "user", "content": prompt},
        ],
        stream=True,
        extra_body={"enable_thinking": query.thinking}
    )
    return create_streaming_response(completion)


def create_streaming_response(completion):
    """Wrap an OpenAI streaming completion in an SSE StreamingResponse.

    Iterates the upstream chunk stream, re-serializes each non-empty delta
    (``content`` and/or ``reasoning_content``) as an OpenAI-style
    ``chat.completion.chunk`` SSE event, and terminates with ``data: [DONE]``.
    Client disconnects are swallowed; other errors are reported as a final
    ``stream_error`` event.

    Args:
        completion: A synchronous iterable of OpenAI streaming chunks.

    Returns:
        StreamingResponse with media type ``text/event-stream``.
    """

    async def generate_stream():
        try:
            # NOTE(review): this is synchronous iteration inside an async
            # generator — the event loop is blocked while waiting on the
            # upstream API. Consider an async client or run_in_executor.
            for chunk in completion:
                if chunk.choices and len(chunk.choices) > 0:
                    delta = chunk.choices[0].delta
                    # Build the delta payload dynamically: include only the
                    # fields that are actually present and non-empty.
                    delta_data = {}
                    if hasattr(delta, 'content') and delta.content:
                        delta_data["content"] = delta.content
                    if hasattr(delta, 'reasoning_content') and delta.reasoning_content:
                        delta_data["reasoning_content"] = delta.reasoning_content

                    # Skip keep-alive/empty chunks entirely.
                    if delta_data:
                        data = {
                            "id": chunk.id,
                            "object": "chat.completion.chunk",
                            "created": chunk.created,
                            "model": chunk.model,
                            "choices": [{
                                "index": 0,
                                "delta": delta_data,
                                "finish_reason": None
                            }]
                        }
                        yield f"data: {json.dumps(data, ensure_ascii=False)}\n\n"
            # SSE end-of-stream marker expected by OpenAI-compatible clients.
            yield "data: [DONE]\n\n"
        except BrokenPipeError:
            # Client disconnected mid-stream — normal, nothing to clean up.
            print("客户端断开连接 (BrokenPipeError)")
            return
        except ConnectionResetError:
            # Client reset the connection — also normal.
            print("客户端重置连接 (ConnectionResetError)")
            return
        except Exception as e:
            # Report any other failure as a final error event.
            error_data = {
                "error": {
                    "message": str(e),
                    "type": "stream_error"
                }
            }
            try:
                yield f"data: {json.dumps(error_data, ensure_ascii=False)}\n\n"
            except Exception:
                # Narrowed from a bare `except:` — a bare except would also
                # swallow GeneratorExit/KeyboardInterrupt. If even the error
                # event cannot be sent, give up quietly.
                pass

    # Headers disable proxy buffering/caching so events flush immediately.
    return StreamingResponse(
        generate_stream(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Access-Control-Allow-Origin": "*",
        }
    )

# docker 打包了模型环境
# 如果需要修改代码，外挂代码目录进行修改
# 如果需要运行评测代码，则在外部下载必要环境后调用 docker 的接口运行评测代码
# 前端分为插件和网页代码，插件直接在文件间传输。
# 网页代码仍然打包为 docker image 运行
