# app/services/document_service.py
import datetime
import os
import pickle
import uuid
from typing import List, Optional

from chromadb import Collection
from fastapi import HTTPException, UploadFile
from fastapi.responses import JSONResponse

from app.core.database import redis_client, chromadb_client
from app.model.schemas import Document, DocumentBatch
from app.utils.file_utils import split_text, read_file_content


class DocumentService:
    """Service layer for hybrid (dense + sparse) document storage.

    Dense embeddings, raw text and metadata live in ChromaDB; the matching
    sparse vectors are pickled into Redis under keys of the form
    ``sparse_vector:{collection_name}:{doc_id}`` so the two stores can be
    joined by document id at query time.
    """

    @staticmethod
    def get_or_create_collection_with_metadata(name: str) -> Collection:
        """Return the named ChromaDB collection, creating it on first use.

        Newly created collections are stamped with a ``created_at``
        ISO-8601 timestamp in their metadata.
        """
        try:
            # EAFP: fetching an existing collection is the common case.
            return chromadb_client.get_collection(name=name, embedding_function=None)
        except Exception:
            return chromadb_client.create_collection(
                name=name,
                embedding_function=None,
                metadata={"created_at": datetime.datetime.now().isoformat()},
            )

    @staticmethod
    def _build_sparse_vecs(result) -> List[dict]:
        """Build one sparse dict per encoded document from a model output.

        Shared by :meth:`add_document` and :meth:`embed_and_store_files`
        (previously duplicated in both).

        NOTE(review): each document's ColBERT token vectors are zipped with
        its ``lexical_weights`` mapping; iterating a dict yields its *keys*,
        so ``weight`` here is whatever key type the model emits, not the
        weight values — confirm this is the intended sparse representation
        before relying on the stored values.
        """
        return [
            {token.tobytes(): float(weight) for token, weight in zip(colbert_vecs, lexical_weights)}
            for colbert_vecs, lexical_weights in zip(result["colbert_vecs"], result["lexical_weights"])
        ]

    @staticmethod
    async def add_document(doc: Document, model):
        """Embed a single document and store it in ChromaDB + Redis.

        If the Redis write fails, the ChromaDB entry is rolled back and an
        HTTP 500 is raised so the two stores never diverge.
        """
        result = model.encode([doc.text], return_dense=True, return_sparse=True, return_colbert_vecs=True)
        dense_vec = result["dense_vecs"][0].tolist()
        sparse_vec = DocumentService._build_sparse_vecs(result)[0]

        collection = DocumentService.get_or_create_collection_with_metadata(name=doc.collection_name)
        doc_id = str(uuid.uuid4())

        # Work on a copy so the caller's metadata dict is never mutated.
        metadata = dict(doc.metadata) if doc.metadata else {}
        metadata["source_type"] = metadata.get("source_type", "manual_input")
        metadata["uploaded_at"] = datetime.datetime.now().isoformat()

        # Manual input gets placeholder file/position fields so every
        # document shares the same metadata schema as file uploads.
        if "filename" not in metadata and metadata["source_type"] == "manual_input":
            metadata["filename"] = "manual_input"
            metadata["doc_start_index"] = "0"
            metadata["doc_end_index"] = "0"

        collection.add(
            ids=[doc_id],
            embeddings=[dense_vec],
            documents=[doc.text],
            metadatas=[metadata]
        )

        try:
            redis_key = f"sparse_vector:{doc.collection_name}:{doc_id}"
            redis_client.set(redis_key, pickle.dumps(sparse_vec))
        except Exception as e:
            # Roll back the dense entry so the stores stay consistent.
            collection.delete(ids=[doc_id])
            raise HTTPException(status_code=500, detail=f"稀疏向量存储失败: {str(e)}")

        return {
            "status": "success",
            "id": doc_id,
            "sparse_vector_stored": True
        }

    @staticmethod
    async def add_documents(batch: DocumentBatch, model):
        """Add a batch of documents sequentially via :meth:`add_document`."""
        results = [await DocumentService.add_document(doc, model) for doc in batch.documents]
        return {
            "status": "success",
            "results": results
        }

    @staticmethod
    async def delete_document(doc_id: str, collection_name: str = "my_knowledge"):
        """Delete one document from ChromaDB and its sparse vector from Redis.

        Raises HTTP 500 if the Redis cleanup fails (note the ChromaDB entry
        is already gone at that point).
        """
        collection = chromadb_client.get_collection(name=collection_name)
        collection.delete(ids=[doc_id])
        try:
            redis_client.delete(f"sparse_vector:{collection_name}:{doc_id}")
        except Exception as e:
            raise HTTPException(status_code=500, detail=f"删除稀疏向量失败: {str(e)}")

        return {"status": "success"}

    @staticmethod
    async def get_document(doc_id: str, collection_name: str = "my_knowledge"):
        """Fetch one or more documents by id.

        ``doc_id`` may be a single id or several ids joined by commas.
        Raises HTTP 404 when none of the ids exist.
        """
        ids = doc_id.split(",")
        collection = chromadb_client.get_collection(name=collection_name)

        result = collection.get(ids=ids, include=["documents", "metadatas"])

        if not result['ids']:
            raise HTTPException(status_code=404, detail="未找到指定文档")

        return [
            {
                "id": did,
                "text": text,
                "collection_name": collection_name,
                "metadata": meta
            }
            for did, text, meta in zip(result['ids'], result['documents'], result['metadatas'])
        ]

    @staticmethod
    async def list_documents(
            collection_name: str = "my_knowledge",
            start: Optional[str] = None,
            limit: int = 5
    ):
        """Page through a collection in a stable, deterministic order.

        Sort order: uploaded_at, then filename, then numeric global_index.
        ``start`` is the id of the last document of the previous page; the
        page begins right after it (an unknown id restarts from the top).
        Returns the documents plus ``has_more`` / ``next_start`` paging info.
        """
        try:
            collection = chromadb_client.get_collection(name=collection_name)
        except Exception:
            raise HTTPException(status_code=404, detail=f"Collection {collection_name} not found")

        # Pull every id + metadata so we can impose a global ordering
        # (ChromaDB itself offers no ORDER BY).
        all_docs = collection.get(include=["metadatas"], limit=None)
        metadatas = all_docs['metadatas'] or [{} for _ in all_docs['ids']]
        doc_infos = [
            {'id': did, 'metadata': meta}
            for did, meta in zip(all_docs['ids'], metadatas)
        ]

        def sort_key(doc_info):
            metadata = doc_info['metadata']
            uploaded_at = metadata.get('uploaded_at') or ''
            filename = metadata.get('filename', '')
            # global_index is stored as a string; fall back to 0 when it is
            # absent or malformed so sorting never raises.
            try:
                global_index = int(metadata.get('global_index', '0'))
            except (ValueError, TypeError):
                global_index = 0
            return uploaded_at, filename, global_index

        sorted_ids = [info['id'] for info in sorted(doc_infos, key=sort_key)]

        # Resume right after the ``start`` id; unknown ids restart paging.
        start_index = 0
        if start:
            try:
                start_index = sorted_ids.index(start) + 1
            except ValueError:
                pass

        end_index = min(start_index + limit, len(sorted_ids))
        selected_ids = sorted_ids[start_index:end_index]

        if selected_ids:
            result = collection.get(
                ids=selected_ids,
                include=["documents", "metadatas"]
            )
            # ChromaDB does not guarantee result order; re-map by id so the
            # response follows the requested (sorted) order.
            id_to_result = {
                did: {"id": did, "text": text, "metadata": meta}
                for did, text, meta in zip(result['ids'], result['documents'], result['metadatas'])
            }
            documents = [id_to_result[doc_id] for doc_id in selected_ids]
        else:
            documents = []

        return {
            "documents": documents,
            "has_more": end_index < len(sorted_ids),
            "next_start": selected_ids[-1] if selected_ids else None,
            "total_count": len(sorted_ids)
        }

    @staticmethod
    async def validate_files(files: List[UploadFile]) -> JSONResponse | None:
        """Validate uploaded files; return an error JSONResponse or None.

        Checks: at most 50 files, non-empty filenames, allowed extensions,
        per-file size <= 10MB and total size <= 500MB. Each file is read
        once to measure it and its pointer is reset so later processing can
        re-read it.
        """
        if len(files) > 50:
            return JSONResponse(content={"status": "error", "message": "最多支持上传50个文件"}, status_code=400)

        MAX_FILE_SIZE = 10 * 1024 * 1024
        MAX_TOTAL_SIZE = 500 * 1024 * 1024
        ALLOWED_EXTENSIONS = {".txt", ".doc", ".docx", ".pdf"}

        total_size = 0
        for f in files:
            # Lower-cased filename so extension matching is case-insensitive.
            filename = f.filename.lower() if f.filename else ""
            if not filename:
                return JSONResponse(
                    content={"status": "error", "message": "文件名不能为空"},
                    status_code=400
                )

            ext = os.path.splitext(filename)[1]
            if ext not in ALLOWED_EXTENSIONS:
                # BUGFIX: the f-string previously had no placeholder, so the
                # client never saw which extension was rejected.
                return JSONResponse(
                    content={
                        "status": "error",
                        "message": f"不支持的文件类型: {ext}。仅支持: txt, doc, docx, pdf"
                    },
                    status_code=400
                )

            contents = await f.read()
            file_size = len(contents)
            f.file.seek(0)  # reset so the file can be read again downstream

            if file_size > MAX_FILE_SIZE:
                return JSONResponse(
                    content={"status": "error", "message": f"单个文件大小不能超过10MB: {f.filename}"},
                    status_code=400
                )
            total_size += file_size

        if total_size > MAX_TOTAL_SIZE:
            return JSONResponse(
                content={"status": "error", "message": f"总文件大小不能超过500MB，当前为{total_size / (1024 * 1024):.2f}MB"},
                status_code=400
            )

        return None  # all checks passed

    @staticmethod
    async def embed_and_store_files(files: List[UploadFile], chunk_size, chunk_overlap, model,
                                    collection_name="my_knowledge"):
        """Read, chunk, embed and store a list of uploaded files.

        Chunks are encoded and written in small batches so neither the
        model nor the databases are hit with one huge request. Dense
        vectors go to ChromaDB, sparse vectors to Redis (pipelined).
        Raises HTTP 500 if any batch fails to store.
        """
        collection = DocumentService.get_or_create_collection_with_metadata(name=collection_name)

        batch_size = 10  # chunks encoded/stored per round trip
        total_processed = 0

        for upload_file in files:
            text = read_file_content(upload_file)
            chunks = split_text(text, chunk_size, chunk_overlap)

            # Global positions of this file's first and last chunk.
            doc_start_index = total_processed
            doc_end_index = total_processed + len(chunks) - 1

            # Metadata shared by every chunk of this file.
            file_metadata = {
                "source_type": "file_upload",
                "filename": upload_file.filename,
                "uploaded_at": datetime.datetime.now().isoformat(),
                "doc_start_index": str(doc_start_index),
                "doc_end_index": str(doc_end_index)
            }

            for i in range(0, len(chunks), batch_size):
                batch_chunks = chunks[i:i + batch_size]

                result = model.encode(batch_chunks, return_dense=True, return_sparse=True, return_colbert_vecs=True)
                dense_vecs = result["dense_vecs"].tolist()
                sparse_vecs = DocumentService._build_sparse_vecs(result)

                if not dense_vecs or not sparse_vecs:
                    raise ValueError("模型输出为空，请检查输入内容或模型状态")

                try:
                    ids_batch = [str(uuid.uuid4()) for _ in batch_chunks]
                    metadata_batch = []
                    for j in range(len(batch_chunks)):
                        chunk_metadata = file_metadata.copy()
                        # Index of the chunk within the current file.
                        chunk_metadata["chunk_index"] = str(i + j)
                        # BUGFIX: anchor the global index at the file's start
                        # offset. Using total_processed here double-counted
                        # earlier batches of the same file, because it is
                        # incremented after every batch below.
                        chunk_metadata["global_index"] = str(doc_start_index + i + j)
                        metadata_batch.append(chunk_metadata)

                    collection.add(
                        ids=ids_batch,
                        documents=batch_chunks,
                        embeddings=dense_vecs,
                        metadatas=metadata_batch
                    )

                    # Pipeline the sparse-vector writes to Redis.
                    pipe = redis_client.pipeline()
                    for doc_id, sparse_vec in zip(ids_batch, sparse_vecs):
                        pipe.set(f"sparse_vector:{collection_name}:{doc_id}", pickle.dumps(sparse_vec))
                    pipe.execute()

                    total_processed += len(batch_chunks)
                    print(f"已处理 {total_processed} 个文档块")

                except Exception as e:
                    raise HTTPException(status_code=500, detail=f"处理文档块时出错: {str(e)}")

        print(f"✅ 文档入库成功：共处理 {total_processed} 个文档块")
        return {"status": "success", "processed_count": total_processed}

    @staticmethod
    async def get_collection_info(collection_name: Optional[str] = None):
        """Describe one collection (detailed) or all collections (summary).

        With ``collection_name``: returns counts, dimension, distance
        function, Redis sparse-store stats and collection metadata; raises
        HTTP 404 when the collection does not exist. Without it: returns a
        name/count summary for every collection.
        """
        if not collection_name:
            # Summary mode: one entry per collection.
            collections = chromadb_client.list_collections()
            return [{"name": col.name, "count": col.count()} for col in collections]

        try:
            collection = chromadb_client.get_collection(name=collection_name)
        except ValueError:
            raise HTTPException(status_code=404, detail="Collection not found")

        # Static connection info for the sparse store (Redis).
        # NOTE(review): host/port/db are hard-coded; they should probably
        # come from app configuration — confirm against app.core.database.
        sparse_info = {
            "type": "Redis",
            "host": "localhost",
            "port": 6379,
            "db": 0
        }

        # Count sparse vectors; -1 signals the count could not be obtained.
        try:
            pattern = f"sparse_vector:{collection_name}:*"
            sparse_info["vector_count"] = len(list(redis_client.scan_iter(match=pattern)))
        except Exception:
            sparse_info["vector_count"] = -1

        source_files = []
        if collection.metadata and "source_files" in collection.metadata:
            source_files = collection.metadata["source_files"].split(",")

        return {
            "name": collection.name,
            "count": collection.count(),
            "dimension": 1024,  # BGE-M3 dense embedding size
            "distance_function": collection.metadata.get("distance_function_name", "cosine"),
            "storage_path": "./chroma_db",
            "sparse_db": sparse_info,
            "created_at": collection.metadata.get("created_at", "unknown"),
            "update_at": collection.metadata.get("update_at", "unknown"),
            "source_files": source_files
        }

    @staticmethod
    async def delete_collection(collection_name: str):
        """Drop a collection and best-effort delete its sparse vectors.

        Redis cleanup failures are only logged (the dense data is already
        gone); any other failure is surfaced as HTTP 500.
        """
        try:
            chromadb_client.delete_collection(name=collection_name)

            # Best-effort removal of every sparse vector of this collection.
            try:
                keys = list(redis_client.scan_iter(match=f"sparse_vector:{collection_name}:*"))
                if keys:
                    redis_client.delete(*keys)
            except Exception as e:
                print(f"警告：删除Redis中的稀疏向量时出错: {str(e)}")

            return {"status": "success", "message": f"集合 {collection_name} 已删除"}
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))


# Module-level singleton; every method is a @staticmethod, so one shared
# instance is enough for all callers.
document_service = DocumentService()
