import asyncio
import io
import logging
import os
import shutil
import tempfile
import uuid
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, List, Optional

from fastapi import UploadFile, BackgroundTasks
from sqlalchemy import select, update
from sqlalchemy.ext.asyncio import AsyncSession
from langchain_community.embeddings import OpenAIEmbeddings

from app.core.config import settings
from app.core.database import AsyncSessionLocal
from app.core.document import Document, DocumentStatus
from app.core.storage import get_minio_client
from app.core.vector import MilvusHandler
# RAGDocumentHandler is normally a user-provided LangChain helper.
# In practice it is imported from `utils`; a fallback implementation is defined below.
try:
    from utils.rag_document_handler import RAGDocumentHandler
except ImportError:
    # Fallback used when the `utils` package is unavailable or incompatible.
    # NOTE: the return annotation references `Any`, which must be imported at
    # module level — otherwise defining this class raises NameError at import
    # time, defeating the purpose of the fallback.
    class RAGDocumentHandler:
        """Minimal loader/splitter: picks a loader by file extension and
        splits the loaded documents into overlapping text chunks."""

        def __init__(self, chunk_size: int = 1000, chunk_overlap: int = 100):
            # Chunking parameters forwarded to the recursive splitter.
            self.chunk_size = chunk_size
            self.chunk_overlap = chunk_overlap

        def load_and_split(self, file_path: str) -> List[Any]:
            """Load `file_path` with an extension-appropriate loader and
            return the list of split LangChain Document chunks.

            Imports are local so the class can be defined even when the
            LangChain packages are absent; they are only required on use.
            """
            from langchain_text_splitters import RecursiveCharacterTextSplitter
            from langchain_community.document_loaders import TextLoader, PyPDFLoader, Docx2txtLoader

            ext = os.path.splitext(file_path)[1].lower()
            if ext == ".pdf":
                loader = PyPDFLoader(file_path)
            elif ext == ".docx":
                loader = Docx2txtLoader(file_path)
            else:
                # Default: treat anything else as plain text.
                loader = TextLoader(file_path)

            documents = loader.load()
            text_splitter = RecursiveCharacterTextSplitter(
                chunk_size=self.chunk_size,
                chunk_overlap=self.chunk_overlap,
            )
            return text_splitter.split_documents(documents)

# Module-level logger, namespaced by this module's import path.
logger = logging.getLogger(__name__)

class IngestionService:
    """
    Document ingestion service.

    Handles file upload, object storage, chunking, embedding and indexing.
    """

    @staticmethod
    async def upload_and_create_document(
        file: UploadFile,
        background_tasks: BackgroundTasks
    ) -> Document:
        """
        Receive an uploaded file, store it in MinIO, create a PostgreSQL
        record, and schedule the background processing task.

        Returns the newly created ``Document`` row (status PENDING).
        """
        # 1. Build a unique object name. `file.filename` may be None when the
        # client omits it; fall back so Path() does not raise TypeError.
        original_filename = file.filename or "unnamed"
        file_ext = Path(original_filename).suffix
        object_name = f"{uuid.uuid4()}{file_ext}"

        # 2. Upload to MinIO.
        # NOTE(review): for very large files a streamed/chunked upload would
        # be preferable; the whole payload is buffered in memory here.
        minio_client = get_minio_client()
        file_content = await file.read()
        file_size = len(file_content)
        data_stream = io.BytesIO(file_content)

        # Make sure the target bucket exists before writing.
        if not minio_client.bucket_exists(settings.MINIO_BUCKET_NAME):
            minio_client.make_bucket(settings.MINIO_BUCKET_NAME)

        minio_client.put_object(
            bucket_name=settings.MINIO_BUCKET_NAME,
            object_name=object_name,
            data=data_stream,
            length=file_size,
            content_type=file.content_type,
        )

        # 3. Create the PostgreSQL record.
        async with AsyncSessionLocal() as session:
            new_doc = Document(
                filename=original_filename,
                minio_object_name=object_name,
                status=DocumentStatus.PENDING,
                metadata_={
                    "file_size": file_size,
                    "content_type": file.content_type,
                    "original_filename": original_filename,
                },
            )
            session.add(new_doc)
            await session.commit()
            # Refresh so the generated primary key (and defaults) are loaded
            # before the session closes.
            await session.refresh(new_doc)
            doc_id = new_doc.id

        # 4. Schedule the background processing task (runs after the response).
        background_tasks.add_task(IngestionService.process_document_task, doc_id)

        return new_doc

    @staticmethod
    async def process_document_task(doc_id: uuid.UUID):
        """
        Background task: download the file, split it, embed the chunks and
        insert them into Milvus; mirror progress/failure into PostgreSQL.
        """
        logger.info("Starting background processing for document %s", doc_id)

        temp_file_path: Optional[str] = None

        async with AsyncSessionLocal() as session:
            # `doc` must be pre-bound: if the SELECT below raises, the except
            # handler would otherwise hit NameError and mask the real error.
            doc: Optional[Document] = None
            # Snapshot of the row's metadata taken while it is safely loaded;
            # used after rollback, when the instance's attributes are expired.
            base_meta: dict = {}
            try:
                # 1. Fetch the record and mark it PROCESSING.
                result = await session.execute(
                    select(Document).where(Document.id == doc_id)
                )
                doc = result.scalar_one_or_none()

                if not doc:
                    logger.error("Document %s not found", doc_id)
                    return

                base_meta = dict(doc.metadata_ or {})
                doc.status = DocumentStatus.PROCESSING
                await session.commit()

                # 2. Download the object from MinIO into a temp file.
                minio_client = get_minio_client()
                suffix = Path(doc.minio_object_name).suffix
                # Create the temp file first, then let the client write it.
                with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp_file:
                    temp_file_path = tmp_file.name

                minio_client.fget_object(
                    bucket_name=settings.MINIO_BUCKET_NAME,
                    object_name=doc.minio_object_name,
                    file_path=temp_file_path,
                )

                # 3. Split the file into chunks via the helper.
                doc_handler = RAGDocumentHandler(chunk_size=1000, chunk_overlap=200)
                chunks = doc_handler.load_and_split(temp_file_path)

                if not chunks:
                    logger.warning("No chunks generated for document %s", doc_id)
                    doc.status = DocumentStatus.FAILED
                    # Reassign (not mutate) the JSON column: in-place dict
                    # mutation is not change-tracked by SQLAlchemy unless
                    # MutableDict is configured, so the error would be lost.
                    doc.metadata_ = {**base_meta, "error": "No text content found"}
                    await session.commit()
                    return

                # 4. Embedding & Milvus insert.
                texts = [chunk.page_content for chunk in chunks]

                # Per-chunk metadata; include document_id so chunks can be
                # deleted later, and keep it JSON-serializable.
                metadatas = [
                    {
                        **chunk.metadata,
                        "document_id": str(doc_id),
                        "filename": doc.filename,
                    }
                    for chunk in chunks
                ]

                embeddings = OpenAIEmbeddings(
                    model=settings.EMBEDDING_MODEL,
                    openai_api_key=settings.OPENAI_API_KEY,
                    base_url=settings.OPENAI_BASE_URL,
                )
                vectors = await embeddings.aembed_documents(texts)

                # Columnar insert; schema order assumed to be
                # id (auto), vector, text, metadata — defined in core/vector.py.
                collection = MilvusHandler.get_collection()
                collection.insert([vectors, texts, metadatas])
                # Flush so the data is immediately searchable (tune for prod).
                collection.flush()

                # 5. Mark the record INDEXED and record processing info.
                doc.status = DocumentStatus.INDEXED
                doc.metadata_ = {
                    **base_meta,
                    "chunks_count": len(chunks),
                    "indexed_at": datetime.now(timezone.utc).isoformat(),
                }
                await session.commit()
                logger.info("Document %s processed and indexed successfully", doc_id)

            except Exception as e:
                logger.error(
                    "Error processing document %s: %s", doc_id, e, exc_info=True
                )
                # Discard whatever the failed transaction left behind.
                await session.rollback()

                # Record the failure only if the row was actually found.
                # Use an UPDATE keyed on doc_id rather than touching the
                # rolled-back (expired) instance, which can raise on an
                # AsyncSession.
                if doc is not None:
                    try:
                        await session.execute(
                            update(Document)
                            .where(Document.id == doc_id)
                            .values(
                                status=DocumentStatus.FAILED,
                                metadata_={**base_meta, "error": str(e)},
                            )
                        )
                        await session.commit()
                    except Exception:
                        # Best effort: never let status bookkeeping mask the
                        # original failure.
                        logger.exception(
                            "Failed to record FAILED status for document %s", doc_id
                        )

            finally:
                # Always clean up the temp file.
                if temp_file_path and os.path.exists(temp_file_path):
                    try:
                        os.remove(temp_file_path)
                    except OSError:
                        pass

