import os
import shutil
import tempfile
import json
import logging
from typing import Dict, Any, List
from fastapi import UploadFile

from openai import AsyncOpenAI
from app.config import get_settings
from app.database import init_collection
from utils.rag_document_handler import RAGDocumentHandler

# Module-level logger for this service.
logger = logging.getLogger(__name__)

# Initialize application settings and the async OpenAI client once at import.
settings = get_settings()
aclient = AsyncOpenAI(
    api_key=settings.OPENAI_API_KEY, 
    base_url=settings.OPENAI_API_BASE if settings.OPENAI_API_BASE else None
)

# Acquire the Milvus collection handle at import time.
# NOTE: in a real application the connection lifecycle should be managed
# more carefully (lazy init / dependency injection); a failure here leaves
# `collection` as None and every request-level function guards against that.
try:
    collection = init_collection()
except Exception as e:
    logger.error(f"初始化 Milvus 集合失败: {e}")
    collection = None

def load_and_split_file(file_path: str):
    """Delegate document loading and chunking to the shared RAG handler.

    Args:
        file_path: Path of the file on disk to process.

    Returns:
        List[Document]: the chunks produced by RAGDocumentHandler.load_and_split.
    """
    return RAGDocumentHandler().load_and_split(file_path)

async def get_embeddings(texts: List[str]) -> List[List[float]]:
    """Generate embedding vectors for a batch of texts via the OpenAI API.

    Args:
        texts: Texts to embed. An empty list short-circuits to [] without
            making an API call.

    Returns:
        One embedding vector per input text, in the same order.

    Raises:
        RuntimeError: If the embedding API call fails; the original
            exception is chained as the cause.
    """
    if not texts:
        return []

    try:
        # Single batched request; production code may need to split the
        # batch to stay within the model's token limit.
        response = await aclient.embeddings.create(
            input=texts,
            model=settings.EMBEDDING_MODEL
        )
    except Exception as e:
        # Lazy %-formatting so the message is only built when emitted.
        logger.error("生成 Embeddings 失败: %s", e)
        # Chain the cause so the original traceback is not lost.
        raise RuntimeError(f"Embedding 生成失败: {e}") from e

    return [data.embedding for data in response.data]

async def ingest_file(file: UploadFile) -> Dict[str, Any]:
    """Ingest an uploaded file: save to disk, split, embed, insert into Milvus.

    Args:
        file: The FastAPI UploadFile to process.

    Returns:
        On success: a dict with status, filename, chunk count and the
        inserted Milvus primary keys. When no text could be extracted,
        a warning dict instead.

    Raises:
        RuntimeError: If the Milvus collection was not initialized.
        Exception: Any error during splitting, embedding or insertion is
            logged and re-raised for the caller/framework to handle.
    """
    if collection is None:
        raise RuntimeError("Milvus 集合未初始化")

    # 1. Persist the upload to a temporary path.
    # mkstemp avoids NamedTemporaryFile permission issues on Windows.
    # UploadFile.filename can be None for some clients, so fall back to ""
    # to avoid a TypeError from splitext.
    suffix = os.path.splitext(file.filename or "")[1]
    fd, tmp_path = tempfile.mkstemp(suffix=suffix)

    try:
        # Close the raw descriptor; all further access goes through the path.
        os.close(fd)

        # Stream the upload to disk without loading it all into memory.
        with open(tmp_path, 'wb') as f:
            shutil.copyfileobj(file.file, f)

        # 2. Load and split into chunks (List[Document]).
        chunks = load_and_split_file(tmp_path)

        if not chunks:
            return {"status": "warning", "message": "文件中未提取到文本"}

        texts = [doc.page_content for doc in chunks]
        # Copy each chunk's metadata and tag it with the originating file.
        metadatas = []
        for doc in chunks:
            meta = doc.metadata.copy() if doc.metadata else {}
            meta["source"] = file.filename
            metadatas.append(meta)

        # 3. Generate embeddings for every chunk in one batch.
        vectors = await get_embeddings(texts)

        # 4. Insert into Milvus.
        # Column order matches the schema with auto_id enabled:
        # vector, text, metadata (the JSON field accepts a list of dicts).
        data = [
            vectors,
            texts,
            metadatas
        ]

        res = collection.insert(data)

        # Flush so the inserted rows become visible to searches.
        collection.flush()

        logger.info("文件 %s 处理完成，插入 %d 个文档块", file.filename, len(chunks))

        return {
            "status": "success",
            "filename": file.filename,
            "chunks_count": len(chunks),
            # Materialize as a plain list so the response is JSON-serializable.
            "milvus_ids": list(res.primary_keys)
        }

    except Exception as e:
        logger.error("文件处理失败: %s", e)
        raise

    finally:
        # Best-effort cleanup of the temporary file; never mask the
        # original exception with a cleanup failure.
        if os.path.exists(tmp_path):
            try:
                os.remove(tmp_path)
            except Exception as e:
                logger.warning("清理临时文件失败: %s", e)

async def chat_with_data(query: str) -> Dict[str, Any]:
    """Answer a query with RAG: embed, search Milvus, then ask the LLM.

    Args:
        query: The user's natural-language question.

    Returns:
        A dict with the generated "answer" and the "source_documents"
        (text, metadata and L2 distance score of each retrieved chunk).

    Raises:
        RuntimeError: If the collection is uninitialized, the query vector
            cannot be produced, the search fails, or generation fails.
            Underlying exceptions are chained as the cause.
    """
    if collection is None:
        raise RuntimeError("Milvus 集合未初始化")

    # 1. Embed the query (batch-of-one).
    query_vectors = await get_embeddings([query])
    if not query_vectors:
        raise RuntimeError("无法生成查询向量")

    query_vector = query_vectors[0]

    # 2. Search Milvus for the top-3 nearest chunks.
    try:
        collection.load()  # Ensure the collection is loaded into memory.

        search_params = {
            "metric_type": "L2", 
            "params": {"nprobe": 10},
        }

        results = collection.search(
            data=[query_vector], 
            anns_field="vector", 
            param=search_params, 
            limit=3,
            output_fields=["text", "metadata"]
        )
    except Exception as e:
        logger.error("Milvus 搜索失败: %s", e)
        # Chain the cause so the original traceback is preserved.
        raise RuntimeError(f"搜索失败: {e}") from e

    # Unpack the hits of the single query into context + source records.
    source_documents = []
    context_texts = []

    if results and len(results) > 0:
        for hit in results[0]:
            text = hit.entity.get("text")
            metadata = hit.entity.get("metadata")
            # L2 distance: smaller means more similar.
            score = hit.distance

            context_texts.append(text)
            source_documents.append({
                "text": text,
                "metadata": metadata,
                "score": score
            })

    # 3. Build the prompt from the retrieved context.
    if not context_texts:
        context_str = "暂无相关上下文信息。"
    else:
        context_str = "\n\n".join(context_texts)

    prompt = f"""
    基于以下上下文信息回答用户的问题。如果上下文中没有答案，请说明不知道。
    
    Context:
    {context_str}
    
    Question: 
    {query}
    """

    # 4. Generate the answer with the chat model.
    try:
        response = await aclient.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": "你是一个有用的AI助手。"},
                {"role": "user", "content": prompt}
            ],
            temperature=0.7
        )

        answer = response.choices[0].message.content

        return {
            "answer": answer,
            "source_documents": source_documents
        }
    except Exception as e:
        logger.error("生成回答失败: %s", e)
        raise RuntimeError(f"生成回答失败: {e}") from e
