import logging
import uuid
from typing import List, Optional

from fastapi import APIRouter, UploadFile, File, Depends, HTTPException, BackgroundTasks, Query
from sqlalchemy import select, func
from sqlalchemy.ext.asyncio import AsyncSession
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.chat_models import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser

from app.core.database import get_db
from app.core.config import settings
from app.core.document import Document, DocumentStatus
from app.core.vector import MilvusHandler
from app.services.ingestion import IngestionService
from app.schemas.rag import (
    DocumentResponse, 
    DocumentListResponse, 
    ChatRequest, 
    ChatResponse, 
    SourceDocument
)

# Module-level router: all endpoints below are mounted under /api/v1 and
# grouped under the "RAG" tag in the OpenAPI docs.
router = APIRouter(prefix="/api/v1", tags=["RAG"])
# Module-level logger named after this module (standard logging convention).
logger = logging.getLogger(__name__)

# --- Document Endpoints ---

@router.post("/documents", response_model=DocumentResponse, status_code=201)
async def upload_document(
    background_tasks: BackgroundTasks,
    file: UploadFile = File(...),
):
    """
    Upload a document for ingestion.

    Delegates storage and background processing to ``IngestionService``;
    returns the created document record (201).

    Raises:
        HTTPException: propagated unchanged if the service raises one
            (e.g. validation errors); otherwise 500 on unexpected failure.
    """
    try:
        doc = await IngestionService.upload_and_create_document(file, background_tasks)
        return doc
    except HTTPException:
        # Preserve status codes deliberately raised by the service (e.g. 400/413);
        # the blanket handler below must not convert them into 500s.
        raise
    except Exception as e:
        logger.error(f"Upload failed: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Upload failed: {str(e)}")

@router.get("/documents", response_model=DocumentListResponse)
async def list_documents(
    skip: int = Query(0, ge=0),
    limit: int = Query(20, ge=1, le=100),
    status: Optional[DocumentStatus] = None,
    db: AsyncSession = Depends(get_db),
):
    """
    List uploaded documents with pagination.

    Args:
        skip: Offset into the result set (>= 0).
        limit: Page size (1-100).
        status: Optional filter on document ingestion status.
        db: Async database session (injected).

    Returns:
        DocumentListResponse with the total matching count and one page of items.
    """
    query = select(Document)
    # Explicit None check: a truthiness test would silently drop the filter
    # for any enum member whose underlying value is falsy.
    if status is not None:
        query = query.where(Document.status == status)

    # Total count over the *filtered* query (pagination-independent).
    count_query = select(func.count()).select_from(query.subquery())
    total = await db.scalar(count_query)

    # Newest first, then apply the requested page window.
    query = query.order_by(Document.created_at.desc()).offset(skip).limit(limit)
    result = await db.execute(query)
    items = result.scalars().all()

    return DocumentListResponse(total=total or 0, items=items)

# --- Chat Endpoints ---

@router.post("/chat", response_model=ChatResponse)
async def chat_with_documents(
    request: ChatRequest,
    db: AsyncSession = Depends(get_db),
):
    """
    Chat with your documents (retrieval-augmented generation).

    Pipeline: embed the query, retrieve the top-k similar chunks from
    Milvus, then ask the LLM to answer using only that context.

    Args:
        request: The user query and retrieval parameters (``top_k``).
        db: Async database session (injected; currently unused — kept for
            interface stability and future enrichment from the DB).

    Returns:
        ChatResponse with the generated answer and the source chunks used.

    Raises:
        HTTPException: 500 if embedding, vector search, or LLM generation fails.
    """
    query_vector = await _embed_query(request.query)
    hits = _search_similar_chunks(query_vector, request.top_k)

    if not hits:
        return ChatResponse(
            answer="I couldn't find any relevant information in the documents.",
            sources=[],
        )

    context_str, sources = _build_context(hits)
    answer = await _generate_answer(context_str, request.query)
    return ChatResponse(answer=answer, sources=sources)


async def _embed_query(query: str) -> List[float]:
    """Vectorize the user query with the configured embedding model."""
    try:
        embeddings = OpenAIEmbeddings(
            model=settings.EMBEDDING_MODEL,
            openai_api_key=settings.OPENAI_API_KEY,
            base_url=settings.OPENAI_BASE_URL,
        )
        return await embeddings.aembed_query(query)
    except Exception as e:
        logger.error(f"Embedding generation failed: {e}")
        raise HTTPException(status_code=500, detail="Failed to process query")


def _search_similar_chunks(query_vector: List[float], top_k: int):
    """Run a single-query ANN search in Milvus and return its hit list."""
    try:
        collection = MilvusHandler.get_collection()
        search_params = {
            "metric_type": "L2",
            "params": {"nprobe": 10},
        }
        # collection.search is a batch API returning one result list per
        # input vector; we send exactly one vector, so take results[0].
        results = collection.search(
            data=[query_vector],
            anns_field="vector",
            param=search_params,
            limit=top_k,
            output_fields=["text", "metadata"],
        )
        return results[0]
    except Exception as e:
        logger.error(f"Milvus search failed: {e}")
        raise HTTPException(status_code=500, detail="Vector search failed")


def _build_context(hits) -> tuple:
    """Turn Milvus hits into (LLM context string, SourceDocument list).

    Filenames are read from the chunk metadata stored at ingestion time,
    so no DB round-trip is needed here.
    """
    context_parts = []
    sources = []
    for hit in hits:
        # Fields requested via output_fields are exposed on hit.entity.
        # Guard against a missing 'text' field: None would crash str.join.
        text = hit.entity.get('text') or ""
        meta = hit.entity.get('metadata') or {}
        # L2 distance: smaller is better (would be larger-is-better for IP).
        score = hit.distance

        context_parts.append(text)
        sources.append(SourceDocument(
            filename=meta.get('filename', 'Unknown'),
            page_content=text,
            score=score,
            metadata=meta,
        ))

    return "\n\n---\n\n".join(context_parts), sources


async def _generate_answer(context_str: str, question: str) -> str:
    """Ask the LLM to answer *question* using only *context_str*."""
    try:
        llm = ChatOpenAI(
            # Configurable via settings when available; falls back to the
            # previous hard-coded default for backward compatibility.
            model=getattr(settings, "LLM_MODEL", "gpt-3.5-turbo"),
            temperature=0,
            openai_api_key=settings.OPENAI_API_KEY,
            base_url=settings.OPENAI_BASE_URL,
        )

        prompt_template = ChatPromptTemplate.from_template("""
        You are a helpful assistant. Answer the question based ONLY on the following context:
        
        <context>
        {context}
        </context>
        
        Question: {question}
        """)

        chain = prompt_template | llm | StrOutputParser()
        return await chain.ainvoke({"context": context_str, "question": question})
    except Exception as e:
        logger.error(f"LLM call failed: {e}")
        raise HTTPException(status_code=500, detail="Failed to generate answer")

