from fastapi import FastAPI, HTTPException, UploadFile, File, Query, Depends, Form
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, StreamingResponse
from fastapi.staticfiles import StaticFiles
from fastapi.responses import FileResponse
from .utils.prompt import ClientMessage, convert_to_openai_messages
from .utils.rag import RAGManager
from .utils.deepseek import DeepSeekClient
from .utils.types import RAGQueryRequest, RAGQueryResponse, DocumentUploadResponse
import os
import sys
from typing import List, Optional, Dict, Any
from pathlib import Path
import shutil
import logging
from pydantic import BaseModel
from dotenv import load_dotenv
import json
import asyncio
from contextlib import asynccontextmanager

# Force UTF-8 console output so Chinese log text renders correctly.
# BUG FIX: compare the encoding name case-insensitively — platforms report
# it as "UTF-8" or "utf-8" — and tolerate a replaced stdout whose
# `encoding` attribute is None.
if (sys.stdout.encoding or "").lower() != "utf-8":
    try:
        sys.stdout.reconfigure(encoding="utf-8")
    except AttributeError:
        # reconfigure() was added in Python 3.7; older interpreters (or
        # stdout replacements without the method) are left untouched.
        pass

# --- Logging & environment ---------------------------------------------------

_LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'

# Log to both the console and a UTF-8 file so Chinese messages survive
# round-tripping through the log file.
logging.basicConfig(
    level=logging.INFO,
    format=_LOG_FORMAT,
    handlers=[
        logging.StreamHandler(),
        logging.FileHandler(filename="app.log", encoding="utf-8"),
    ],
)
logger = logging.getLogger(__name__)

# Load local development secrets (e.g. the DeepSeek API key).
load_dotenv(".env.local")

@asynccontextmanager
async def lifespan(app: FastAPI):
    """Create shared service clients at startup; log startup and shutdown."""
    logger.info("Starting the application")

    # Long-lived services are hung off app.state so request handlers can
    # reach them through the dependency functions defined below.
    manager = RAGManager(
        knowledge_base_dir="knowledge_base",
        index_storage_dir="index_storage",
    )
    app.state.rag_manager = manager
    app.state.deepseek_client = DeepSeekClient()
    logger.info("RAG manager initialized")

    yield

    logger.info("Shutting down the application")

app = FastAPI(
    title="Simple API",
    description="A simple API using FastAPI",
    version="1.0.0",
    lifespan=lifespan,
)

# Allow the local front-end dev server to call this API from the browser.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["http://localhost:3000"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# All on-disk directories live next to the package, one level up.
_project_root = Path(__file__).parent.parent

# Serve bundled front-end assets under /static, creating the folder if needed.
static_dir = _project_root / "static"
static_dir.mkdir(exist_ok=True)
app.mount("/static", StaticFiles(directory=static_dir), name="static")

# Ensure the directories used by the RAG pipeline exist.
knowledge_base_dir = _project_root / "knowledge_base"
knowledge_base_dir.mkdir(exist_ok=True)

index_storage_dir = _project_root / "index_storage"
index_storage_dir.mkdir(exist_ok=True)


class Request(BaseModel):
    """Request body for /api/chat: the client-side conversation history."""

    # NOTE(review): the class name shadows FastAPI/Starlette's `Request`;
    # harmless here since that class is not imported, but consider renaming
    # to `ChatRequest` if it ever is.
    messages: List[ClientMessage]


def get_rag_manager():
    """FastAPI dependency: the RAGManager created during app startup."""
    return app.state.rag_manager

def get_deepseek_client():
    """FastAPI dependency: the DeepSeekClient created during app startup."""
    return app.state.deepseek_client


# Demo tool implementations exposed to the model via tool calls.
def get_current_weather(location: str, unit: str = "celsius") -> Dict[str, Any]:
    """Return a canned weather report for *location* (mock implementation).

    Any unit other than "celsius" yields the fahrenheit demo temperature;
    all remaining fields are fixed demo data.
    """
    is_celsius = unit == "celsius"
    return {
        "location": location,
        "temperature": 22.5 if is_celsius else 72.5,
        "unit": unit,
        "condition": "Sunny",
        "humidity": 60,
        "wind_speed": 10,
    }


def retrieve_knowledge(query: str) -> Dict[str, Any]:
    """Look up passages relevant to *query* via the shared RAG manager."""
    return app.state.rag_manager.query(query)


# Tool registry: maps the tool name advertised to the model to its Python
# implementation (each function's __name__ matches its registered name).
available_tools = {
    fn.__name__: fn
    for fn in (get_current_weather, retrieve_knowledge)
}


@app.get("/")
def read_root():
    """Serve the RAG demo page (static/index.html)."""
    return FileResponse(static_dir / "index.html")


@app.post("/api/chat")
async def chat(
    request: Request, 
    protocol: str = Query("data"),
    rag_manager: RAGManager = Depends(get_rag_manager),
    deepseek_client: DeepSeekClient = Depends(get_deepseek_client)
):
    """Chat endpoint speaking the Vercel AI SDK stream protocols.

    Retrieves RAG context for the most recent user message, then streams a
    DeepSeek-generated answer back in either ``text`` or ``data`` protocol
    (chosen by the ``protocol`` query parameter, default ``data``).
    """
    openai_messages = convert_to_openai_messages(request.messages)

    # The newest user turn is the query we retrieve context for.
    latest_user_message = next(
        (m["content"] for m in reversed(openai_messages) if m["role"] == "user"),
        None,
    )

    context = ""
    sources = []
    if latest_user_message:
        retrieval_result = rag_manager.query(latest_user_message)
        if retrieval_result:
            # 记录检索结果，确保中文正确显示
            logger.info(f"检索到 {len(retrieval_result.get('sources', []))} 个相关文档")
            logger.info(f"上下文长度: {len(retrieval_result.get('context', ''))}")
            context = retrieval_result.get("context", "")
            sources = retrieval_result.get("sources", [])

    # BUG FIX: "charset=utf-8" alone is not a valid media type (it lacks a
    # type/subtype), so clients could mis-decode the stream. Use a complete
    # MIME type; this also makes the later manual Content-Type override
    # unnecessary.
    response = StreamingResponse(
        stream_deepseek_response(openai_messages, context, sources, protocol, deepseek_client),
        media_type="text/plain; charset=utf-8",
    )

    # Required by the Vercel AI SDK to recognise the data-stream protocol.
    if protocol == "data":
        response.headers["x-vercel-ai-data-stream"] = "v1"

    return response


@app.post("/api/rag/upload", response_model=DocumentUploadResponse)
async def upload_documents(
    files: List[UploadFile] = File(...),
    rag_manager: RAGManager = Depends(get_rag_manager)
):
    """
    Upload documents to the knowledge base.

    Only files with a supported extension (.txt/.md/.pdf/.docx/.csv) are
    saved; other parts are silently skipped. After saving, the RAG index is
    updated with the new documents.
    """
    allowed_exts = {'.txt', '.md', '.pdf', '.docx', '.csv'}
    uploaded_files = []
    try:
        for file in files:
            # A multipart part without a filename cannot be saved; also
            # guards os.path.splitext against None.
            if not file.filename:
                continue
            file_ext = os.path.splitext(file.filename)[1]
            if file_ext.lower() not in allowed_exts:
                continue

            # Drop client-supplied directory components (path traversal).
            safe_filename = Path(file.filename).name
            # NOTE: this relative path intentionally matches the
            # knowledge_base_dir the RAGManager is constructed with.
            file_path = os.path.join("knowledge_base", safe_filename)

            with open(file_path, "wb") as buffer:
                shutil.copyfileobj(file.file, buffer)

            uploaded_files.append(file_path)

        if uploaded_files:
            if rag_manager.add_documents(uploaded_files):
                return DocumentUploadResponse(
                    success=True,
                    message=f"Successfully uploaded {len(uploaded_files)} files",
                    file_paths=uploaded_files
                )
            # BUG FIX: previously an index-update failure fell through to the
            # misleading "No valid files were uploaded" message even though
            # the files had been saved successfully.
            return DocumentUploadResponse(
                success=False,
                message="Files were saved but updating the index failed",
                file_paths=uploaded_files
            )

        return DocumentUploadResponse(
            success=False,
            message="No valid files were uploaded",
            file_paths=uploaded_files
        )

    except Exception as e:
        logger.error(f"Error uploading files: {e}")
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/api/rag/query")
async def query_rag(
    request: RAGQueryRequest,
    rag_manager: RAGManager = Depends(get_rag_manager),
    deepseek_client: DeepSeekClient = Depends(get_deepseek_client)
):
    """
    Query the RAG system with natural language.

    Retrieves context for ``request.query`` and answers either as one JSON
    response or, when ``request.stream`` is set, as a Vercel AI SDK data
    stream (source parts, text parts, then a finish part).
    """
    try:
        retrieval_result = rag_manager.query(
            request.query, 
            similarity_top_k=request.similarity_top_k
        )
        
        context = retrieval_result["context"]
        sources = retrieval_result["sources"]
        
        if request.stream:
            async def generate_streaming_response():
                # Source parts: `h:<json>\n`, one per retrieved document.
                for source in sources:
                    source_obj = {
                        "sourceType": "file",
                        "id": source,
                        "title": source
                    }
                    yield f"h:{json.dumps(source_obj, ensure_ascii=False)}\n".encode('utf-8')
                
                if deepseek_client.is_available:
                    # The retrieved context is merged into the prompt inside
                    # DeepSeekClient, so only the raw query is sent here.
                    messages = [{"role": "user", "content": request.query}]
                    for chunk in deepseek_client.generate_response_stream(messages, context):
                        # Text part format: `0:<json string>\n`
                        yield f"0:{json.dumps(chunk, ensure_ascii=False)}\n".encode('utf-8')
                else:
                    # Fallback reply when DeepSeek is not configured.
                    answer = f"这是一个简单回复。我找到了相关信息：{context[:100]}..." if context else "没有找到相关信息。"
                    yield f"0:{json.dumps(answer, ensure_ascii=False)}\n".encode('utf-8')
                
                # Finish part: `d:<json>\n` with a rough token-usage estimate.
                finish_message = {
                    "finishReason": "stop",
                    "usage": {
                        "promptTokens": len(request.query) // 4,  # rough estimate
                        "completionTokens": 0  # unknown; report 0
                    }
                }
                yield f"d:{json.dumps(finish_message, ensure_ascii=False)}\n".encode('utf-8')
            
            # BUG FIX: "charset=utf-8" alone is not a valid Content-Type (it
            # lacks a type/subtype); supply a complete MIME type so clients
            # decode the UTF-8 stream correctly.
            response = StreamingResponse(
                generate_streaming_response(),
                media_type="text/plain; charset=utf-8",
            )
            # Required by the Vercel AI SDK to detect the data-stream protocol.
            response.headers["x-vercel-ai-data-stream"] = "v1"
            return response
        else:
            if deepseek_client.is_available:
                # Context merging again happens inside DeepSeekClient.
                messages = [{"role": "user", "content": request.query}]
                answer = deepseek_client.generate_response(messages, context)
            else:
                answer = f"这是一个简单回复。我找到了相关信息。" if context else "没有找到相关信息。"
            
            return RAGQueryResponse(
                answer=answer,
                context=context,
                sources=sources
            )
            
    except Exception as e:
        logger.error(f"Error querying RAG: {e}")
        raise HTTPException(status_code=500, detail=str(e))


def _source_part(source: str) -> str:
    """Format one Vercel AI SDK source part (`h:<json>\\n`) for *source*."""
    source_obj = {
        "sourceType": "file",
        "id": source,
        "title": source
    }
    return f"h:{json.dumps(source_obj, ensure_ascii=False)}\n"


def _text_part(chunk: str) -> str:
    """Format one Vercel AI SDK text part (`0:<json string>\\n`)."""
    # JSON-encode the text so Chinese characters and newlines survive.
    return f"0:{json.dumps(chunk, ensure_ascii=False)}\n"


def _finish_part(prompt_tokens: int, completion_tokens: int) -> str:
    """Format the Vercel AI SDK finish part (`d:<json>\\n`) with usage counts."""
    finish_message = {
        "finishReason": "stop",
        "usage": {
            "promptTokens": prompt_tokens,
            "completionTokens": completion_tokens
        }
    }
    return f"d:{json.dumps(finish_message, ensure_ascii=False)}\n"


async def stream_deepseek_response(
    messages: List[Dict], 
    context: str,
    sources: List[str],
    protocol: str = "data",
    deepseek_client: Optional["DeepSeekClient"] = None
):
    """Stream a reply using DeepSeek, falling back to a canned answer.

    ``protocol`` selects the wire format: ``text`` yields raw text chunks;
    ``data`` yields Vercel AI SDK stream parts (sources, text, finish).
    Any other protocol value yields nothing, matching the original behavior.

    BUG FIX: the ``deepseek_client`` annotation previously claimed a
    non-optional ``DeepSeekClient`` while defaulting to ``None``; it is now
    ``Optional`` (quoted to avoid a hard import-time dependency).
    """
    if deepseek_client and deepseek_client.is_available:
        # DeepSeekClient merges the retrieved context into the prompt itself.
        if protocol == "text":
            for chunk in deepseek_client.generate_response_stream(messages, context):
                yield chunk
                await asyncio.sleep(0.01)

        elif protocol == "data":
            for source in sources:
                yield _source_part(source)
            for chunk in deepseek_client.generate_response_stream(messages, context):
                if chunk:
                    yield _text_part(chunk)
            # Token counts are rough estimates; completion size is unknown.
            yield _finish_part(len(str(messages)) // 4, 0)
    else:
        # Fallback: synthesize a simple reply from the retrieved context.
        if context:
            full_text = f"根据我检索到的信息:\n\n{context}\n\n以下是回答:\n根据检索到的文档，我可以提供以下信息：{context[:200]}..."
            if sources:
                full_text += f"\n\n参考来源: {', '.join(sources)}"
        else:
            full_text = "我没有找到与您问题相关的信息。请尝试用其他方式描述您的问题，或者上传更多相关文档。"

        # Emit the canned text in 20-character chunks to simulate streaming.
        chunks = [full_text[i:i + 20] for i in range(0, len(full_text), 20)]

        if protocol == "text":
            for chunk in chunks:
                yield chunk
                await asyncio.sleep(0.1)

        elif protocol == "data":
            for source in sources:
                yield _source_part(source)
            for chunk in chunks:
                yield _text_part(chunk)
                await asyncio.sleep(0.1)
            yield _finish_part(0, len(full_text) // 4)


if __name__ == "__main__":
    # Run a development server when this module is executed directly.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8080)
