#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
最小化的 FastAPI 示例服务器，仅用于快速实验/调试。
生产场景请使用 ``backend.api.main``（标准端口 8001 + 统一 CORS 配置）。
"""
import logging
import os
import sys
import time
import uuid
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional

import requests
import uvicorn
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel

# 添加项目根目录到路径
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# 导入配置
from backend.common.config import ConfigManager
from backend.middleware.cors_helper import configure_cors

# Obtain the shared configuration instance and the Ollama endpoint from it
config = ConfigManager()
ollama_base_url = config.llm_providers.ollama.base_url

# Configure logging (module-level logger, INFO by default)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Create the FastAPI application with custom docs/openapi paths under /api
app = FastAPI(
    title="AI聊天应用",
    description="基于LLM的智能聊天和RAG应用",
    version="1.0.0",
    docs_url="/api/docs",
    redoc_url="/api/redoc",
    openapi_url="/api/openapi.json"
)

# Apply the project's shared CORS policy (see backend.middleware.cors_helper)
configure_cors(app)

# Data models
class Message(BaseModel):
    """A single chat message stored within a session."""
    id: str          # message UUID (string form)
    session_id: str  # owning session's id
    role: str        # "user" or "assistant" (as produced by send_message)
    content: str     # message text
    created_at: str  # timestamp string, e.g. "2023-01-01T00:00:00Z"

class Session(BaseModel):
    """Metadata for one chat session."""
    id: str
    title: str          # derived from the first user message in send_message
    created_at: str     # timestamp string
    updated_at: str     # used for most-recent-first ordering in get_sessions
    message_count: int  # number of messages currently stored for the session

# Authentication-related models
class UserPublic(BaseModel):
    """User representation that is safe to return to clients (no password)."""
    id: str
    username: str
    nickname: Optional[str] = None
    created_at: Optional[str] = None

class RegisterRequest(BaseModel):
    """Payload for /v1/auth/register."""
    username: str
    password: str
    nickname: Optional[str] = None  # defaults to username when omitted

class LoginRequest(BaseModel):
    """Payload for /v1/auth/login."""
    username: str
    password: str

class TokenResponse(BaseModel):
    """Login response: an opaque bearer token plus the public user record."""
    token: str
    user: UserPublic

# In-memory mock data stores (contents are lost on restart; debug use only)
sessions_db: Dict[str, Session] = {}
messages_db: Dict[str, List[Message]] = {}
users_db: Dict[str, Dict] = {}  # keyed by username; simple user storage

# 健康检查端点
@app.get("/v1/health", tags=["系统"])
async def health_check():
    """Report service health.

    Returns a static feature map plus the current Unix timestamp.
    (The timestamp was previously a hard-coded placeholder, 1234567890.)
    """
    return {
        "status": "healthy",
        "version": "v1",
        "timestamp": int(time.time()),  # real clock time instead of a fixed stub
        "features": {
            "rag_enabled": True,
            "tools_enabled": True
        }
    }

# Register a new user
@app.post("/v1/auth/register", response_model=UserPublic, tags=["认证"])
async def register(payload: RegisterRequest):
    """Register a new user and return its public representation.

    Raises:
        HTTPException: 409 when the username is already taken.
    """
    if payload.username in users_db:
        # HTTPException is imported at module level now, not inside the handler.
        raise HTTPException(status_code=409, detail="Username already exists")

    user_id = str(uuid.uuid4())
    # Real creation time (UTC) instead of the previous hard-coded placeholder.
    now = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    user = {
        "id": user_id,
        "username": payload.username,
        "password": payload.password,  # NOTE: a real application must store a hash
        "nickname": payload.nickname or payload.username,
        "created_at": now
    }
    users_db[payload.username] = user

    return UserPublic(
        id=user_id,
        username=payload.username,
        nickname=payload.nickname or payload.username,
        created_at=now
    )

# User login
@app.post("/v1/auth/login", response_model=TokenResponse, tags=["认证"])
async def login(payload: LoginRequest):
    """Authenticate a user and issue an opaque session token.

    Raises:
        HTTPException: 401 for an unknown username or a wrong password.
            Both cases return the same message so the response does not
            reveal which part of the credentials failed.
    """
    user = users_db.get(payload.username)
    # NOTE: a real application must compare password hashes, not plaintext.
    if user is None or user["password"] != payload.password:
        raise HTTPException(status_code=401, detail="Invalid credentials")

    # Generate a simple opaque token (a real application should use JWT).
    token = f"token_{uuid.uuid4()}"

    return TokenResponse(
        token=token,
        user=UserPublic(
            id=user["id"],
            username=user["username"],
            nickname=user["nickname"],
            created_at=user["created_at"]
        )
    )

# 用户登出
@app.post("/v1/auth/logout", tags=["认证"])
async def logout():
    """Log the user out (stateless mock: nothing to invalidate server-side)."""
    result = dict(success=True)
    return result

# List all sessions
@app.get("/v1/sessions", tags=["会话"])
async def get_sessions(limit: int = 20, offset: int = 0):
    """Return sessions ordered most-recently-updated first, paginated."""
    # Newest activity first.
    ordered = sorted(
        sessions_db.values(),
        key=lambda s: s.updated_at,
        reverse=True,
    )
    # Apply the offset/limit window.
    window = ordered[offset:offset + limit]
    return {"sessions": window}

# Create a new session
@app.post("/v1/sessions", tags=["会话"])
async def create_session():
    """Create a new, empty chat session and return it."""
    session_id = str(uuid.uuid4())
    # Real creation time (UTC) instead of the previous hard-coded placeholder.
    now = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    new_session = Session(
        id=session_id,
        title="新对话",
        created_at=now,
        updated_at=now,
        message_count=0
    )
    sessions_db[session_id] = new_session
    messages_db[session_id] = []
    return new_session

# Delete a session
@app.delete("/v1/sessions/{session_id}", tags=["会话"])
async def delete_session(session_id: str):
    """Delete a session and its messages.

    Raises:
        HTTPException: 404 when the session does not exist. (The previous
            version returned HTTP 200 with an ``{"error": ...}`` body,
            which clients could easily mistake for success.)
    """
    if session_id not in sessions_db:
        raise HTTPException(status_code=404, detail="会话不存在")
    del sessions_db[session_id]
    # Remove the message list too; pop tolerates it already being absent.
    messages_db.pop(session_id, None)
    return {"message": "会话已删除"}

# Get messages
@app.get("/v1/sessions/{session_id}/messages", tags=["消息"])
async def get_messages(session_id: str, limit: int = 200, offset: int = 0):
    """Return a session's messages in creation order, paginated.

    Uses ``sorted()`` on a copy: the previous in-place ``list.sort()``
    mutated the shared message store as a side effect of a read-only
    endpoint.
    """
    if session_id not in messages_db:
        return {"messages": []}

    ordered = sorted(messages_db[session_id], key=lambda m: m.created_at)
    return {"messages": ordered[offset:offset + limit]}

# Send a message
@app.post("/v1/sessions/{session_id}/messages", tags=["消息"])
async def send_message(session_id: str, message: Dict[str, Any]):
    """Append a user message and a mock assistant reply to a session.

    Creates the session on the fly when it does not exist. Timestamps
    are real UTC times (previously hard-coded placeholders), and the
    session title only gets an ellipsis when the content was actually
    truncated (previously "..." was always appended).
    """
    now = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

    if session_id not in sessions_db:
        # Auto-create so clients can post without a prior POST /v1/sessions.
        sessions_db[session_id] = Session(
            id=session_id,
            title="新对话",
            created_at=now,
            updated_at=now,
            message_count=0
        )
        messages_db[session_id] = []

    # Append the user's message.
    user_message = Message(
        id=str(uuid.uuid4()),
        session_id=session_id,
        role="user",
        content=message.get("content", ""),
        created_at=now
    )
    messages_db[session_id].append(user_message)

    # Append a canned assistant reply (a real app would call the LLM here).
    # Equal timestamps are fine: get_messages uses a stable sort, so the
    # user message keeps preceding the reply.
    ai_message = Message(
        id=str(uuid.uuid4()),
        session_id=session_id,
        role="assistant",
        content="这是一个模拟回复。实际应用中，这里会调用LLM生成回复。",
        created_at=now
    )
    messages_db[session_id].append(ai_message)

    # Update session bookkeeping.
    session = sessions_db[session_id]
    session.updated_at = now
    session.message_count = len(messages_db[session_id])
    if session.message_count == 2:  # first exchange: derive the title from it
        title = message.get("content", "新对话")
        # Only add an ellipsis when the title was actually truncated.
        session.title = title[:20] + ("..." if len(title) > 20 else "")

    return {"message": "消息已发送"}

# Ollama health check
@app.get("/v1/llm/ollama/health", tags=["LLM"])
async def ollama_health():
    """Probe Ollama's /api/tags endpoint and report reachability."""
    try:
        # Generous timeout: a local Ollama can be slow to answer its first request.
        resp = requests.get(f"{ollama_base_url}/api/tags", timeout=10)
    except requests.exceptions.Timeout:
        return {"success": False, "status": "timeout", "error": "连接Ollama服务超时"}
    except requests.exceptions.ConnectionError:
        return {"success": False, "status": "connection_error", "error": "无法连接到Ollama服务"}
    except Exception as e:
        return {"success": False, "status": "unhealthy", "error": f"无法连接到Ollama服务: {str(e)}"}

    if resp.status_code == 200:
        return {"success": True, "status": "healthy", "base_url": ollama_base_url}
    return {"success": False, "status": "unhealthy", "error": f"Ollama API返回状态码: {resp.status_code}"}

# List available Ollama models
@app.get("/v1/llm/ollama/models", tags=["LLM"])
async def get_ollama_models():
    """Fetch the model list from Ollama and reshape it for the frontend."""
    try:
        resp = requests.get(f"{ollama_base_url}/api/tags", timeout=5)
        if resp.status_code != 200:
            return {"error": f"Ollama API返回状态码: {resp.status_code}"}
        raw_models = resp.json().get("models", [])
        # Project each entry onto exactly the fields the frontend expects.
        model_list = [
            {
                "name": m.get("name", ""),
                "model": m.get("model", m.get("name", "")),
                "size": m.get("size", 0),
                "digest": m.get("digest", ""),
                "modified_at": m.get("modified_at", ""),
            }
            for m in raw_models
        ]
        return {"models": model_list}
    except Exception as e:
        return {"error": f"无法获取模型列表: {str(e)}"}

# 获取当前使用的模型
@app.get("/v1/llm/ollama/current-model", tags=["LLM"])
async def get_current_model(session_id: str):
    """Return the model used by the given session (mock: always the default)."""
    # Simplified implementation: no per-session model tracking yet.
    default_model = "qwen2.5:7B"
    return {"model": default_model}

# 设置使用的模型
@app.post("/v1/llm/ollama/use-model", tags=["LLM"])
async def use_model(model: str, session_id: str):
    """Record the model a session should use (mock: always reports success)."""
    # Simplified implementation: nothing is persisted; echo the choice back.
    return dict(success=True, model=model)

# 聊天端点
@app.post("/v1/llm/ollama/chat", tags=["LLM"])
async def chat_with_llm(request: Dict[str, Any]):
    """Chat with the LLM (mock implementation).

    Request keys: ``model`` (default "qwen2.5:7B"), ``stream`` (default
    True); ``messages`` and ``session_id`` are accepted but ignored by
    this mock. Streams an OpenAI-style SSE response when ``stream`` is
    true, otherwise returns a single chat-completion payload.
    """
    try:
        model = request.get("model", "qwen2.5:7B")
        stream = request.get("stream", True)

        if stream:
            # Imported lazily so the non-streaming path has no extra deps.
            from fastapi.responses import StreamingResponse
            import json

            async def generate_stream():
                # Simulated streaming: emit the canned reply one character at a time.
                response_text = f"这是来自模型 {model} 的模拟回复。实际应用中，这里会调用Ollama API生成真实的回复。"

                for char in response_text:
                    chunk = {
                        "id": f"chatcmpl-{uuid.uuid4()}",
                        "object": "chat.completion.chunk",
                        "created": 1234567890,
                        "model": model,
                        "choices": [
                            {
                                "index": 0,
                                "delta": {"content": char},
                                "finish_reason": None
                            }
                        ]
                    }
                    yield f"data: {json.dumps(chunk)}\n\n"

                # Terminal chunk with finish_reason, then the SSE sentinel.
                end_chunk = {
                    "id": f"chatcmpl-{uuid.uuid4()}",
                    "object": "chat.completion.chunk",
                    "created": 1234567890,
                    "model": model,
                    "choices": [
                        {
                            "index": 0,
                            "delta": {},
                            "finish_reason": "stop"
                        }
                    ]
                }
                yield f"data: {json.dumps(end_chunk)}\n\n"
                yield "data: [DONE]\n\n"

            # Declare the SSE media type directly. The previous version set
            # media_type="text/plain" and then overrode it with a Content-Type
            # header, which was contradictory and relied on header precedence.
            return StreamingResponse(
                generate_stream(),
                media_type="text/event-stream",
                headers={
                    "Cache-Control": "no-cache",
                    "Connection": "keep-alive"
                }
            )

        # Non-streaming: a single OpenAI-style completion payload.
        return {
            "id": f"chatcmpl-{uuid.uuid4()}",
            "object": "chat.completion",
            "created": 1234567890,
            "model": model,
            "choices": [
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": f"这是来自模型 {model} 的模拟回复。实际应用中，这里会调用Ollama API生成真实的回复。"
                    },
                    "finish_reason": "stop"
                }
            ],
            "usage": {
                "prompt_tokens": 10,
                "completion_tokens": 10,
                "total_tokens": 20
            }
        }

    except Exception as e:
        logger.error(f"聊天请求处理错误: {str(e)}")
        return {"error": f"处理聊天请求时出错: {str(e)}"}

if __name__ == "__main__":
    # Dev entry point: hot-reload enabled, binds all interfaces on port 8001.
    # NOTE(review): "minimal_server:app" assumes this file is named
    # minimal_server.py — confirm the filename matches.
    logger.info("正在启动最小化后端服务器...")
    uvicorn.run(
        "minimal_server:app",
        host="0.0.0.0",
        port=8001,
        reload=True,
        log_level="info"
    )