"""Main FastAPI application for AI service."""

from contextlib import asynccontextmanager
import asyncio
import json
import logging
from datetime import datetime
from uuid import uuid4
from typing import Optional
from fastapi import FastAPI, HTTPException, WebSocket, WebSocketDisconnect, Query
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from motor.motor_asyncio import AsyncIOMotorClient

from ..core.config import settings
from ..core.init_indexes import init_all_indexes
from ..chains.generation import ContentGenerationChain
from ..chains.memory import ConversationMemory
from ..prompts.manager import PromptManager
from ..loaders.hot_data import HotDataLoader
from ..metrics.prometheus import PrometheusMetrics
from ..metrics.collector import MetricsCollector
from ..streaming.websocket_manager import websocket_manager
from .streaming_routes import router as streaming_router
from .model_discovery import router as model_discovery_router
from .model_config import router as model_config_router
from .model_health import router as model_health_router
from .model_metadata import router as model_metadata_router
from .model_templates import router as model_templates_router
from .template_sync import router as template_sync_router
from .key_management import router as key_management_router
from .hot_content_routes import router as hot_content_router
from .quality_routes import router as quality_router
from .template_routes import router as template_routes_router
from .file_routes import router as file_routes_router
from .brainstorm_routes import router as brainstorm_router
from .hints_routes import router as hints_router

# Configure root logging from settings (e.g. "INFO", "DEBUG").
logging.basicConfig(level=getattr(logging, settings.log_level))
logger = logging.getLogger(__name__)

# Global singletons, populated during application startup in lifespan()
# and read by the route handlers below. They remain None until startup runs,
# which is why handlers guard with 503 "not initialized" responses.
mongodb_client = None        # AsyncIOMotorClient connected to settings.mongodb_url
generation_chain = None      # ContentGenerationChain built over the MongoDB client
conversation_memory = None   # ConversationMemory built over the MongoDB client
prompt_manager = None        # PromptManager built over the MongoDB client
hot_data_loader = None       # HotDataLoader built over the MongoDB client
prometheus_metrics = None    # PrometheusMetrics exporter (started if metrics_enabled)
metrics_collector = None     # MetricsCollector backing the /metrics endpoint


@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan manager.

    Startup: connect to MongoDB, ensure indexes, construct the generation /
    memory / prompt / hot-data components, create the metrics objects, start
    the WebSocket manager and (optionally) the Prometheus exporter.

    Shutdown (after ``yield``): stop the WebSocket manager and close the
    MongoDB client.
    """
    global mongodb_client, generation_chain, conversation_memory
    global prompt_manager, hot_data_loader, prometheus_metrics, metrics_collector
    
    # Startup
    logger.info("Starting AI service...")
    
    # Initialize MongoDB client and select the configured database.
    mongodb_client = AsyncIOMotorClient(settings.mongodb_url)
    db = mongodb_client[settings.mongodb_database]

    # Ensure database indexes; a failure is logged but not fatal, so the
    # service can still start (e.g. against an already-indexed database).
    try:
        await init_all_indexes(db)
        logger.info("Database indexes initialized")
    except Exception as e:
        logger.warning(f"Failed to initialize indexes: {e}")

    # Initialize components — all share the same MongoDB client.
    generation_chain = ContentGenerationChain(mongodb_client)
    conversation_memory = ConversationMemory(mongodb_client)
    prompt_manager = PromptManager(mongodb_client)
    hot_data_loader = HotDataLoader(mongodb_client)
    
    # Initialize metrics objects (exporter + in-process collector).
    prometheus_metrics = PrometheusMetrics(service_name="ai_service")
    metrics_collector = MetricsCollector()
    
    # Start WebSocket manager (used by the streaming/WS routes).
    await websocket_manager.start()
    
    # Start Prometheus exporter.
    # NOTE(review): start_http_server_async() is not awaited — presumably it
    # starts the server in the background; confirm it is not a coroutine.
    if settings.metrics_enabled:
        prometheus_metrics.start_http_server_async(settings.prometheus_port)
    
    logger.info("AI service started successfully")
    
    # Control returns to FastAPI while the app serves requests.
    yield
    
    # Shutdown
    logger.info("Shutting down AI service...")
    
    # Stop WebSocket manager before dropping the DB connection.
    await websocket_manager.stop()
    
    if mongodb_client:
        mongodb_client.close()
    logger.info("AI service stopped")


# Create FastAPI app; `lifespan` wires up startup/shutdown above.
app = FastAPI(
    title="AI Content Generation Service",
    version="0.1.0",
    description="LangChain-based AI service for content generation",
    lifespan=lifespan
)

# Add CORS middleware.
# NOTE(review): wildcard origins combined with allow_credentials=True is
# rejected by browsers and unsafe in production — restrict allow_origins.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Configure appropriately for production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Include routers
app.include_router(streaming_router, prefix="/api/v1")
app.include_router(hot_content_router, prefix="/api/v1")  # hot/trending content routes
app.include_router(quality_router)  # quality scoring routes
app.include_router(model_discovery_router)
app.include_router(model_config_router)
app.include_router(model_health_router)
app.include_router(model_metadata_router)
app.include_router(model_templates_router)
app.include_router(template_sync_router)
app.include_router(key_management_router)
app.include_router(template_routes_router)  # template management routes
app.include_router(file_routes_router)  # file processing routes
app.include_router(brainstorm_router)  # brainstorming routes
app.include_router(hints_router, prefix="/api/v1/ai")  # AI hints routes


@app.get("/health")
async def health_check():
    """Liveness probe: report static service identity and health.

    Touches no dependencies or global state, so it responds successfully
    even before startup initialization completes.
    """
    info = {"status": "healthy"}
    info["service"] = "ai-service"
    info["version"] = "0.1.0"
    return info


@app.get("/models")
async def get_available_models():
    """List the model names the built-in generation chain can serve.

    Raises:
        HTTPException: 503 when called before startup initialized the chain.
    """
    if not generation_chain:
        raise HTTPException(status_code=503, detail="Service not initialized")

    return {
        "models": generation_chain.get_available_models(),
        "default": settings.default_model,
    }


@app.get("/metrics")
async def get_metrics():
    """Return the in-process metrics summary.

    Raises:
        HTTPException: 503 when the collector has not been initialized yet.
    """
    collector = metrics_collector
    if not collector:
        raise HTTPException(status_code=503, detail="Metrics not initialized")

    summary = collector.get_summary()
    return summary


@app.post("/ai/generate")
async def generate_content(request: dict):
    """Generate AI content via a caller-supplied model API or the built-in chain.

    Expected request keys:
        model (str): model name, default "gpt-4".
        messages (list[dict]): chat messages with "role"/"content"; required.
        parameters (dict): optional sampling parameters (temperature, top_p,
            max_tokens; presence/frequency penalties are accepted but unused).
        stream (bool): when True, respond with a text/event-stream.
        provider / api_key / api_base: optional custom model API configuration;
            when both api_key and api_base are present the custom API is used.

    Returns:
        StreamingResponse for stream=True, otherwise an OpenAI-style JSON body.

    Raises:
        HTTPException: 400 on invalid input, 503 when no model is available,
            500 when generation or request processing fails.
    """
    # Module level only imports `datetime` from datetime; timezone is needed
    # for a correct UTC epoch below.
    from datetime import timezone

    try:
        if not generation_chain:
            raise HTTPException(status_code=503, detail="Service not initialized")

        # Core request fields.
        model = request.get("model", "gpt-4")
        messages = request.get("messages", [])
        parameters = request.get("parameters", {})
        stream = request.get("stream", False)

        # Optional custom model configuration passed by the frontend.
        provider = request.get("provider")
        api_key = request.get("api_key")
        api_base = request.get("api_base")

        # Fix: log only the key *length*, never key material (the previous
        # version logged the first 20 characters of the API key).
        logger.info(
            f"Request debug - Provider: {provider}, "
            f"API Key length: {len(api_key) if api_key else 0}"
        )

        if not messages:
            raise HTTPException(status_code=400, detail="Messages are required")

        # Sampling parameters with defaults.
        temperature = parameters.get("temperature", 0.7)
        top_p = parameters.get("top_p", 0.9)
        max_tokens = parameters.get("max_tokens", 1500)
        # NOTE(review): presence_penalty / frequency_penalty / prompt_id /
        # prompt_version are accepted in the request but never forwarded to
        # either backend; removed the dead extraction.

        _validate_sampling_params(temperature, top_p, max_tokens)

        # Render the chat messages into the text prompt both backends consume.
        conversation_prompt = _build_conversation_prompt(messages)

        logger.info(f"[Generate] Total prompt length: {len(conversation_prompt)}")
        logger.info(f"[Generate] Prompt preview (first 500 chars): {conversation_prompt[:500]}")
        logger.info(f"[Generate] Messages count: {len(messages)}")
        for i, msg in enumerate(messages):
            logger.info(
                f"[Generate] Message {i+1} - Role: {msg.get('role')}, "
                f"Content length: {len(msg.get('content', ''))}"
            )

        try:
            if stream:
                # Streaming generation (SSE).
                logger.info(
                    f"Stream generation request - Model: {model}, "
                    f"Has API Key: {bool(api_key)}, Has API Base: {bool(api_base)}"
                )

                if api_key and api_base:
                    # Custom model API streaming.
                    logger.info(f"Using custom API for streaming: {provider or 'unknown'} - {model}")
                    return StreamingResponse(
                        generate_streaming_content_custom(
                            model, conversation_prompt, temperature, top_p, max_tokens,
                            api_key=api_key, api_base=api_base, provider=provider
                        ),
                        media_type="text/event-stream"
                    )

                # Built-in chain streaming; verify at least one model exists.
                logger.info(f"Using built-in generation chain for streaming: {model}")
                available_models = generation_chain.get_available_models() if generation_chain else []
                logger.info(f"Available built-in models: {available_models}")

                if not available_models:
                    logger.error("No built-in models available and no custom API config provided")
                    raise HTTPException(
                        status_code=503,
                        detail="No AI models available. Please configure API keys in environment variables or provide custom API configuration."
                    )

                return StreamingResponse(
                    generate_streaming_content(
                        generation_chain, model, conversation_prompt, temperature, top_p, max_tokens
                    ),
                    media_type="text/event-stream"
                )

            # Non-streaming generation.
            if api_key and api_base:
                # Caller-provided model config (DeepSeek, Qwen, Claude, ...).
                content = await call_custom_model_api(
                    conversation_prompt, model, temperature, max_tokens,
                    api_key=api_key, api_base=api_base, provider=provider
                )
            else:
                # The built-in chain only exposes a streaming API; collect chunks.
                content_chunks = []
                async for chunk in generation_chain.stream(
                    prompt=conversation_prompt,
                    model_name=model,
                    temperature=temperature,
                    top_p=top_p,
                    max_tokens=max_tokens
                ):
                    content_chunks.append(chunk)
                content = "".join(content_chunks)

            # Rough token accounting (whitespace word count, not a tokenizer).
            prompt_tokens = len(conversation_prompt.split())
            completion_tokens = len(content.split())
            total_tokens = prompt_tokens + completion_tokens

            # OpenAI-style response envelope.
            # Fix: datetime.utcnow().timestamp() treats the naive datetime as
            # *local* time and yields a wrong epoch off-UTC; use an aware one.
            return {
                "id": f"gen_{uuid4().hex[:12]}",
                "content": content,
                "usage": {
                    "prompt_tokens": prompt_tokens,
                    "completion_tokens": completion_tokens,
                    "total_tokens": total_tokens
                },
                "model": model,
                "created": int(datetime.now(timezone.utc).timestamp()),
                "choices": [{
                    "message": {
                        "role": "assistant",
                        "content": content
                    },
                    "finish_reason": "stop"
                }]
            }

        except HTTPException:
            # Fix: previously the broad handler below also caught deliberate
            # HTTPExceptions (e.g. the 503 above) and converted them to 500s.
            raise
        except Exception as ai_error:
            logger.error(f"AI generation failed: {ai_error}")
            raise HTTPException(
                status_code=500,
                detail=f"AI model generation failed: {str(ai_error)}"
            )

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Content generation request failed: {e}")
        raise HTTPException(status_code=500, detail=f"Request processing failed: {str(e)}")


def _validate_sampling_params(temperature, top_p, max_tokens):
    """Reject sampling parameters outside the supported ranges with a 400."""
    if not (0 <= temperature <= 2):
        raise HTTPException(status_code=400, detail="Temperature must be between 0 and 2")
    if not (0 <= top_p <= 1):
        raise HTTPException(status_code=400, detail="Top_p must be between 0 and 1")
    if not (1 <= max_tokens <= 8192):
        raise HTTPException(status_code=400, detail="Max_tokens must be between 1 and 8192")


def _build_conversation_prompt(messages):
    """Render chat messages into a "System:/Human:/Assistant:" text prompt.

    Messages with unknown roles are skipped. A trailing "Assistant: " cue is
    appended so the model continues speaking as the assistant.
    """
    prompt = ""
    for msg in messages:
        role = msg.get("role", "user")
        content = msg.get("content", "")
        if role == "system":
            prompt += f"System: {content}\n\n"
        elif role == "user":
            prompt += f"Human: {content}\n\n"
        elif role == "assistant":
            prompt += f"Assistant: {content}\n\n"
    return prompt + "Assistant: "


async def call_custom_model_api(prompt: str, model: str, temperature: float, max_tokens: int,
                                api_key: str, api_base: str, provider: str = None) -> str:
    """Call a user-configured model API (OpenAI-compatible or Anthropic format).

    Args:
        prompt: "System:/Human:/Assistant:" formatted conversation text.
        model: model identifier sent to the remote API.
        temperature: sampling temperature.
        max_tokens: completion token budget.
        api_key: caller-supplied secret; never logged.
        api_base: API base URL; the chat endpoint path is appended if missing.
        provider: optional provider hint ("claude"/"anthropic" selects the
            Anthropic request/response format).

    Returns:
        The generated text, or a human-readable error string on failure —
        callers embed the return value directly, so errors are not raised.
    """
    import aiohttp

    # Claude/Anthropic is detected from either the provider hint or the model name.
    is_claude = (provider and provider.lower() in ['claude', 'anthropic']) or (model and 'claude' in model.lower())

    api_base = _ensure_chat_endpoint(api_base, is_claude)

    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json"
    }
    if is_claude:
        # Fix: the official Anthropic API authenticates via x-api-key and
        # requires an anthropic-version header; the Bearer header is kept for
        # OpenAI-style Claude proxies (backward compatible).
        headers["x-api-key"] = api_key
        headers["anthropic-version"] = "2023-06-01"

    messages = _prompt_to_messages(prompt)

    logger.info(f"[call_custom_model_api] Parsed messages count: {len(messages)}")
    for i, msg in enumerate(messages):
        logger.info(f"[call_custom_model_api] Message {i+1} - Role: {msg['role']}, Content length: {len(msg['content'])}")

    if is_claude:
        # Anthropic request format: max_tokens is mandatory; no stream flag here.
        payload = {
            "model": model,
            "messages": messages,
            "max_tokens": max_tokens
        }
        if temperature is not None:
            payload["temperature"] = temperature
    else:
        # OpenAI-compatible request format.
        payload = {
            "model": model,
            "messages": messages,
            "temperature": temperature,
            "max_tokens": max_tokens,
            "stream": False
        }

    try:
        logger.info(f"Calling custom model API: {provider} - {model} at {api_base}")
        logger.info(f"Is Claude: {is_claude}, Payload keys: {list(payload.keys())}")
        # Fix: do not log API key material (previously logged its first 10 chars).
        logger.info(f"API key provided: {bool(api_key)}")

        # Reasoning models can take much longer to produce an answer.
        timeout_seconds = 180 if 'reasoner' in model.lower() else 60
        logger.info(f"Setting timeout to {timeout_seconds}s for model: {model}")

        async with aiohttp.ClientSession() as session:
            async with session.post(
                api_base,
                headers=headers,
                json=payload,
                timeout=aiohttp.ClientTimeout(total=timeout_seconds)
            ) as response:
                if response.status == 200:
                    result = await response.json()
                    if is_claude:
                        # Anthropic response: content is a list of text blocks.
                        if "content" in result and result["content"]:
                            content = result["content"][0]["text"] if isinstance(result["content"], list) else result["content"]
                        else:
                            content = "No content in response"
                    else:
                        # OpenAI response format.
                        content = result["choices"][0]["message"]["content"]
                    return content
                error_text = await response.text()
                logger.error(f"{provider} API error {response.status}: {error_text}")
                return f"{provider} API调用失败: {response.status} - {error_text}"
    except Exception as e:
        import traceback
        provider_name = provider or "AI"
        error_details = traceback.format_exc()
        logger.error(f"{provider_name} API调用异常: {str(e)}\n详细错误:\n{error_details}")
        return f"{provider_name} API调用异常: {str(e)}"


def _ensure_chat_endpoint(api_base: str, is_claude: bool) -> str:
    """Append the provider's chat endpoint path to api_base when missing."""
    if is_claude:
        # Claude models use the /v1/messages endpoint.
        if not api_base.endswith('/v1/messages'):
            if api_base.endswith('/'):
                api_base = api_base + 'v1/messages'
            else:
                api_base = api_base + '/v1/messages'
        return api_base
    # Other models use the /chat/completions endpoint.
    if not api_base.endswith('/chat/completions'):
        if api_base.endswith('/'):
            api_base = api_base + 'chat/completions'
        elif '/v1' in api_base or '/v3' in api_base:
            api_base = api_base + '/chat/completions'
        else:
            api_base = api_base + '/v1/chat/completions'
    return api_base


def _prompt_to_messages(prompt: str):
    """Parse a "System:/Human:/Assistant:" prompt back into chat messages.

    Parses line-by-line — NOT by splitting on blank lines — so message bodies
    that themselves contain blank lines survive intact. A bare trailing
    "Assistant: " cue is dropped rather than emitted as an empty message.
    """
    messages = []
    current_role = None
    current_content = []

    def flush():
        # Emit the message accumulated so far, if any.
        if current_role:
            messages.append({"role": current_role, "content": '\n'.join(current_content)})

    for line in prompt.split('\n'):
        if line.startswith('System: '):
            flush()
            current_role, current_content = "system", [line[8:]]
        elif line.startswith('Human: '):
            flush()
            current_role, current_content = "user", [line[7:]]
        elif line.startswith('Assistant: '):
            flush()
            if line != 'Assistant: ':
                current_role, current_content = "assistant", [line[11:]]
            else:
                current_role, current_content = None, []
        elif current_role:
            # Continuation line of the current message.
            current_content.append(line)

    if current_role and current_content:
        messages.append({"role": current_role, "content": '\n'.join(current_content)})

    # Fall back to a single user message when nothing parsed.
    return messages or [{"role": "user", "content": prompt}]


async def generate_streaming_content(generation_chain, model, prompt, temperature, top_p, max_tokens):
    """Stream AI output from the built-in chain as server-sent events.

    Yields `data: {...}` JSON frames with partial content, then a final frame
    with finished=True, then the literal `data: [DONE]` terminator. On failure
    a single frame carrying the error text is emitted instead.
    """
    try:
        token_stream = generation_chain.stream(
            prompt=prompt,
            model_name=model,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens
        )
        async for piece in token_stream:
            frame = {"content": piece, "finished": False}
            yield f"data: {json.dumps(frame)}\n\n"

        # Completion signal followed by the SSE terminator.
        yield f"data: {json.dumps({'content': '', 'finished': True})}\n\n"
        yield "data: [DONE]\n\n"

    except Exception as exc:
        logger.error(f"Streaming generation failed: {exc}")
        yield f"data: {json.dumps({'error': str(exc), 'finished': True})}\n\n"


async def generate_streaming_content_custom(model, prompt, temperature, top_p, max_tokens,
                                            api_key, api_base, provider=None):
    """Stream AI output from a caller-configured API as server-sent events.

    Supports OpenAI-compatible and Anthropic streaming formats. Frames follow
    the same SSE shape as generate_streaming_content(): partial-content
    frames, a finished=True frame, then `data: [DONE]`.
    """
    import aiohttp

    try:
        # Detect Claude/Anthropic from the provider hint or the model name.
        is_claude = (provider and provider.lower() in ['claude', 'anthropic']) or (model and 'claude' in model.lower())

        # Append the correct endpoint path when it is missing.
        if is_claude:
            # Claude models use the /v1/messages endpoint.
            if not api_base.endswith('/v1/messages'):
                if api_base.endswith('/'):
                    api_base = api_base + 'v1/messages'
                else:
                    api_base = api_base + '/v1/messages'
        else:
            # Other models use the /chat/completions endpoint.
            if not api_base.endswith('/chat/completions'):
                if api_base.endswith('/'):
                    api_base = api_base + 'chat/completions'
                elif '/v1' in api_base or '/v3' in api_base:
                    api_base = api_base + '/chat/completions'
                else:
                    api_base = api_base + '/v1/chat/completions'

        headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json"
        }
        if is_claude:
            # Fix: official Anthropic auth headers; Bearer kept for proxies.
            headers["x-api-key"] = api_key
            headers["anthropic-version"] = "2023-06-01"

        # Parse the prompt line-by-line.
        # Fix: the previous '\n\n' split corrupted any message whose content
        # itself contains blank lines — use the same line-based parsing as
        # call_custom_model_api.
        messages = []
        current_role = None
        current_content = []
        for line in prompt.split('\n'):
            if line.startswith('System: '):
                if current_role:
                    messages.append({"role": current_role, "content": '\n'.join(current_content)})
                current_role, current_content = "system", [line[8:]]
            elif line.startswith('Human: '):
                if current_role:
                    messages.append({"role": current_role, "content": '\n'.join(current_content)})
                current_role, current_content = "user", [line[7:]]
            elif line.startswith('Assistant: '):
                if current_role:
                    messages.append({"role": current_role, "content": '\n'.join(current_content)})
                if line != 'Assistant: ':
                    current_role, current_content = "assistant", [line[11:]]
                else:
                    # Bare trailing cue — not a message.
                    current_role, current_content = None, []
            elif current_role:
                current_content.append(line)
        if current_role and current_content:
            messages.append({"role": current_role, "content": '\n'.join(current_content)})

        # Fall back to a single user message when nothing parsed.
        if not messages:
            messages = [{"role": "user", "content": prompt}]

        # Provider-specific request payload.
        if is_claude:
            # Anthropic format: no top_p, temperature optional.
            payload = {
                "model": model,
                "messages": messages,
                "max_tokens": max_tokens,
                "stream": True
            }
            if temperature is not None:
                payload["temperature"] = temperature
        else:
            # OpenAI-compatible format.
            payload = {
                "model": model,
                "messages": messages,
                "temperature": temperature,
                "max_tokens": max_tokens,
                "stream": True
            }
            if top_p is not None:
                payload["top_p"] = top_p

        logger.info(f"Sending streaming request to: {api_base}")
        logger.info(f"Provider: {provider}, Is Claude: {is_claude}")
        logger.info(f"Messages count: {len(messages)}")
        # Fix: log only the key length — never key material.
        logger.info(f"API Key length: {len(api_key) if api_key else 0}")

        async with aiohttp.ClientSession() as session:
            async with session.post(api_base, headers=headers, json=payload) as response:
                if response.status != 200:
                    error_text = await response.text()
                    logger.error(f"API error: {response.status} - {error_text}")
                    yield f"data: {json.dumps({'error': f'API error: {response.status}', 'finished': True})}\n\n"
                    return

                done = False
                buffer = b''
                async for chunk in response.content.iter_any():
                    if done:
                        break
                    buffer += chunk
                    lines = buffer.split(b'\n')
                    buffer = lines[-1]  # keep the trailing partial line

                    for raw_line in lines[:-1]:
                        if not raw_line:
                            continue
                        line_text = raw_line.decode('utf-8').strip()
                        if not line_text.startswith('data: '):
                            continue
                        data_str = line_text[6:]
                        if data_str == '[DONE]':
                            # Fix: previously only the inner loop was exited,
                            # so reading continued past the terminator.
                            done = True
                            break
                        try:
                            chunk_data = json.loads(data_str)
                        except json.JSONDecodeError as e:
                            logger.warning(f"Failed to parse chunk: {data_str[:100]} - {e}")
                            continue

                        content = ""
                        if is_claude:
                            # Anthropic streaming event formats.
                            if chunk_data.get('type') == 'content_block_delta':
                                content = chunk_data.get('delta', {}).get('text', '')
                            elif chunk_data.get('type') == 'message_delta':
                                delta = chunk_data.get('delta', {})
                                if 'content' in delta and delta['content']:
                                    content = delta['content'][0].get('text', '') if isinstance(delta['content'], list) else delta['content'].get('text', '')
                        else:
                            # OpenAI streaming chunk format.
                            if 'choices' in chunk_data and chunk_data['choices']:
                                content = chunk_data['choices'][0].get('delta', {}).get('content', '')

                        if content:
                            yield f"data: {json.dumps({'content': content, 'finished': False})}\n\n"

        # Completion signal followed by the SSE terminator.
        yield f"data: {json.dumps({'content': '', 'finished': True})}\n\n"
        yield "data: [DONE]\n\n"

    except Exception as e:
        logger.error(f"Custom streaming generation failed: {e}")
        yield f"data: {json.dumps({'error': str(e), 'finished': True})}\n\n"


@app.websocket("/ws/config-sync")
async def websocket_config_sync_direct(
    websocket: WebSocket,
    token: Optional[str] = Query(None)
):
    """Configuration-sync WebSocket endpoint (direct route).

    Accepts the connection first, authenticates with the query token, then
    serves auth / sync_request / heartbeat messages until disconnect.
    """
    from ..auth.jwt_handler import get_current_user_ws

    client_id = None

    try:
        # Accept first so an auth failure can be delivered as a close frame.
        await websocket.accept()

        user = await get_current_user_ws(websocket, token)
        if not user:
            await websocket.close(code=1008, reason="Authentication failed")
            return

        client_id = f"config_sync_{user['user_id']}_{uuid4().hex[:8]}"
        logger.info(f"Config sync WebSocket connected: client_id={client_id}")

        # Serve messages until the client disconnects.
        while True:
            incoming = await websocket.receive_json()
            msg_type = incoming.get("type")

            if msg_type == "auth":
                # Authentication acknowledgement.
                reply = {"type": "auth_success", "client_id": client_id}
                await websocket.send_json(reply)
            elif msg_type == "sync_request":
                # Synchronization request.
                reply = {
                    "type": "sync_response",
                    "data": {
                        "status": "success",
                        "message": "Configuration sync completed"
                    }
                }
                await websocket.send_json(reply)
            elif msg_type == "heartbeat":
                # Heartbeat echo.
                reply = {"type": "heartbeat_ack", "timestamp": incoming.get("timestamp")}
                await websocket.send_json(reply)
            else:
                # Unrecognized message types are only logged.
                logger.info(f"Config sync message: {msg_type}")

    except WebSocketDisconnect:
        logger.info(f"Config sync WebSocket断开: client_id={client_id}")
    except Exception as e:
        logger.error(f"Config sync WebSocket错误: client_id={client_id}, error={e}")
    finally:
        if client_id:
            logger.info(f"Config sync WebSocket清理: client_id={client_id}")

@app.websocket("/ws/hot-items")
async def websocket_hot_items_direct(
    websocket: WebSocket,
    token: Optional[str] = Query(None)
):
    """Hot-items WebSocket endpoint (direct route).

    Accepts the connection first, authenticates with the query token, then
    serves subscribe / heartbeat messages until disconnect.
    """
    from ..auth.jwt_handler import get_current_user_ws

    client_id = None

    try:
        # Accept first so an auth failure can be delivered as a close frame.
        await websocket.accept()

        user = await get_current_user_ws(websocket, token)
        if not user:
            await websocket.close(code=1008, reason="Authentication failed")
            return

        client_id = f"hot_items_{user['user_id']}_{uuid4().hex[:8]}"
        logger.info(f"Hot items WebSocket connected: client_id={client_id}")

        # Serve messages until the client disconnects.
        while True:
            incoming = await websocket.receive_json()
            msg_type = incoming.get("type")

            if msg_type == "subscribe":
                # Hot-items subscription acknowledgement.
                reply = {
                    "type": "subscribed",
                    "message": "Successfully subscribed to hot items updates"
                }
                await websocket.send_json(reply)
            elif msg_type == "heartbeat":
                # Heartbeat echo.
                reply = {"type": "heartbeat_ack", "timestamp": incoming.get("timestamp")}
                await websocket.send_json(reply)
            else:
                # Unrecognized message types are only logged.
                logger.info(f"Hot items message: {msg_type}")

    except WebSocketDisconnect:
        logger.info(f"Hot items WebSocket断开: client_id={client_id}")
    except Exception as e:
        logger.error(f"Hot items WebSocket错误: client_id={client_id}, error={e}")
    finally:
        if client_id:
            logger.info(f"Hot items WebSocket清理: client_id={client_id}")


# Additional API endpoints would be added here for:
# - Content generation
# - Conversation management
# - Prompt template CRUD
# - Document loading
# etc.


if __name__ == "__main__":
    import uvicorn
    # Run the ASGI app directly with uvicorn, using host/port from settings;
    # auto-reload is enabled when settings.debug is truthy.
    uvicorn.run(
        "src.api.main:app",
        host=settings.api_host,
        port=settings.api_port,
        reload=settings.debug
    )