"""
对话相关路由
"""

from flask import Blueprint, request, jsonify, Response, stream_with_context, current_app
from app.services.ai_service import AIService
from app.utils.decorators import require_api_key
import logging

# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)

# Blueprint grouping the chat endpoints; registered with the app elsewhere.
chat_bp = Blueprint('chat', __name__)


@chat_bp.route('/chat', methods=['POST'])
@require_api_key
def chat():
    """
    AI chat endpoint.

    Request body:
    {
        "message": "the user's question",    // required, non-empty string
        "model": "qwen-plus",                // optional, defaults to configured model
        "stream": false,                     // whether to stream the reply
        "temperature": 0.7,                  // optional
        "max_tokens": 2000,                  // optional
        "system_prompt": "You are a helper"  // optional system prompt
    }

    Response (non-stream):
    {
        "success": true,
        "reply": "the AI reply",
        "model": "qwen-plus",
        "usage": {
            "prompt_tokens": 10,
            "completion_tokens": 100,
            "total_tokens": 110
        }
    }

    Response (stream, Server-Sent Events):
    data: {"content": "..."}
    data: {"done": true}

    Returns 400 for malformed JSON / missing or invalid "message",
    500 (or the service-supplied status code) on failure.
    """
    try:
        # silent=True makes malformed JSON yield None (handled as 400 below)
        # instead of raising BadRequest, which the broad handler at the
        # bottom would otherwise surface as a misleading 500.
        data = request.get_json(silent=True)

        if not data or 'message' not in data:
            return jsonify({'error': '缺少 message 参数'}), 400

        user_message = data['message']
        # Reject non-string or blank messages early with an explicit 400
        # rather than forwarding garbage to the AI service.
        if not isinstance(user_message, str) or not user_message.strip():
            return jsonify({'error': 'message 参数无效'}), 400

        # Extract optional parameters, falling back to app configuration.
        model = data.get('model', current_app.config.get('DEFAULT_MODEL'))
        stream = bool(data.get('stream', False))
        temperature = data.get('temperature', current_app.config.get('DEFAULT_TEMPERATURE'))
        max_tokens = data.get('max_tokens', current_app.config.get('DEFAULT_MAX_TOKENS'))
        system_prompt = data.get('system_prompt')

        # Lazy %-style args avoid string formatting when INFO is disabled.
        logger.info('收到对话请求: model=%s, stream=%s, message=%s...',
                    model, stream, user_message[:50])

        # Fresh service instance per request.
        ai_service = AIService()

        if stream:
            # SSE streaming response; stream_with_context keeps the request
            # context alive while the generator is being consumed.
            return Response(
                stream_with_context(
                    ai_service.chat_stream(
                        message=user_message,
                        model=model,
                        temperature=temperature,
                        max_tokens=max_tokens,
                        system_prompt=system_prompt
                    )
                ),
                content_type='text/event-stream'
            )

        # Non-streaming response.
        result = ai_service.chat(
            message=user_message,
            model=model,
            temperature=temperature,
            max_tokens=max_tokens,
            system_prompt=system_prompt
        )

        if result['success']:
            return jsonify(result), 200
        # Propagate the service-supplied status code when present.
        return jsonify(result), result.get('status_code', 500)

    except ValueError as e:
        logger.error('参数错误: %s', e)
        return jsonify({'error': '参数错误', 'details': str(e)}), 400
    except Exception as e:
        # logger.exception records the traceback automatically.
        logger.exception('对话接口错误: %s', e)
        return jsonify({'error': '服务器内部错误', 'details': str(e)}), 500
