from flask import Blueprint, request, jsonify
from ..aiservice import LLMService, ModelType
import asyncio
import logging

# Configure logging: root config plus a module-scoped named logger.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Blueprint grouping the LLM HTTP endpoints (/chat, /generate).
llm_bp = Blueprint('llm', __name__)

@llm_bp.route('/chat', methods=['POST'])
async def chat_completion():
    """
    Chat completion API endpoint.

    Expected JSON request body:
    {
        "messages": [{"role": "user", "content": "hello"}, ...],  # required
        "model_type": "deepseek-chat",  # optional
        "temperature": 0.7,             # optional
        "max_tokens": 1000,             # optional
        "additional_params": {}         # optional
    }

    Returns the LLM service response as JSON on success, or a JSON error
    payload {'success': False, 'error': ..., 'error_type': ...} with
    HTTP 400 (bad input) or 500 (unexpected failure).
    """
    try:
        data = request.get_json()
        logger.info("Received chat request with data: %s", data)

        if not data or 'messages' not in data:
            logger.error("Missing required field: messages")
            return jsonify({
                'success': False,
                'error': 'Missing required field: messages'
            }), 400

        # Reject payloads where 'messages' is present but not a non-empty
        # list; forwarding e.g. a bare string downstream would only fail
        # later with a less actionable error.
        messages = data['messages']
        if not isinstance(messages, list) or not messages:
            logger.error("Field 'messages' must be a non-empty list")
            return jsonify({
                'success': False,
                'error': "Field 'messages' must be a non-empty list"
            }), 400

        llm_service = LLMService()

        # Optional parameters. ModelType(...) raises ValueError for an
        # unknown model name, which the handler below maps to a 400.
        model_type = ModelType(data['model_type']) if data.get('model_type') else None
        temperature = data.get('temperature', 0.7)
        max_tokens = data.get('max_tokens', 1000)
        additional_params = data.get('additional_params')

        logger.info(
            "Calling LLM service with model_type: %s, temperature: %s, max_tokens: %s",
            model_type, temperature, max_tokens
        )

        response = await llm_service.chat_completion(
            messages=messages,
            model_type=model_type,
            temperature=temperature,
            max_tokens=max_tokens,
            additional_params=additional_params
        )

        logger.info("LLM service response: %s", response)
        return jsonify(response)

    except ValueError as e:
        # Bad client-supplied value (e.g. invalid model_type) -> 400.
        logger.error("ValueError in chat_completion: %s", e)
        return jsonify({
            'success': False,
            'error': str(e),
            'error_type': 'ValueError'
        }), 400
    except Exception as e:
        logger.error("Unexpected error in chat_completion: %s", e, exc_info=True)
        return jsonify({
            'success': False,
            'error': str(e),
            'error_type': type(e).__name__
        }), 500

@llm_bp.route('/generate', methods=['POST'])
def generate_completion():
    """
    Text generation API endpoint.

    Expected JSON request body:
    {
        "prompt": "...",                # required
        "model_type": "deepseek-chat",  # optional
        "temperature": 0.7,             # optional
        "max_tokens": 1000,             # optional
        "additional_params": {}         # optional
    }

    Returns the LLM service response as JSON on success, or a JSON error
    payload {'success': False, 'error': ..., 'error_type': ...} with
    HTTP 400 (bad input) or 500 (unexpected failure).
    """
    try:
        data = request.get_json()
        logger.info("Received generate request with data: %s", data)

        if not data or 'prompt' not in data:
            logger.error("Missing required field: prompt")
            return jsonify({
                'success': False,
                'error': 'Missing required field: prompt'
            }), 400

        llm_service = LLMService()

        # Optional parameters. ModelType(...) raises ValueError for an
        # unknown model name, which the handler below maps to a 400.
        model_type = ModelType(data['model_type']) if data.get('model_type') else None
        temperature = data.get('temperature', 0.7)
        max_tokens = data.get('max_tokens', 1000)
        additional_params = data.get('additional_params')

        logger.info(
            "Calling LLM service with model_type: %s, temperature: %s, max_tokens: %s",
            model_type, temperature, max_tokens
        )

        # asyncio.run() creates a fresh event loop, runs the coroutine, and
        # always closes the loop — unlike the previous manual
        # new_event_loop()/close() pair, which leaked the loop whenever the
        # coroutine raised (close() was skipped on exception).
        response = asyncio.run(
            llm_service.generate_completion(
                prompt=data['prompt'],
                model_type=model_type,
                temperature=temperature,
                max_tokens=max_tokens,
                additional_params=additional_params
            )
        )

        logger.info("LLM service response: %s", response)
        return jsonify(response)

    except ValueError as e:
        # Consistent with /chat: bad client-supplied value -> 400.
        logger.error("ValueError in generate_completion: %s", e)
        return jsonify({
            'success': False,
            'error': str(e),
            'error_type': 'ValueError'
        }), 400
    except Exception as e:
        logger.error("Unexpected error in generate_completion: %s", e, exc_info=True)
        return jsonify({
            'success': False,
            'error': str(e),
            'error_type': type(e).__name__
        }), 500