from flask import Blueprint, request, jsonify, g, current_app
from extensions import db
from models import User
from utils import success, error, bad_request
import requests
import json

# Blueprint for the AI-assistant API; all routes are mounted under /api/ai
ai_api = Blueprint('ai_api', __name__, url_prefix='/api/ai')

# Send a user message (plus optional chat history) to the AI assistant.
def _build_messages(data, message):
    """Build an OpenAI-style ``messages`` list from the request payload.

    Args:
        data: Parsed JSON request body; may contain ``history``, a list of
            2-item ``[msg_type, msg_content]`` pairs (``msg_type == "user"``
            maps to role "user", anything else to "assistant").
        message: The current (already-stripped, non-empty) user message.

    Returns:
        list[dict]: Chat messages ending with the current user message.
        Malformed history entries and blank contents are silently skipped.
    """
    messages = []
    history = data.get('history')
    if isinstance(history, list):
        for item in history:
            # Only well-formed [type, content] pairs are accepted.
            if isinstance(item, (list, tuple)) and len(item) == 2:
                msg_type, msg_content = item
                role = "user" if msg_type == "user" else "assistant"
                cleaned_content = str(msg_content).strip()
                if cleaned_content:
                    messages.append({"role": role, "content": cleaned_content})
    # Append the current user input as the final turn.
    messages.append({"role": "user", "content": message})
    return messages


@ai_api.route('/chat', methods=['POST'])
def call_llm_api():
    """Call the LLM API synchronously (non-streaming) and return its reply.

    Expects a JSON body with a non-empty ``message`` string and an optional
    ``history`` list. Returns a ``success`` response with ``{'reply': str}``
    on success, otherwise an ``error``/``bad_request`` response. Requires an
    authenticated user in ``g.current_user`` (set by middleware — confirm
    against the app's before_request hooks).
    """
    try:
        # Reject unauthenticated requests.
        if not g.current_user:
            return error(code=401, msg='用户不存在').to_response()

        # The endpoint only accepts JSON payloads.
        if not request.is_json:
            return bad_request('请求必须是JSON格式').to_response()

        data = request.get_json()
        message = data.get('message', '').strip()

        if not message:
            return bad_request('消息内容不能为空').to_response()

        # Upstream LLM endpoint configuration.
        llm_api_url = current_app.config.get('LLM_API_URL')
        llm_auth_token = current_app.config.get('LLM_AUTH_TOKEN')
        llm_default_params = current_app.config.get('LLM_DEFAULT_PARAMS', {})

        # Request body: defaults first so "messages" cannot be overridden.
        payload = {
            **llm_default_params,
            "messages": _build_messages(data, message)
        }

        headers = {
            "Authorization": f"Bearer {llm_auth_token}",
            "Content-Type": "application/json"
        }

        # Generous timeout: complex prompts can take the model a long time.
        response = requests.post(
            url=llm_api_url,
            headers=headers,
            json=payload,
            timeout=120  # 2 minutes
        )

        if response.status_code != 200:
            # Log a truncated upstream body for diagnosis; never expose it
            # to the client.
            error_detail = response.text[:200]
            error_msg = f"大模型接口请求失败 [{response.status_code}]: {error_detail}"
            current_app.logger.error(error_msg)
            return error(msg='AI服务暂时不可用，请稍后重试').to_response()

        # The upstream may return a 200 with a non-JSON body; handle that
        # explicitly instead of letting it fall into the generic handler.
        try:
            response_data = response.json()
        except ValueError:
            current_app.logger.error('LLM API返回了无法解析的响应')
            return error(msg='AI服务返回空响应').to_response()

        # Extract the reply content (OpenAI chat-completions format).
        reply = None
        if 'choices' in response_data and len(response_data['choices']) > 0:
            reply = response_data['choices'][0].get('message', {}).get('content', '')

        if not reply:
            return error(msg='AI服务返回空响应').to_response()

        return success(data={'reply': reply}, msg='获取回复成功').to_response()

    except requests.exceptions.Timeout:
        current_app.logger.error('LLM API请求超时')
        return error(msg='您的问题比较复杂，AI正在思考中，请稍后重试或简化问题').to_response()
    except requests.exceptions.RequestException as e:
        current_app.logger.error(f'LLM API请求异常: {str(e)}')
        return error(msg='AI服务连接失败，请稍后重试').to_response()
    except Exception:
        # logger.exception records the full traceback; the client gets a
        # generic message — returning str(e) here leaked internal details.
        current_app.logger.exception('LLM API处理异常')
        return error(msg='AI服务处理失败，请稍后重试').to_response()
