import os
import base64
import cv2
import numpy as np
import requests
import time
from flask import Flask, request, jsonify, send_from_directory
from flask_cors import CORS
from ultralytics import YOLO
import math
import datetime
import logging
import json
from werkzeug.utils import secure_filename
from flask_socketio import SocketIO, emit
from flask_sqlalchemy import SQLAlchemy
import random
from collections import defaultdict
from sqlalchemy import or_, and_
import urllib.parse
import threading
import uuid
import psutil

# ================= Flask application initialization and configuration =================
# Serves the built frontend as static files from ../frontend/build.
app = Flask(__name__, static_folder='../frontend/build', static_url_path='')
CORS(app)

# ================= Logging configuration =================
app.logger.setLevel(logging.INFO)
logger = app.logger

# Remove any pre-existing handlers so ours are attached exactly once
# (guards against duplicate log lines on reload).
for handler in app.logger.handlers[:]:
    app.logger.removeHandler(handler)

console_handler = logging.StreamHandler()
console_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S'))
app.logger.addHandler(console_handler)

# Import the AppBuilder SDK; AI features degrade gracefully when it is absent.
try:
    import appbuilder
    APPBUILDER_SDK_AVAILABLE = True
    logger.info("AppBuilder SDK imported successfully.")
except ImportError:
    APPBUILDER_SDK_AVAILABLE = False
    logger.warning("AppBuilder SDK not found. AI features relying on AppBuilder will be disabled. Please run 'pip install appbuilder-sdk'.")

# ================= SocketIO configuration =================
socketio = SocketIO(app, cors_allowed_origins="*", async_mode='eventlet', ping_interval=20, ping_timeout=40)

class WebSocketHandler(logging.Handler):
    """Logging handler that pushes formatted records to WebSocket clients."""

    def emit(self, record):
        # Only the formatted message is sent; the frontend renders its own
        # timestamp and level presentation.
        try:
            message = self.format(record)
            socketio.emit('log_message', {'data': message})
        except Exception as exc:
            # Never let logging failures propagate; fall back to stdout.
            print(f"Error sending log via WebSocket: {exc}", flush=True)

# Attach the WebSocket log handler exactly once (guards against
# re-registration when the module is re-imported or reloaded).
if not any(isinstance(handler, WebSocketHandler) for handler in app.logger.handlers):
    websocket_handler = WebSocketHandler()
    websocket_handler.setFormatter(logging.Formatter('%(levelname)s - %(message)s')) # message only; frontend adds time
    app.logger.addHandler(websocket_handler)

# ================= Database configuration =================
# Local SQLite file; modification tracking disabled to avoid its overhead.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)

# ================= 数据库模型定义 =================
class HistoryRecord(db.Model):
    """One persisted detection or image-interpretation result for history views."""
    id = db.Column(db.Integer, primary_key=True)
    timestamp = db.Column(db.String(50), nullable=False)
    person_count = db.Column(db.Integer, nullable=True) # nullable: image interpretation may carry no person count
    original_image_b64 = db.Column(db.Text, nullable=False)
    result_image_b64 = db.Column(db.Text, nullable=False)
    safety_analysis = db.Column(db.Text, nullable=True)
    error_message = db.Column(db.Text, nullable=True)
    # JSON-encoded list of per-person detection details (see to_dict below).
    persons_json = db.Column(db.Text, nullable=True)
    device_id = db.Column(db.String(50), nullable=True)
    location = db.Column(db.String(100), nullable=True)
    record_type = db.Column(db.String(50), nullable=False, default='detection') # 'detection', 'image_interpretation'

    def __repr__(self):
        return f"HistoryRecord(ID: {self.id}, Type: {self.record_type}, Time: '{self.timestamp}', Persons: {self.person_count}, Location: {self.location})"

    def to_dict(self):
        """Serialize the record into the JSON shape the frontend API consumes."""
        return {
            'id': self.id,
            'timestamp': self.timestamp,
            'person_count': self.person_count,
            'original_image_b64': self.original_image_b64,
            'result_image_b64': self.result_image_b64,
            'safety_analysis': self.safety_analysis,
            # Error is wrapped in an object (or None) to match the frontend contract.
            'error': {'message': self.error_message} if self.error_message else None,
            'persons': json.loads(self.persons_json) if self.persons_json else [],
            'device_id': self.device_id,
            'location': self.location,
            'record_type': self.record_type
        }

class Device(db.Model):
    """A monitoring device (e.g. a camera) registered in the system."""
    id = db.Column(db.String(50), primary_key=True)
    name = db.Column(db.String(100), nullable=False)
    ip = db.Column(db.String(50), nullable=False, unique=True)
    location = db.Column(db.String(100), nullable=False)
    # Status is stored flattened (value/label/color) and re-nested in to_dict.
    status_value = db.Column(db.String(50), nullable=False)
    status_label = db.Column(db.String(50), nullable=False)
    status_color = db.Column(db.String(50), nullable=False)
    lastActive = db.Column(db.String(50), nullable=False)
    device_type = db.Column(db.String(50), nullable=False, default='camera')

    def to_dict(self):
        """Serialize the device, re-nesting the flattened status fields."""
        return {
            'id': self.id,
            'name': self.name,
            'ip': self.ip,
            'location': self.location,
            'status': {
                'value': self.status_value,
                'label': self.status_label,
                'color': self.status_color
            },
            'lastActive': self.lastActive,
            'device_type': self.device_type
        }

class Person(db.Model):
    """A person sighting with their classified behavior descriptor."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100), nullable=False)
    studentId = db.Column(db.String(100), nullable=True, unique=True)
    department = db.Column(db.String(100), nullable=True)
    # Behavior is stored flattened (label/value/color) and re-nested in to_dict.
    behavior_label = db.Column(db.String(50), nullable=False)
    behavior_value = db.Column(db.String(50), nullable=False)
    behavior_color = db.Column(db.String(50), nullable=False)
    location = db.Column(db.String(100), nullable=False)
    time = db.Column(db.String(50), nullable=False)
    confidence = db.Column(db.String(10), nullable=False)

    def to_dict(self):
        """Serialize the person, re-nesting the flattened behavior fields."""
        return {
            'id': self.id,
            'name': self.name,
            'studentId': self.studentId,
            'department': self.department,
            'behavior': {
                'label': self.behavior_label,
                'value': self.behavior_value,
                'color': self.behavior_color
            },
            'location': self.location,
            'time': self.time,
            'confidence': self.confidence,
        }

# ================= File upload configuration =================
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
os.makedirs(UPLOAD_FOLDER, exist_ok=True)

SETTINGS_FILE = 'settings.json'
# Defaults merged under any loaded settings file so every key is always present.
DEFAULT_SETTINGS = {
    'sensitivity': 'medium',
    'notification': 'all',
    'retention': '30',
    'interval': '10',
    'theme': 'light', # default light mode
    'active_theme': 'standard-light', # default light skin
    'model_version': 'yolov8n-pose',
    'confidence_threshold': 50,
    'notification_methods': ['system'],
    'sound_reminders': True,
    'ai_interpret_results': True,
    'ai_proactive_alerts': True,
    'app_builder_app_id': '' # intentionally empty; the user must configure it in settings
}

def _load_settings():
    """Load settings from SETTINGS_FILE, merged over DEFAULT_SETTINGS.

    Returns a fresh dict on every call. Nested dicts/lists from
    DEFAULT_SETTINGS are copied before merging: the previous implementation
    used a shallow ``DEFAULT_SETTINGS.copy()``, so an in-place
    ``dict.update`` on a nested value would have corrupted the module-level
    defaults for every later load. Falls back to pure defaults when the file
    is missing or contains invalid JSON.
    """
    def _fresh_defaults():
        # One-level deep copy is sufficient: DEFAULT_SETTINGS nests at most one level.
        return {key: (value.copy() if isinstance(value, (dict, list)) else value)
                for key, value in DEFAULT_SETTINGS.items()}

    if not os.path.exists(SETTINGS_FILE):
        return _fresh_defaults()

    try:
        # Explicit UTF-8 so the file parses identically on every platform.
        with open(SETTINGS_FILE, 'r', encoding='utf-8') as f:
            settings = json.load(f)
    except json.JSONDecodeError:
        logger.error(f"Error decoding {SETTINGS_FILE}. Using default settings.")
        return _fresh_defaults()

    merged_settings = _fresh_defaults()
    for key, value in settings.items():
        if key in merged_settings and isinstance(merged_settings[key], dict) and isinstance(value, dict):
            merged_settings[key].update(value)  # shallow merge for dicts
        elif key in merged_settings and isinstance(merged_settings[key], list) and isinstance(value, list):
            merged_settings[key] = value  # replace lists wholesale
        else:
            merged_settings[key] = value
    return merged_settings

def _save_settings(settings):
    """Persist *settings* to SETTINGS_FILE as pretty-printed JSON.

    Uses an explicit UTF-8 encoding: the previous implementation relied on
    the platform default encoding, which can fail on non-ASCII setting
    values (e.g. on Windows with a legacy locale codepage).
    """
    with open(SETTINGS_FILE, 'w', encoding='utf-8') as f:
        json.dump(settings, f, indent=4)
    logger.info("Settings saved successfully.")

# Load persisted settings once at import time.
current_settings = _load_settings()

# ================= AppBuilder SDK client initialization =================
appbuilder_client = None
# appbuilder.create_conversation() only applies to AppBuilderClient.run(),
# not AppBuilderChat().create_conversation(), so no global chat
# conversation id is kept here; each AppBuilderClient.run() call creates
# its own conversation. Supporting ChatAppBuilder would require
# appbuilder.AppBuilderChat(app_id) plus management of its conversation_id.
# For simplicity we keep simulating chat via AppBuilderClient.run().
# appbuilder_chat_client = None # reserved for a future ChatAppBuilder
# chat_conversation_id = None # reserved for a future ChatAppBuilder

def initialize_appbuilder_client():
    """Create the global AppBuilderClient from settings/env, or set it to None.

    Requires the SDK to be importable, an App ID configured in settings, and
    the APPBUILDER_TOKEN environment variable. Every failure path logs the
    problem and pushes a notice to connected WebSocket clients.
    """
    global appbuilder_client
    if not APPBUILDER_SDK_AVAILABLE:
        logger.error("AppBuilder SDK is not available. Cannot initialize client.")
        socketio.emit('log_message', {'data': 'ERROR: AppBuilder SDK未安装或不可用。AI功能受限。'})
        appbuilder_client = None
        return

    app_id = current_settings.get('app_builder_app_id')
    app_token = os.environ.get("APPBUILDER_TOKEN")

    if not app_id:
        logger.warning("AppBuilder App ID is not configured in settings. AI features (except direct Qianfan API) will not work.")
        socketio.emit('log_message', {'data': 'WARNING: AppBuilder App ID未配置。请检查系统设置。'})
        appbuilder_client = None
        return

    if not app_token:
        logger.error("APPBUILDER_TOKEN environment variable is not set. AI features will not work.")
        socketio.emit('log_message', {'data': 'ERROR: APPBUILDER_TOKEN环境变量未设置。请检查服务器配置。'})
        appbuilder_client = None
        return

    logger.info(f"Initializing AppBuilderClient with App ID: {app_id}")
    try:
        # appbuilder_client = appbuilder.AppBuilderClient(app_id) # Old way, creating client with app_id
        # Newer SDK usage can call AppBuilder().run() directly without creating
        # a client up front, but we keep constructing AppBuilderClient so that
        # "appbuilder_client is not None" doubles as the readiness check for
        # the rest of the app (and exercises the App ID / token combination).
        appbuilder_client = appbuilder.AppBuilderClient(app_id)
        logger.info(f"AppBuilderClient initialized successfully with App ID: {app_id}.")
        socketio.emit('log_message', {'data': 'INFO: AppBuilder AI助手已成功连接。'})
    except appbuilder.AppBuilderServerException as e:
        logger.error(f"Failed to initialize AppBuilderClient: {e.code} - {e.message}", exc_info=True)
        socketio.emit('log_message', {'data': f'ERROR: AppBuilder AI助手初始化失败: {e.message}. 请检查App ID和Token。'})
        appbuilder_client = None
    except Exception as e:
        logger.error(f"An unexpected error occurred during AppBuilder initialization: {e}", exc_info=True)
        socketio.emit('log_message', {'data': f'ERROR: AppBuilder AI助手初始化出现未知错误: {e}.'})
        appbuilder_client = None

def call_appbuilder_agent(prompt_text, conversation_id_param=None):
    """Send *prompt_text* to the AppBuilder agent and return its answer text.

    When conversation_id_param is given the existing conversation is reused
    (chat-style); otherwise a fresh conversation is created for a one-shot
    query. All failures are converted into user-facing Chinese error strings
    rather than raised, so callers can display the return value directly.
    """
    if not appbuilder_client: # client must have been initialized first
        logger.warning("AppBuilder client not initialized. Cannot call agent.")
        return "AI助手服务未就绪，请检查服务器日志或系统设置。"

    try:
        timeout_seconds = 40
        # AppBuilderClient.run() starts a new conversation on every call unless
        # a conversation_id is supplied; for simulated chat we keep whatever
        # session id the frontend passed in.
        if conversation_id_param:
            resp = appbuilder_client.run(conversation_id_param, prompt_text, timeout=timeout_seconds)
        else:
            # One-shot Q&A (report interpretation, alerts): new conversation each time.
            temp_conversation_id = appbuilder_client.create_conversation()
            resp = appbuilder_client.run(temp_conversation_id, prompt_text, timeout=timeout_seconds)

        if resp and resp.content and resp.content.answer:
            return resp.content.answer
        else:
            logger.warning(f"AppBuilder agent returned an empty or malformed response: {resp}")
            return "AI助手未能生成有效回复，可能是应用配置或回复内容问题。"
    except requests.exceptions.Timeout as e:
        logger.error(f"AppBuilder Timeout Error: {e}", exc_info=True)
        return f"AI助手服务错误: 请求超时 ({timeout_seconds}秒)。AppBuilder服务响应过慢或网络延迟高。请稍后重试。"
    except appbuilder.AppBuilderServerException as e:
        # Map well-known server-side failures to friendlier messages.
        logger.error(f"AppBuilder Server Error: {e.code} - {e.message}", exc_info=True)
        if "QuotaLimitExceeded" in e.message:
            return f"AI助手服务错误: 配额已达上限。请登录百度千帆控制台检查您的AppBuilder应用配额。"
        if "Network Error" in e.message or "Service Temporarily Unavailable" in e.message or "Gateway Timeout" in e.message:
            return f"AI助手服务错误: 网络或服务暂时不可用 ({e.message})。请稍后重试。"
        return f"AI助手服务错误: {e.message}。请检查AppBuilder应用配置或额度。"
    except requests.exceptions.ConnectionError as e:
        logger.error(f"AppBuilder Connection Error: {e}", exc_info=True)
        return f"AI助手服务错误: 网络连接失败。请检查服务器网络。"
    except Exception as e:
        logger.error(f"An unexpected error occurred during AppBuilder agent call: {e}", exc_info=True)
        return f"AI助手调用失败: {str(e)}。请检查网络或AppBuilder SDK状态。"

# ================= Qianfan access-token management =================
_cached_qianfan_access_token = None
_qianfan_token_expiry_time = 0
_qianfan_token_lock = threading.Lock() # serializes token fetch/refresh across threads

def get_qianfan_access_token():
    """Return a cached Baidu Qianfan OAuth access token, refreshing when expired.

    Reads BAIDU_API_KEY / BAIDU_SECRET_KEY from the environment. Returns None
    (and notifies WebSocket clients) when credentials are missing or the token
    request fails. Thread-safe via _qianfan_token_lock.
    """
    global _cached_qianfan_access_token, _qianfan_token_expiry_time

    with _qianfan_token_lock:
        # Serve from cache while the token is still valid.
        if _cached_qianfan_access_token and time.time() < _qianfan_token_expiry_time:
            return _cached_qianfan_access_token

        api_key = os.environ.get("BAIDU_API_KEY")
        secret_key = os.environ.get("BAIDU_SECRET_KEY")

        if not api_key or not secret_key:
            logger.error("BAIDU_API_KEY or BAIDU_SECRET_KEY environment variables are not set for Qianfan direct API calls.")
            socketio.emit('log_message', {'data': 'ERROR: BAIDU_API_KEY 或 BAIDU_SECRET_KEY 未设置。部分AI功能受限。'})
            return None

        url = "https://aip.baidubce.com/oauth/2.0/token"
        params = {"grant_type": "client_credentials", "client_id": api_key, "client_secret": secret_key}
        try:
            response = requests.post(url, params=params, timeout=10)
            response.raise_for_status()
            result = response.json()
            if "access_token" in result:
                _cached_qianfan_access_token = result["access_token"]
                # Tokens typically last 30 days (2592000 s); refresh 5 minutes early.
                _qianfan_token_expiry_time = time.time() + result.get("expires_in", 2592000) - 300
                logger.info("Successfully obtained new Qianfan Access Token.")
                socketio.emit('log_message', {'data': 'INFO: 百度千帆 Access Token 获取成功。'})
                return _cached_qianfan_access_token
            else:
                error_msg = result.get('error_description', result.get('error', '未知错误'))
                logger.error(f"Failed to get Qianfan Access Token: {error_msg}")
                socketio.emit('log_message', {'data': f'ERROR: 百度千帆 Access Token 获取失败: {error_msg}。'})
                return None
        except requests.exceptions.RequestException as e:
            logger.error(f"Error requesting Qianfan Access Token: {e}", exc_info=True)
            socketio.emit('log_message', {'data': f'ERROR: 请求百度千帆 Access Token 失败: {e}。'})
            return None

def call_baidu_image_understanding_api(image_b64: str, question: str) -> str:
    """Ask Baidu's image-understanding API *question* about a base64 image.

    Two-phase protocol: submit a task, then poll the get-result endpoint
    until it completes (max_poll_attempts * poll_interval_seconds budget).
    Returns the description text on success, otherwise a user-facing Chinese
    error string (this function never raises).
    """
    access_token = get_qianfan_access_token()
    if not access_token:
        return "图像理解AI服务未就绪：无法获取百度云 Access Token。请检查 BAIDU_API_KEY 和 BAIDU_SECRET_KEY。"

    submit_url = f"https://aip.baidubce.com/rest/2.0/image-classify/v1/image-understanding/request?access_token={access_token}"

    # Strip a possible 'data:image/...;base64,' data-URL prefix.
    if ',' in image_b64:
        pure_b64 = image_b64.split(',')[1]
    else:
        pure_b64 = image_b64

    # Size limit: the base64-encoded image must not exceed 10 MB.
    # The Qianfan API requires the image field to be URL-encoded.
    encoded_image_b64 = urllib.parse.quote_plus(pure_b64)

    submit_payload = json.dumps({
        "image": encoded_image_b64,
        "question": question
    })
    submit_headers = {
        'Content-Type': 'application/json'
    }

    try:
        submit_response = requests.post(submit_url, headers=submit_headers, data=submit_payload.encode("utf-8"), timeout=30)
        submit_response.raise_for_status()
        submit_result = submit_response.json()

        if "result" in submit_result and "task_id" in submit_result["result"]:
            task_id = submit_result["result"]["task_id"]
            logger.info(f"Image Understanding: Submitted task {task_id}. Polling for result...")
        else:
            error_message = submit_result.get('error_msg', submit_result.get('error', {}).get('message', '未知错误'))
            logger.error(f"Image Understanding: Failed to get task_id from submit response: {submit_result}. Error: {error_message}")
            return f"图像理解AI提交请求失败: {error_message}"

    except requests.exceptions.Timeout as e:
        logger.error(f"Image Understanding Submit Timeout: {e}", exc_info=True)
        return f"图像理解AI提交请求超时。图片过大或网络延迟高。请稍后重试。"
    except requests.exceptions.RequestException as e:
        # e.response may be None (e.g. connection failure before a response).
        error_text = e.response.text if e.response else str(e)
        status_code = e.response.status_code if e.response else 'N/A'
        logger.error(f"Image Understanding Submit Error: {status_code} - {error_text}", exc_info=True)
        return f"图像理解AI提交请求失败: HTTP错误 {status_code} - {error_text}"
    except Exception as e:
        logger.error(f"An unexpected error occurred during image understanding submit: {e}", exc_info=True)
        return f"图像理解AI提交失败: {str(e)}"

    # Phase 2: poll the task result until success, failure, or attempts exhausted.
    get_result_url = f"https://aip.baidubce.com/rest/2.0/image-classify/v1/image-understanding/get-result?access_token={access_token}"
    get_result_payload = json.dumps({"task_id": task_id})

    max_poll_attempts = 15
    poll_interval_seconds = 2

    for i in range(max_poll_attempts):
        time.sleep(poll_interval_seconds)
        try:
            get_response = requests.post(get_result_url, headers=submit_headers, data=get_result_payload.encode("utf-8"), timeout=10)
            get_response.raise_for_status()
            get_result = get_response.json()

            if "result" in get_result:
                ret_code = get_result["result"].get("ret_code")
                ret_msg = get_result["result"].get("ret_msg")
                description = get_result["result"].get("description")

                if ret_code == 0 and ret_msg == "success":
                    logger.info(f"Image Understanding: Task {task_id} completed successfully.")
                    return description
                elif ret_code == 1 and ret_msg == "processing":
                    logger.info(f"Image Understanding: Task {task_id} still processing (attempt {i+1}/{max_poll_attempts})...")
                    continue
                else:
                    logger.error(f"Image Understanding: Task {task_id} failed with unexpected status: {get_result}")
                    return f"图像理解AI处理失败: {get_result.get('result', {}).get('ret_msg', '未知状态')}"
            else:
                logger.error(f"Image Understanding: Malformed result from get-result API: {get_result}")
                return f"图像理解AI获取结果异常: {get_result.get('error_msg', '响应格式错误')}"

        except requests.exceptions.Timeout as e:
            # A single poll timeout is retried; only the final attempt gives up.
            logger.warning(f"Image Understanding Get Result Timeout (task {task_id}, attempt {i+1}): {e}")
            if i == max_poll_attempts - 1:
                logger.error(f"Image Understanding: Task {task_id} timed out after {max_poll_attempts} attempts.")
                return f"图像理解AI处理超时，请稍后再试。"
        except requests.exceptions.RequestException as e:
            error_text = e.response.text if e.response else str(e)
            status_code = e.response.status_code if e.response else 'N/A'
            logger.error(f"Image Understanding Get Result Error (task {task_id}, attempt {i+1}): {status_code} - {error_text}", exc_info=True)
            return f"图像理解AI获取结果失败: HTTP错误 {status_code} - {error_text}"
        except Exception as e:
            logger.error(f"An unexpected error occurred during image understanding result polling: {e}", exc_info=True)
            return f"图像理解AI获取结果异常: {str(e)}"

    logger.error(f"Image Understanding: Task {task_id} did not complete within {max_poll_attempts} attempts.")
    return "图像理解AI处理超时，请稍后再试。"


# ================= Behavior type definitions =================
# Descriptors shared with the frontend: Chinese display label,
# machine-readable value, and the color name the frontend/cv2 drawing uses.
BEHAVIOR_TYPES = [
    { 'label': '正常行为', 'value': 'normal', 'color': 'green' },
    { 'label': '危险行为', 'value': 'danger', 'color': 'red' },
    { 'label': '可疑行为', 'value': 'suspicious', 'color': 'orange' },
    { 'label': '跌倒', 'value': 'fall', 'color': 'volcano' },
    { 'label': '奔跑', 'value': 'run', 'color': 'gold' },
    { 'label': '聚集', 'value': 'crowd', 'color': 'purple' },
    { 'label': '站立', 'value': 'stand', 'color': 'blue' },
    { 'label': '行走', 'value': 'walk', 'color': 'cyan' },
    { 'label': '弯腰', 'value': 'bend', 'color': 'magenta' }
]
BEHAVIOR_LABELS = [b['label'] for b in BEHAVIOR_TYPES]

# Campus location names used when generating/labelling records.
CAMPUS_LOCATIONS = [
  '教学楼A栋', '教学楼B栋', '图书馆', '食堂',
  '体育馆', '宿舍区', '校门口', '操场',
  '行政楼', '实验楼', '艺术楼', '停车场',
  '篮球场', '网球场', '游泳馆', '校医院'
]

# Device status descriptors (label/value/color), mirroring BEHAVIOR_TYPES' shape.
DEVICE_STATUS_TYPES = [
  { 'label': '在线', 'value': 'online', 'color': 'green' },
  { 'label': '离线', 'value': 'offline', 'color': 'gray' },
  { 'label': '故障', 'value': 'error', 'color': 'red' },
  { 'label': '维护中', 'value': 'maintenance', 'color': 'orange' }
]

# Safety guidance shown alongside each detected behavior, keyed by label.
SAFETY_INFO = {
    "正常行为": { "title": "行为正常", "tips": ["当前行为符合预期。", "未发现明显安全隐患。"] },
    "站立": { "title": "人员站立", "tips": ["人员处于站立姿态。", "请结合周边环境判断。"] },
    "行走": { "title": "人员行走", "tips": ["人员正在行走。", "关注行进方向和速度。"] },
    "危险行为": { "title": "检测到危险行为!", "tips": ["姿态异常，可能存在危险。", "请立即核实情况！", "建议安保人员介入。"] },
    "跌倒": { "title": "检测到人员跌倒!", "tips": ["人员可能已跌倒，存在受伤风险。", "请立即确认人员安全！", "必要时提供援助。"] },
    "奔跑": { "title": "人员奔跑", "tips": ["检测到人员快速移动。", "请注意奔跑原因和方向。", "可能是嬉戏或紧急情况。"] },
    "可疑行为": { "title": "检测到可疑行为", "tips": ["行为模式与常规不符。", "建议安全人员保持关注。", "记录行为特征以备后续分析。"] },
    "弯腰": { "title": "人员弯腰", "tips": ["检测到弯腰姿态。", "注意是否在拾取物品或有其他意图。"] },
    "聚集": { "title": "检测到人员聚集", "tips": ["多名人员在同一区域停留。", "请关注是否可能引起秩序问题或冲突。"] },
    "未知": { "title": "行为类型未知", "tips": ["未能准确识别当前行为。", "请人工核查图像。", "可能是遮挡或姿态复杂导致。"] }
}

def get_behavior_obj_by_label(label_or_value):
    """Resolve a behavior descriptor by its label first, then by its value.

    Unknown inputs fall back to a gray 'unknown' descriptor.
    """
    # Labels take precedence over values, preserving the lookup order
    # callers rely on.
    for field in ('label', 'value'):
        match = next((b for b in BEHAVIOR_TYPES if b[field] == label_or_value), None)
        if match is not None:
            return match
    fallback_label = label_or_value if isinstance(label_or_value, str) else "未知"
    return { 'label': fallback_label, 'value': 'unknown', 'color': 'gray' }


def get_device_status_obj_by_value(value):
    """Return the device-status descriptor whose 'value' matches *value*.

    Unknown values fall back to the first entry ('online').
    """
    candidates = (status for status in DEVICE_STATUS_TYPES if status['value'] == value)
    return next(candidates, DEVICE_STATUS_TYPES[0])

# BGR color tuples for OpenCV drawing calls, keyed by frontend color names.
COLOR_MAP_BGR = {
    'green': (0, 255, 0),
    'red': (0, 0, 255),
    'orange': (0, 165, 255),
    'volcano': (0, 85, 255), # A dark red-orange
    'gold': (0, 215, 255),
    'purple': (128, 0, 128),
    'blue': (255, 0, 0),
    'cyan': (255, 255, 0),
    'magenta': (255, 0, 255),
    'gray': (128, 128, 128),
    'default': (128, 128, 128) # Default color if not found
}
def get_draw_color(color_name):
    """Map a color name (case-insensitive) to a BGR tuple for cv2 drawing.

    Unknown names — and non-string inputs such as None, which previously
    raised AttributeError on .lower() — resolve to the gray 'default' color.
    """
    if not isinstance(color_name, str):
        return COLOR_MAP_BGR['default']
    return COLOR_MAP_BGR.get(color_name.lower(), COLOR_MAP_BGR['default'])

# ================= Model initialization =================
model = None # global YOLO pose model; stays None when loading failed
def load_yolo_model():
    """Load the YOLO model named in settings into the global ``model``.

    If the .pt file is missing locally, the YOLO constructor is given a
    chance to download it. On any failure ``model`` is set to None and an
    error is pushed to WebSocket clients.
    """
    global model
    model_name = current_settings.get('model_version', 'yolov8n-pose')
    model_path = f'{model_name}.pt'

    logger.info(f"Attempting to load YOLO model: {model_path}")
    try:
        if not os.path.exists(model_path):
            logger.warning(f"Model file '{model_path}' not found locally. Attempting to download...")
            # For demonstration, we can simulate download or provide a public URL
            # In a real scenario, you'd download from Ultralytics or a private repo
            # Example: from ultralytics import checks; checks.setup_yolo() to ensure models are downloaded
            try:
                # This will try to download if not exists
                temp_model = YOLO(model_path)
                logger.info(f"YOLO model '{model_path}' downloaded and loaded successfully.")
                model = temp_model
            except Exception as dl_e:
                logger.error(f"Failed to download or load YOLO model '{model_path}': {dl_e}. Please check your network or try downloading it manually.")
                model = None
        else:
            model = YOLO(model_path)
            logger.info(f"YOLO model '{model_path}' loaded successfully.")
    except Exception as e:
        logger.error(f"Failed to load YOLO model from '{model_path}': {str(e)}. "
                     "This might be due to missing file, network issues, or corrupted download. "
                     "Please ensure the .pt file exists or is correctly named.", exc_info=True)
        model = None
        socketio.emit('log_message', {'data': f'ERROR: AI模型加载失败: {model_name}. 请检查服务器日志并确保模型文件存在。'})

# ================= Utility functions =================
def allowed_file(filename):
    """Return True when *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS

# ================= Behavior detection logic =================
def calculate_angle(p1, p2, p3):
    """Return the angle in degrees at vertex *p2* formed by points p1-p2-p3.

    Degenerate input (either arm of the angle has zero length) yields 0.
    """
    vertex = np.array(p2)
    arm_a = np.array(p1) - vertex
    arm_b = np.array(p3) - vertex

    len_a = np.linalg.norm(arm_a)
    len_b = np.linalg.norm(arm_b)
    # A zero-length arm leaves the angle undefined; report 0 instead of
    # dividing by zero.
    if len_a == 0 or len_b == 0:
        return 0

    # Clip guards against floating-point drift pushing the cosine outside [-1, 1].
    cos_theta = np.clip(np.dot(arm_a, arm_b) / (len_a * len_b), -1, 1)
    return np.degrees(np.arccos(cos_theta))

def calculate_distance(p1, p2):
    """Return the Euclidean distance between points *p1* and *p2*."""
    delta = np.array(p1) - np.array(p2)
    return np.sqrt(np.sum(delta * delta))

def detect_behavior(keypoints_data, person_box):
    """Classify one person's behavior from COCO pose keypoints.

    Parameters:
        keypoints_data: sequence of 17 (x, y, confidence) keypoints in
            COCO order (nose, eyes, ears, shoulders, elbows, wrists, hips,
            knees, ankles).
        person_box: (x1, y1, x2, y2) bounding box of the person.

    Returns:
        (label, safety_info): the Chinese behavior label and its matching
        SAFETY_INFO entry.

    Rules are evaluated in priority order — fall > run > bend > hands-up >
    walk > stand > suspicious — returning at the first match; otherwise
    "正常行为" (normal behavior). Thresholds below are heuristic constants.
    """
    # Keypoint indices (0-indexed)
    NOSE, LEFT_EYE, RIGHT_EYE, LEFT_EAR, RIGHT_EAR, \
    LEFT_SHOULDER, RIGHT_SHOULDER, LEFT_ELBOW, RIGHT_ELBOW, LEFT_WRIST, RIGHT_WRIST, \
    LEFT_HIP, RIGHT_HIP, LEFT_KNEE, RIGHT_KNEE, LEFT_ANKLE, RIGHT_ANKLE = range(17)

    # Confidence threshold for keypoints to be considered
    KP_CONF_THRESHOLD = 0.3

    def get_kp(idx):
        # Return (x, y) for a keypoint, or None when missing/low-confidence.
        if idx < len(keypoints_data) and keypoints_data[idx][2] > KP_CONF_THRESHOLD:
            return keypoints_data[idx][:2]
        return None

    kps = {i: get_kp(i) for i in range(17)} # Dictionary for easier access by index

    x1, y1, x2, y2 = person_box
    box_width = x2 - x1
    box_height = y2 - y1

    # 1. Fall Detection
    # Check if a person is lying down (wide bounding box, low aspect ratio compared to standing)
    # And if keypoints like shoulders and hips are roughly at the same height (on the ground)
    is_fallen = False
    if box_height > 0 and box_width / box_height > 1.8: # Aspect ratio suggests horizontal posture
        if kps[LEFT_SHOULDER] and kps[RIGHT_SHOULDER] and kps[LEFT_HIP] and kps[RIGHT_HIP]:
            shoulder_y = (kps[LEFT_SHOULDER][1] + kps[RIGHT_SHOULDER][1]) / 2
            hip_y = (kps[LEFT_HIP][1] + kps[RIGHT_HIP][1]) / 2
            # If shoulder and hip y-coordinates are very close, likely lying down
            if abs(shoulder_y - hip_y) < box_height * 0.2: # Threshold for vertical distance
                is_fallen = True
    if is_fallen:
        return "跌倒", SAFETY_INFO.get("跌倒", SAFETY_INFO["未知"])

    # 2. Running Detection
    # Legs are bent, and ankles are far apart or knees are high
    if kps[LEFT_KNEE] and kps[RIGHT_KNEE] and kps[LEFT_HIP] and kps[RIGHT_HIP] and kps[LEFT_ANKLE] and kps[RIGHT_ANKLE]:
        left_knee_angle = calculate_angle(kps[LEFT_HIP], kps[LEFT_KNEE], kps[LEFT_ANKLE])
        right_knee_angle = calculate_angle(kps[RIGHT_HIP], kps[RIGHT_KNEE], kps[RIGHT_ANKLE])

        # A "bent" leg (smaller angle) combined with significant displacement (indicating stride)
        # Using a relaxed threshold for bent legs (e.g., < 160 degrees) for motion
        if (left_knee_angle < 160 and right_knee_angle < 160) and \
           (abs(kps[LEFT_ANKLE][0] - kps[RIGHT_ANKLE][0]) > box_width * 0.3): # Feet are wide apart horizontally
            return "奔跑", SAFETY_INFO.get("奔跑", SAFETY_INFO["未知"])

    # 3. Bending Detection
    # Angle between mid-shoulder-hip vector and vertical is significant
    if kps[LEFT_SHOULDER] and kps[RIGHT_SHOULDER] and kps[LEFT_HIP] and kps[RIGHT_HIP]:
        mid_shoulder = ((kps[LEFT_SHOULDER][0] + kps[RIGHT_SHOULDER][0]) / 2, (kps[LEFT_SHOULDER][1] + kps[RIGHT_SHOULDER][1]) / 2)
        mid_hip = ((kps[LEFT_HIP][0] + kps[RIGHT_HIP][0]) / 2, (kps[LEFT_HIP][1] + kps[RIGHT_HIP][1]) / 2)

        # Vector from hip to shoulder
        torso_vector = np.array(mid_shoulder) - np.array(mid_hip)
        if np.linalg.norm(torso_vector) > 0: # Ensure non-zero vector
            # Compare with a vertical vector (0, -1) pointing upwards
            vertical_vector = np.array([0, -1])
            dot_product = np.dot(torso_vector, vertical_vector)
            norms_product = np.linalg.norm(torso_vector) * np.linalg.norm(vertical_vector)
            angle_rad = np.arccos(np.clip(dot_product / norms_product, -1, 1))
            angle_deg = np.degrees(angle_rad)

            # If the torso is tilted significantly (e.g., more than 30 degrees from vertical)
            if angle_deg > 30 and angle_deg < 150: # Avoid angles close to 0 (standing) or 180 (lying down)
                return "弯腰", SAFETY_INFO.get("弯腰", SAFETY_INFO["未知"])

    # 4. Hands Above Head (potential danger)
    # Wrists are significantly above nose or ear level
    if kps[LEFT_WRIST] and kps[RIGHT_WRIST] and kps[NOSE]:
        # Note: image y grows downward, so "above" means a smaller y value.
        if kps[LEFT_WRIST][1] < kps[NOSE][1] - box_height * 0.1 and kps[RIGHT_WRIST][1] < kps[NOSE][1] - box_height * 0.1:
            return "危险行为", SAFETY_INFO.get("危险行为", SAFETY_INFO["未知"])


    # 5. Walking Detection
    # Check average knee angles (should be somewhat bent for walking) and hip-ankle vertical distance
    if kps[LEFT_KNEE] and kps[RIGHT_KNEE] and kps[LEFT_HIP] and kps[RIGHT_HIP] and kps[LEFT_ANKLE] and kps[RIGHT_ANKLE]:
        left_knee_angle = calculate_angle(kps[LEFT_HIP], kps[LEFT_KNEE], kps[LEFT_ANKLE])
        right_knee_angle = calculate_angle(kps[RIGHT_HIP], kps[RIGHT_KNEE], kps[RIGHT_ANKLE])

        # If knees are slightly bent (e.g., 140-170 degrees) and feet are not too close (some stride)
        if (140 < left_knee_angle < 175 or 140 < right_knee_angle < 175) and \
           (abs(kps[LEFT_ANKLE][0] - kps[RIGHT_ANKLE][0]) > box_width * 0.1): # Minimal stride
            return "行走", SAFETY_INFO.get("行走", SAFETY_INFO["未知"])


    # 6. Standing Detection
    # If keypoints are mostly vertical and legs are relatively straight
    if kps[LEFT_KNEE] and kps[RIGHT_KNEE] and kps[LEFT_HIP] and kps[RIGHT_HIP] and kps[LEFT_ANKLE] and kps[RIGHT_ANKLE]:
        left_knee_angle = calculate_angle(kps[LEFT_HIP], kps[LEFT_KNEE], kps[LEFT_ANKLE])
        right_knee_angle = calculate_angle(kps[RIGHT_HIP], kps[RIGHT_KNEE], kps[RIGHT_ANKLE])
        # If legs are mostly straight (e.g., > 165 degrees)
        if left_knee_angle > 165 and right_knee_angle > 165:
            return "站立", SAFETY_INFO.get("站立", SAFETY_INFO["未知"])

    # 7. Suspicious (General unusual posture not fitting others)
    # If torso or limb angles are unusual, or a low number of highly confident keypoints
    visible_keypoints_count = sum(1 for kp in keypoints_data if kp[2] > KP_CONF_THRESHOLD)
    if visible_keypoints_count < 8: # If not many keypoints are confident, could be suspicious or obstructed
        return "可疑行为", SAFETY_INFO.get("可疑行为", SAFETY_INFO["未知"])


    # Default to normal behavior if no specific anomaly is detected
    return "正常行为", SAFETY_INFO.get("正常行为", SAFETY_INFO["正常行为"])


# ================= File upload handling =================
@app.route('/api/upload', methods=['POST'])
def upload_file_route():
    """Accept a multipart image upload and store it in UPLOAD_FOLDER.

    Returns JSON with the relative URL of the saved file, or a 400 error for
    a missing file, empty filename, or disallowed extension.
    """
    if 'file' not in request.files:
        logger.error("Upload: No file part")
        return jsonify({'error': 'No file part'}), 400
    file = request.files['file']
    if file.filename == '':
        logger.error("Upload: No selected file")
        return jsonify({'error': 'No selected file'}), 400
    if file and allowed_file(file.filename):
        filename = secure_filename(file.filename)
        filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        file.save(filepath)
        # Bug fix: the log message and returned URL previously contained the
        # literal placeholder text '(unknown)' instead of the saved filename,
        # so the frontend could never locate the uploaded file.
        logger.info(f"Upload: File '{filename}' saved successfully.")
        file_url = f"/uploads/{filename}" # Relative URL for frontend to use
        return jsonify({
            'success': True,
            'file_url': file_url,
            'message': 'File uploaded successfully'
        })
    logger.error("Upload: File type not allowed")
    return jsonify({'error': 'File type not allowed'}), 400


# ================= 核心检测接口 =================
@app.route('/api/detect', methods=['POST'])
def detect_route():
    """Run YOLO pose detection on a posted base64 image and return annotated results.

    Request JSON: ``imageBase64`` (with or without a ``data:image/...;base64,``
    prefix), plus optional ``deviceId`` and ``location``. The image is decoded,
    run through the global YOLO ``model``, annotated with boxes and keypoints,
    and each detected person is behavior-classified; a scenario-level "crowd"
    entry is appended when 5+ people cluster tightly. Every request — success
    or failure — is persisted as a ``HistoryRecord`` before responding, so the
    history view also records errors.
    """
    global model, current_settings
    data = request.get_json()
    if not data or 'imageBase64' not in data:
        logger.error("Detect: Missing imageBase64 data.")
        return jsonify({'error': '未收到图片数据'}), 400

    original_image_b64 = data['imageBase64']
    device_id = data.get('deviceId') # Optional: device ID if image comes from a specific device
    location = data.get('location')   # Optional: location if image comes from a specific location

    img = None
    error_message = None
    try:
        # Handle both with and without 'data:image/jpeg;base64,' prefix
        if original_image_b64.startswith('data:image'):
            img_bytes = base64.b64decode(original_image_b64.split(',')[1])
        else:
            img_bytes = base64.b64decode(original_image_b64)

        nparr = np.frombuffer(img_bytes, np.uint8)
        img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        if img is None: raise ValueError("Image decoding failed. Might be corrupted or invalid format.")
    except Exception as e:
        error_message = f"图片解码或加载失败: {str(e)}"
        logger.error(f"Detect: Image processing initial stage failed: {error_message}")

    if model is None and not error_message: # If image loaded but model didn't
        error_message = "AI模型未就绪，请检查服务器日志或尝试重新加载模型。"
        logger.error("Detect: Model not loaded or failed to load.")

    persons_data = []
    safety_analysis_text = "分析失败或未进行分析。"
    result_image_b64_output = original_image_b64 # Default to original if processing fails early

    if not error_message: # Proceed only if image loaded and model is ready
        try:
            # Settings store the threshold as a 0-100 percentage; YOLO wants 0-1.
            confidence_threshold = current_settings.get('confidence_threshold', 50) / 100.0
            logger.info(f"Detect: Using confidence threshold: {confidence_threshold}")
            results = model(img, conf=confidence_threshold, imgsz=640) # Perform detection

            person_id_counter = 0
            for r_idx, r in enumerate(results):
                # Check if keypoints and boxes attributes exist and are not None
                if not hasattr(r, 'keypoints') or r.keypoints is None or r.boxes is None:
                    logger.warning(f"Detect: No keypoints or boxes found for result {r_idx}. Skipping. Ensure a pose model (e.g., yolov8n-pose) is loaded if keypoints are expected.")
                    continue

                for box, keypoints_obj in zip(r.boxes, r.keypoints):
                    if int(box.cls[0]) == 0:  # Class 0 is 'person' in COCO
                        x1, y1, x2, y2 = map(int, box.xyxy[0])
                        confidence = float(box.conf[0])
                        kpts_numpy = keypoints_obj.data[0].cpu().numpy() # Keypoints as numpy array

                        behavior_label, safety_info_dict = detect_behavior(kpts_numpy, (x1, y1, x2, y2))
                        behavior_obj = get_behavior_obj_by_label(behavior_label)
                        draw_color = get_draw_color(behavior_obj['color'])

                        # Draw bounding box and label
                        cv2.rectangle(img, (x1, y1), (x2, y2), draw_color, 2)
                        cv2.putText(img, f"{behavior_obj['label']} ({confidence:.2f})",
                                   (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, draw_color, 2)

                        # Draw keypoints
                        for i in range(kpts_numpy.shape[0]): # Iterate through keypoints
                            px, py, pconf = kpts_numpy[i]
                            if pconf > 0.3: # Draw only if confidence is above a threshold
                                cv2.circle(img, (int(px), int(py)), 3, draw_color, -1)

                        persons_data.append({
                            "id": f"person_{person_id_counter}", # Unique ID for this detection instance
                            "confidence": f"{confidence:.2f}",
                            "behavior": behavior_obj,
                            "safety_info": safety_info_dict,
                            "box_coords": [x1, y1, x2, y2],
                            "keypoints_coords": kpts_numpy.tolist() # Convert keypoints to list for JSON
                        })
                        person_id_counter += 1

            # Crowd detection logic (simple version)
            if len(persons_data) >= 5: # If 5 or more people are detected
                all_x_centers = [p["box_coords"][0] + (p["box_coords"][2] - p["box_coords"][0]) / 2 for p in persons_data]
                all_y_centers = [p["box_coords"][1] + (p["box_coords"][3] - p["box_coords"][1]) / 2 for p in persons_data]

                if all_x_centers and all_y_centers: # Ensure lists are not empty
                    max_x, min_x = max(all_x_centers), min(all_x_centers)
                    max_y, min_y = max(all_y_centers), min(all_y_centers)

                    cluster_width = max_x - min_x
                    cluster_height = max_y - min_y

                    scene_width = img.shape[1]
                    scene_height = img.shape[0]

                    # If people are clustered in less than 50% of scene width/height
                    if cluster_width < scene_width * 0.5 and cluster_height < scene_height * 0.5:
                        # Check if '聚集' is not already a primary behavior for any individual
                        if not any(p['behavior']['value'] == 'crowd' for p in persons_data):
                            crowd_behavior = get_behavior_obj_by_label('聚集')
                            persons_data.append({ # Add a scenario-level "crowd" event
                                "id": "scenario_crowd",
                                "confidence": "0.98", # High confidence for scenario event
                                "behavior": crowd_behavior,
                                "safety_info": SAFETY_INFO['聚集'],
                                "box_coords": [int(min_x), int(min_y), int(max_x), int(max_y)], # Bounding box of the crowd
                                "keypoints_coords": [] # No specific keypoints for crowd scenario
                            })


            # Encode result image to base64
            _, img_encoded = cv2.imencode('.jpg', img)
            result_image_b64_output = f"data:image/jpeg;base64,{base64.b64encode(img_encoded).decode('utf-8')}"

            if current_settings.get('ai_interpret_results', True):
                safety_analysis_text = generate_safety_analysis_text(persons_data, location)
                logger.info(f"Detect: Analysis completed for {len(persons_data)} persons. Safety analysis generated.")
            else:
                safety_analysis_text = "AI分析功能已关闭。"
                logger.info("Detect: AI analysis skipped as per settings.")

        except Exception as e:
            error_message = f"检测或结果处理失败: {str(e)}"
            logger.error(f"Detect: Detection or result processing failed: {error_message}", exc_info=True)
            persons_data = [] # Clear persons data on error
            safety_analysis_text = "行为监测系统内部错误，无法生成完整报告或AI服务不可用。请检查服务器日志和AppBuilder AI连接。"
            # result_image_b64_output remains original_image_b64 or last good state

    # Save to history (always, including failed requests, so errors are auditable)
    new_record = HistoryRecord(
        timestamp=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        person_count=len(persons_data),
        original_image_b64=original_image_b64,
        result_image_b64=result_image_b64_output,
        safety_analysis=safety_analysis_text,
        error_message=error_message, # Save error message if any
        persons_json=json.dumps(persons_data),
        device_id=device_id,
        location=location,
        record_type='detection'
    )
    db.session.add(new_record)
    db.session.commit()
    logger.info("Detect: Result saved to database.")

    if error_message:
        return jsonify({'error': error_message, 'result_image': result_image_b64_output, 'safety_analysis': safety_analysis_text}), 500
    else:
        return jsonify({
            "person_count": len(persons_data),
            "persons": persons_data,
            "result_image": result_image_b64_output,
            "safety_analysis": safety_analysis_text
        })

# ================= 历史记录接口 =================
@app.route('/api/history', methods=['GET'])
def get_history_route():
    """Return every history record (detections and image interpretations), newest first."""
    try:
        all_records = HistoryRecord.query.order_by(HistoryRecord.timestamp.desc()).all()
        payload = [record.to_dict() for record in all_records]
        logger.info(f"History: Retrieved {len(payload)} records.")
        return jsonify(payload)
    except Exception as e:
        logger.error(f"History: Failed to retrieve history: {str(e)}", exc_info=True)
        return jsonify({'error': 'Failed to retrieve history'}), 500

# ================= AI助手功能：安全分析报告生成 =================
def generate_safety_analysis_text(persons_data, location=None):
    """Compose a Chinese prompt summarizing detected behaviors and ask the
    AppBuilder agent for a safety-assessment report.

    Args:
        persons_data: per-person dicts as built by the detect route (each
            carries a ``behavior`` dict with ``label``/``value`` keys).
        location: optional human-readable camera location.

    Returns:
        The agent's generated report text, or a fixed notice when AI analysis
        is disabled or the AppBuilder client is unavailable.
    """
    if not current_settings.get('ai_interpret_results', False) or not appbuilder_client:
        return "AI分析功能已关闭或AppBuilder AI助手未就绪。"

    area = location if location else '未知区域'

    if not persons_data:
        # Empty scene: request a short "area is safe" style assessment.
        prompt_text = f"当前摄像头位于{area}，未检测到人员。请用专业口吻生成一段简短的安全态势评估报告，强调区域安全。保持简洁，200字以内。"
    else:
        # Tally detections by their display label.
        label_counts = defaultdict(int)
        for person in persons_data:
            label_counts[person.get("behavior", {}).get("label", "未知")] += 1

        # Summaries follow the canonical BEHAVIOR_TYPES ordering.
        summary_parts = [
            f"{label_counts[bt['label']]}人{bt['label']}"
            for bt in BEHAVIOR_TYPES
            if label_counts[bt['label']] > 0
        ]
        behavior_summary = ", ".join(summary_parts) if summary_parts else "无具体行为数据"

        # Behavior values treated as abnormal (bending included).
        abnormal_values = ['danger', 'suspicious', 'fall', 'run', 'crowd', 'bend']
        has_abnormal = any(
            get_behavior_obj_by_label(lbl)['value'] in abnormal_values
            for lbl in label_counts
        )

        prompt_text = f"当前场景位于{area}，共检测到{len(persons_data)}名人员，行为分布为：{behavior_summary}。"

        if has_abnormal:
            abnormal_parts = [
                f"{label_counts[bt['label']]}人{bt['label']}"
                for bt in BEHAVIOR_TYPES
                if label_counts[bt['label']] > 0 and bt['value'] in abnormal_values
            ]
            prompt_text += f"特别关注：检测到以下异常行为：{', '.join(abnormal_parts)}。"
            prompt_text += "请作为校园安全AI助手，对当前情况进行专业、严谨且详细的评估。分析潜在风险，并给出具体、可行的应对建议，例如：'请立即核实情况，必要时安保人员介入。'。请避免无关的客套话，直接给出分析报告，字数控制在300字以内。"
        else:
            prompt_text += "未检测到异常行为。"
            prompt_text += "请作为校园安全AI助手，对当前情况进行专业、严谨的评估。分析潜在风险（即使是正常行为下的潜在风险，如人流密集），并给出具体、可行的应对建议。例如：'该区域安全，请保持警惕。' 如果是人流较多，可以提醒注意秩序。请避免无关的客套话，直接给出分析报告，字数控制在300字以内。"

    logger.info(f"AppBuilder Analysis Prompt: {prompt_text[:200]}...")  # log only the first 200 chars
    return call_appbuilder_agent(prompt_text)


# ================= AI助手功能：解读分析报告 =================
@app.route('/api/interpret_analysis', methods=['POST'])
def interpret_analysis_route():
    """Have the AppBuilder agent explain a safety-analysis report in accessible terms."""
    payload = request.get_json()
    report_text = payload.get('safety_analysis')
    if not report_text:
        return jsonify({'error': 'Missing safety_analysis text for interpretation'}), 400

    if not appbuilder_client:
        # Degrade gracefully when the AI client never initialized.
        return jsonify({'interpretation': "AI助手服务未就绪，无法提供解读。"}), 200

    prompt = f"作为一款校园安全AI助手，请你以专业、友好且通俗易懂的语言，详细解读以下行为监测报告：\n\n“{report_text}”\n\n请包含：\n1. 对报告内容的总结。\n2. 潜在的安全风险分析。\n3. 具体可行的安全建议或应对措施。\n4. 强调持续监测的重要性。\n请确保语言礼貌、专业，并直接给出解读内容，不要包含额外的开场白或结束语。结果应以段落形式输出，字数控制在300字以内。"

    logger.info(f"AI Interpretation: Requesting interpretation for: {report_text[:50]}...")
    result = call_appbuilder_agent(prompt)
    logger.info(f"AI Interpretation: Response received.")

    return jsonify({'interpretation': result})

# ================= AI助手功能：生成主动风险预警 =================
@app.route('/api/ai_proactive_insights', methods=['POST'])
def ai_proactive_insights_route():
    """Generate a short, forward-looking safety overview from dashboard statistics."""
    body = request.get_json()
    stats = body.get('current_stats', {})  # stats pushed from the frontend; may be absent

    if not current_settings.get('ai_proactive_alerts', False) or not appbuilder_client:
        return jsonify({'insight': "AI主动预警功能已关闭或AppBuilder AI助手未就绪。"}), 200

    # Pull individual counters, defaulting to zero when missing.
    total_detections = stats.get('totalDetections', 0)
    alerts = stats.get('alerts', 0)
    safe_persons = stats.get('safePersons', 0)
    online_devices = stats.get('onlineDevices', 0)
    total_devices = stats.get('devices', 0)

    behavior_summary = "无数据"
    raw_behavior_stats = stats.get('behaviorStats')
    if raw_behavior_stats:
        # Each entry is expected to be a [label, count] pair.
        pieces = [f"{entry[1]}人{entry[0]}" for entry in raw_behavior_stats if len(entry) == 2]
        behavior_summary = ", ".join(pieces) if pieces else "无具体行为数据"

    prompt = (
        f"作为校园安全AI助手，请根据当前系统状态和行为统计数据，生成一份简短、专业且具有前瞻性的安全概览报告。数据如下：\n"
        f"- 总检测人数: {total_detections}人\n"
        f"- 安全人数: {safe_persons}人\n"
        f"- 警报次数: {alerts}次\n"
        f"- 设备在线数: {online_devices}/{total_devices}\n"
        f"- 行为统计: {behavior_summary}\n\n"
        f"请侧重于：\n1. 概括当前安全态势。\n2. 指出潜在风险点或值得关注的趋势（即使是模拟数据，也请尝试提出合理洞察）。\n3. 给出1-2条主动预防性建议。\n4. 报告长度控制在100-200字之间，语气积极、专业。不要包含额外的开场白或结束语。"
    )

    logger.info(f"AI Proactive Insights: Generating insights based on stats: {prompt[:100]}...")
    insight = call_appbuilder_agent(prompt)
    logger.info("AI Proactive Insights: Response received.")
    return jsonify({'insight': insight})

# ================= AI助手功能：智能告警处理建议 =================
@app.route('/api/ai_recommend_alert_action', methods=['POST'])
def ai_recommend_alert_action_route():
    """Ask the AppBuilder agent for concrete handling suggestions for one alert."""
    payload = request.get_json()
    title = payload.get('title')
    level_label = payload.get('level', {}).get('label', '未知')  # level arrives as an object; keep only its label
    location = payload.get('location')
    description = payload.get('description')

    # Title and location are the minimum needed for a useful recommendation.
    if not title or not location:
        return jsonify({'error': 'Missing alert details for recommendation'}), 400

    if not appbuilder_client:
        return jsonify({'recommendations': "AI助手服务未就绪，无法提供建议。"}), 200

    prompt = (
        f"作为校园安全AI助手，你收到一个告警：\n"
        f"标题: {title}\n"
        f"级别: {level_label}\n"
        f"位置: {location}\n"
        f"描述: {description if description else '无详细描述'}\n\n"
        f"请根据此信息，提出3-5条针对性强、具体可行的告警处理建议或行动方案，从“立即响应”、“调查核实”、“后续跟进”等方面展开。请直接列出建议，不要有额外说明，每条建议用数字序号开头，保持简洁。"
    )

    logger.info(f"AI Alert Recommendation: Requesting for alert: {title} at {location}.")
    suggestions = call_appbuilder_agent(prompt)
    logger.info("AI Alert Recommendation: Response received.")
    return jsonify({'recommendations': suggestions})


# ================= AI助手功能：智能聊天机器人 =================
@app.route('/api/ai_chat', methods=['POST'])
def ai_chat_route():
    """Relay a chat message to the AppBuilder agent, keeping the frontend's session."""
    payload = request.get_json()
    user_message = payload.get('message')
    session_id = payload.get('conversation_id')  # conversation ID supplied by the frontend

    if not user_message:
        return jsonify({'error': 'Missing message for AI chat'}), 400

    if not appbuilder_client:
        return jsonify({'response': "AI聊天助手服务未就绪。请检查AppBuilder应用是否配置正确。"}), 200

    # Forward the message within the caller-provided conversation.
    reply = call_appbuilder_agent(user_message, session_id)
    logger.info(f"AI Chat: User message: {user_message[:50]}..., AI response: {reply[:50]}...")
    return jsonify({'response': reply})

# ================= 图像理解接口 =================
@app.route('/api/image_interpret', methods=['POST'])
def image_interpret_route():
    """Run Baidu multimodal image understanding and archive the result in history."""
    payload = request.get_json()
    image_b64 = payload.get('image_b64')
    text_prompt = payload.get('text_prompt')

    if not image_b64 or not text_prompt:
        return jsonify({'error': '缺少图片数据或文本描述'}), 400

    interpretation = call_baidu_image_understanding_api(image_b64, text_prompt)

    # Persist a lightweight history entry. There is no person detection in
    # this flow, so the original image doubles as the "result" image.
    record = HistoryRecord(
        timestamp=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        person_count=0,
        original_image_b64=image_b64,
        result_image_b64=image_b64,
        safety_analysis=f"图像理解：问题“{text_prompt}”\n结果：{interpretation}",
        error_message=None,
        persons_json="[]",
        device_id=None,
        location="多模态图像理解",
        record_type='image_interpretation'
    )
    db.session.add(record)
    db.session.commit()
    logger.info("Image interpretation result saved to history.")

    return jsonify({'interpretation': interpretation})

# ================= 校园安防功能 =================
@app.route('/api/campus/emergency', methods=['POST'])
def emergency_alert_route():
    """Accept an emergency alarm, broadcast it over SocketIO, and acknowledge."""
    payload = request.get_json()
    if not payload or 'location' not in payload:
        logger.error("Emergency: Missing location data.")
        return jsonify({'success': False, 'error': '缺少位置信息'}), 400

    location = payload['location']
    description = payload.get('description', '无详细描述')
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

    # Alert ID combines the timestamp (separators stripped) with a random suffix.
    alert_id = f"alert_{timestamp.replace(' ', '_').replace(':', '')}_{random.randint(0, 999)}"
    alert_event = {
        'id': alert_id,
        'title': f"紧急报警: {location}",
        'level': {'label': '紧急', 'value': 'critical', 'color': 'red'},  # red marks critical urgency
        'location': location,
        'time': timestamp,
        'status': '未处理',  # alerts start out unhandled
        'description': description
    }
    logger.warning(f"紧急报警: {json.dumps(alert_event)}")
    socketio.emit('new_alert', alert_event)  # broadcast to every connected client
    return jsonify({
        'success': True, 'message': f'报警已成功发送，安保人员将前往 {location}。',
        'response_time': timestamp, 'location': location, 'description': description
    })

@app.route('/api/campus/checkin', methods=['POST'])
def campus_checkin_route():
    """Record a student check-in.

    If the student already has a ``Person`` row, update its location/time and
    reset its behavior; otherwise create a new row with a generated name and a
    randomly assigned department (mock data). Request JSON: ``student_id``
    (required) and optional ``location``.
    """
    data = request.get_json()
    if not data or 'student_id' not in data:
        logger.error("Checkin: Missing student_id.")
        return jsonify({'success': False, 'error': '缺少学号信息'}), 400

    student_id = data['student_id']
    location = data.get('location', '未知签到点')
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

    # Check if person already exists
    existing_person = Person.query.filter_by(studentId=student_id).first()
    if existing_person:
        existing_person.location = location
        existing_person.time = timestamp
        # NOTE(review): 'normal' looks like a behavior *value*; labels used
        # elsewhere are Chinese (e.g. "正常行为", see detect_behavior). Confirm
        # get_behavior_obj_by_label falls back sensibly for this argument.
        normal_behavior = get_behavior_obj_by_label('normal') # Update behavior to normal on check-in
        existing_person.behavior_label=normal_behavior['label']
        existing_person.behavior_value=normal_behavior['value']
        existing_person.behavior_color=normal_behavior['color']
        db.session.commit()
        logger.info(f"Checkin: Person {student_id} updated existing record to {location}.")
        return jsonify({
            'success': True, 'message': f'学号 {student_id} 于 {location} 签到成功 (更新)。',
            'student_id': student_id, 'checkin_time': existing_person.time, 'location': existing_person.location
        })

    # If not existing, create new
    logger.info(f"学生签到: 时间={timestamp}, 学号={student_id}, 位置={location}")

    # Department is mock data chosen at random for demo purposes.
    random_department = random.choice(['计算机工程系', '机电系', '化工系', '土木系', '经管系', '艺术设计学院', '外国语学院'])
    normal_behavior = get_behavior_obj_by_label('normal') # Default to normal behavior on check-in
    new_person = Person(
        name=f"学生_{student_id}", # Generic name
        studentId=student_id,
        department=random_department,
        behavior_label=normal_behavior['label'],
        behavior_value=normal_behavior['value'],
        behavior_color=normal_behavior['color'],
        location=location,
        time=timestamp,
        confidence='1.00' # Assuming 100% confidence for manual check-in
    )
    db.session.add(new_person)
    db.session.commit()
    logger.info(f"Checkin: Person {student_id} saved to database.")

    return jsonify({
        'success': True, 'message': f'学号 {student_id} 于 {location} 签到成功。',
        'student_id': student_id, 'checkin_time': timestamp, 'location': location
    })


# ================= 设备管理接口 =================
@app.route('/api/devices', methods=['POST'])
def add_device_route():
    """Register a new device after validating the payload and IP uniqueness."""
    payload = request.get_json()
    required_fields = ['name', 'ip', 'location', 'status', 'device_type']
    if not payload or not all(field in payload for field in required_fields):
        logger.error("Add Device: Missing device info.")
        return jsonify({'success': False, 'error': '缺少设备信息，请提供名称、IP、位置、状态和设备类型。'}), 400

    # Reject duplicate IPs up front with 409 Conflict.
    if Device.query.filter_by(ip=payload['ip']).first():
        logger.warning(f"Add Device: Device with IP {payload['ip']} already exists.")
        return jsonify({'success': False, 'error': f"设备IP {payload['ip']} 已存在。请检查或更新现有设备。"}), 409

    status_obj = get_device_status_obj_by_value(payload['status'])  # resolve the full status object

    device = Device(
        id=f"device_{datetime.datetime.now().strftime('%Y%m%d%H%M%S')}_{random.randint(0, 999)}",  # timestamp + random suffix
        name=payload['name'],
        ip=payload['ip'],
        location=payload['location'],
        status_value=status_obj['value'],
        status_label=status_obj['label'],
        status_color=status_obj['color'],
        lastActive=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        device_type=payload['device_type']
    )
    db.session.add(device)
    db.session.commit()
    logger.info(f"新增设备: '{payload['name']}' ({payload['ip']}) saved to database.")

    return jsonify({
        'success': True, 'message': f"设备 '{payload['name']}' 已成功添加。",
        'device_id': device.id
    })

@app.route('/api/devices_list', methods=['GET'])
def get_devices_list_route():
    """Return every registered device as a JSON list."""
    try:
        payload = [device.to_dict() for device in Device.query.all()]
        logger.info(f"Devices: Retrieved {len(payload)} devices.")
        return jsonify(payload)
    except Exception as e:
        logger.error(f"Devices: Failed to retrieve devices: {str(e)}", exc_info=True)
        return jsonify({'error': 'Failed to retrieve devices'}), 500


@app.route('/api/persons_list', methods=['GET'])
def get_persons_list_route():
    """Return all persons, each enriched with an ``activityHistory`` list.

    Real person↔detection ID matching is unavailable (mock data), so recent
    activity is approximated by the last 5 detection records at the person's
    last known location, picking the most "interesting" (non-normal) behavior
    found in each record.
    """
    try:
        persons = Person.query.all()
        persons_list = []
        for person in persons:
            p_dict = person.to_dict()

            # For each person, fetch recent relevant activities
            # Query HistoryRecord where detected persons include this person's ID (not reliable with mock data)
            # Or, for simplicity, retrieve records at the same location as the person's last known location
            recent_activities_raw = HistoryRecord.query.filter(
                HistoryRecord.location == person.location,
                HistoryRecord.record_type == 'detection' # Only consider detection records
            ).order_by(HistoryRecord.timestamp.desc()).limit(5).all()

            p_dict['activityHistory'] = []
            for record in recent_activities_raw:
                try:
                    detected_persons_in_record = json.loads(record.persons_json) if record.persons_json else []

                    # Try to find a relevant behavior from the record for this activity entry
                    # Prioritize non-normal behaviors first
                    relevant_behavior = None
                    if detected_persons_in_record:
                        for dp in detected_persons_in_record:
                            # If the detected person in the record *might* be our person (e.g., in the same area at the same time,
                            # or if we had real ID matching), use their behavior.
                            # For mock data, just take the most "interesting" behavior from the record's detections.
                            if dp.get('behavior', {}).get('value') not in ['normal', 'stand', 'walk']:
                                relevant_behavior = dp.get('behavior')
                                break
                        if not relevant_behavior and detected_persons_in_record:
                            # If all normal, take the first one or a "normal" placeholder
                            relevant_behavior = detected_persons_in_record[0].get('behavior')
                    if not relevant_behavior:
                        # NOTE(review): 'normal' looks like a behavior *value* being passed to a
                        # by-*label* lookup (labels elsewhere are Chinese) — confirm the fallback.
                        relevant_behavior = get_behavior_obj_by_label('normal') # Default if no specific behavior found

                    p_dict['activityHistory'].append({
                        'time': record.timestamp,
                        'location': record.location if record.location else '未知区域',
                        'behavior': relevant_behavior,
                        'record_id': record.id
                    })
                except json.JSONDecodeError:
                    logger.warning(f"Failed to decode persons_json for record {record.id} when fetching person activities.")
                    # Fallback for corrupted records
                    p_dict['activityHistory'].append({
                        'time': record.timestamp,
                        'location': record.location if record.location else '未知区域',
                        'behavior': get_behavior_obj_by_label('unknown'),
                        'record_id': record.id
                    })
            persons_list.append(p_dict)

        logger.info(f"Persons: Retrieved {len(persons_list)} persons.")
        return jsonify(persons_list)
    except Exception as e:
        logger.error(f"Persons: Failed to retrieve persons: {str(e)}", exc_info=True)
        return jsonify({'error': 'Failed to retrieve persons'}), 500


# ================= 实时监控接口 =================
@app.route('/api/live-stream/<device_id>', methods=['GET'])
def get_live_stream_url(device_id):
    """Return a playable stream URL for an online device (mocked for now)."""
    logger.info(f"Live Stream: Request for device {device_id} stream.")

    device = Device.query.get(device_id)
    if not device:
        logger.warning(f"Live Stream: Device {device_id} not found in DB.")
        return jsonify({'success': False, 'error': '未找到该摄像头。'}), 404

    if device.status_value != 'online':
        logger.warning(f"Live Stream: Device {device_id} is {device.status_label}, cannot get stream.")
        return jsonify({'success': False, 'error': f'摄像头 {device.name} 当前状态为 {device.status_label}，无法提供实时流。'}), 400

    # Placeholder: a public sample MP4 stands in for real media-server / RTSP
    # URL construction.
    demo_url = "https://test-videos.co.uk/vids/bigbuckbunny/mp4/720/Big_Buck_Bunny_720_10s_5MB.mp4"
    logger.info(f"Live Stream: Providing mock stream URL for {device_id}: {demo_url}")
    return jsonify({'success': True, 'stream_url': demo_url})

# ================= 系统设置接口 =================
@app.route('/api/settings', methods=['GET'])
def get_settings_route():
    """Return the current in-memory system settings."""
    # Read-only access: no `global` declaration is needed just to read.
    logger.info("Settings: Fetching current system settings.")
    return jsonify({'settings': current_settings})

@app.route('/api/settings', methods=['POST'])
def save_settings_route():
    """Merge posted settings into the global ``current_settings`` and persist them.

    Only keys present in ``DEFAULT_SETTINGS`` are accepted; values are coerced
    to the default's type where needed. After saving, the YOLO model is
    reloaded if ``model_version`` changed, and the AppBuilder client is
    re-initialized if ``app_builder_app_id`` changed.
    """
    global current_settings # Ensure we're modifying the global variable
    data = request.get_json()
    if not data:
        logger.error("Settings: No settings data received for save.")
        return jsonify({'success': False, 'error': '未收到设置数据。'}), 400

    # Snapshot values whose change triggers a side effect below.
    old_model_version = current_settings.get('model_version', 'yolov8n-pose')
    old_app_builder_app_id = current_settings.get('app_builder_app_id')

    # Update current_settings with received data, ensuring type consistency with defaults
    for key, value in data.items():
        if key in DEFAULT_SETTINGS:
            if isinstance(DEFAULT_SETTINGS[key], dict) and isinstance(value, dict):
                current_settings[key].update(value) # Shallow merge for dicts
            elif isinstance(DEFAULT_SETTINGS[key], list) and isinstance(value, list):
                current_settings[key] = value # Replace lists entirely
            else:
                # Ensure basic type consistency (e.g., int for confidence_threshold)
                if key == 'confidence_threshold':
                    try:
                        # Clamp to the valid 0-100 percentage range.
                        current_settings[key] = max(0, min(100, int(value)))
                    except ValueError:
                        logger.warning(f"Settings: Invalid value for {key}: {value}. Retaining old value.")
                elif key == 'ai_interpret_results' or key == 'ai_proactive_alerts' or key == 'sound_reminders':
                    current_settings[key] = bool(value) # Ensure boolean type
                else:
                    current_settings[key] = value
        else:
            logger.warning(f"Settings: Unknown setting key received: {key}. Ignoring.")
    _save_settings(current_settings)

    # Post-save actions (e.g., reload model, reinit client)
    if 'model_version' in data and data['model_version'] != old_model_version:
        logger.info(f"Model version changed from {old_model_version} to {data['model_version']}. Reloading YOLO model...")
        load_yolo_model() # This function should handle model loading

    # Re-initialize the AppBuilder client, since the App ID may have changed.
    if 'app_builder_app_id' in data and data['app_builder_app_id'] != old_app_builder_app_id:
        logger.info(f"AppBuilder App ID changed from '{old_app_builder_app_id}' to '{data['app_builder_app_id']}'. Re-initializing AppBuilder client...")
        initialize_appbuilder_client()

    logger.info(f"Settings: System settings updated: {current_settings}")
    return jsonify({'success': True, 'message': '设置已保存成功。'})

# ================= 统计数据接口 =================
@app.route('/api/stats', methods=['GET'])
def get_stats_route():
    """Aggregate dashboard statistics from detection history and devices.

    Returns JSON with scalar totals (detections, alerts, safe persons,
    device counts) plus chart-ready series: behavior distribution, per-location
    activity, and a 24-hour normal/abnormal trend.
    """
    try:
        # Total persons seen across all 'detection' records.
        total_detections = int(db.session.query(db.func.sum(HistoryRecord.person_count)).filter(HistoryRecord.record_type == 'detection').scalar() or 0)

        # Behavior values regarded as alerts/anomalies. Resolve their display
        # labels once up front (the original re-resolved them inside every loop).
        abnormal_behavior_values = ['danger', 'suspicious', 'fall', 'run', 'crowd', 'bend']
        abnormal_labels = [get_behavior_obj_by_label(v)['label'] for v in abnormal_behavior_values]

        # Alerts: detection records that carry an error OR whose safety
        # analysis mentions an abnormal label.
        # BUG FIX: the original placed `record_type == 'detection'` inside the
        # OR, which made *every* detection record count as an alert.
        alert_any = [HistoryRecord.error_message.isnot(None)]
        alert_any += [HistoryRecord.safety_analysis.like(f'%{label}%') for label in abnormal_labels]
        alerts_count = HistoryRecord.query.filter(
            HistoryRecord.record_type == 'detection',
            or_(*alert_any)
        ).count()

        # Safe persons: detections with no error and no abnormal label.
        # NOTE(review): rows whose safety_analysis is NULL are excluded by the
        # NOT LIKE predicates (SQL three-valued logic) — original behavior kept.
        safe_person_conditions = [HistoryRecord.record_type == 'detection', HistoryRecord.error_message.is_(None)]
        safe_person_conditions += [~HistoryRecord.safety_analysis.like(f'%{label}%') for label in abnormal_labels]
        safe_persons_count = int(db.session.query(db.func.sum(HistoryRecord.person_count)).filter(
            and_(*safe_person_conditions)
        ).scalar() or 0)

        total_devices = Device.query.count()
        online_devices = Device.query.filter_by(status_value='online').count()

        behavior_stats = defaultdict(int)           # behavior label -> person count
        location_activity_stats = defaultdict(int)  # location -> person count
        time_trend_normal = defaultdict(int)        # "HH:00" -> normal person count
        time_trend_abnormal = defaultdict(int)      # "HH:00" -> abnormal person count

        # Cache device locations up front instead of one Device.query.get per
        # record (N+1 query pattern in the original).
        device_locations = {d.id: d.location for d in Device.query.all()}

        # Only 'detection' records participate in the aggregations below.
        all_detection_records = HistoryRecord.query.filter_by(record_type='detection').order_by(HistoryRecord.timestamp).all()

        for record in all_detection_records:
            try:
                record_time = datetime.datetime.strptime(record.timestamp, '%Y-%m-%d %H:%M:%S')
                hour_key = record_time.strftime('%H:00')  # group by hour
            except ValueError:
                logger.warning(f"Invalid timestamp format for record {record.id}: '{record.timestamp}'. Skipping time and location aggregation for this record.")
                continue  # bad timestamp: skip this record entirely (original behavior)

            # Per-person behavior tallies from the serialized detection payload.
            try:
                persons = json.loads(record.persons_json) if record.persons_json else []
            except json.JSONDecodeError:
                logger.error(f"Failed to decode persons_json for record {record.id}. Data: '{record.persons_json[:100]}...'")
                persons = []

            for person in persons:
                behavior_label = person.get('behavior', {}).get('label', '未知')
                if behavior_label in BEHAVIOR_LABELS:  # only count known labels
                    behavior_stats[behavior_label] += 1
                else:
                    behavior_stats['未知行为'] += 1  # bucket unrecognized labels

            record_person_count = record.person_count if record.person_count is not None else 0

            # Location: explicit record location wins, then the owning device's
            # location, else the "unknown area" bucket. A device_id that no
            # longer resolves contributes nothing (original behavior).
            if record.location:
                location_activity_stats[record.location] += record_person_count
            elif record.device_id:
                if record.device_id in device_locations:
                    location_activity_stats[device_locations[record.device_id]] += record_person_count
            else:
                location_activity_stats['未知区域'] += record_person_count

            # Hourly trend, split normal vs abnormal.
            is_abnormal_record = bool(record.error_message)
            if not is_abnormal_record and record.safety_analysis:
                is_abnormal_record = any(label in record.safety_analysis for label in abnormal_labels)

            if is_abnormal_record:
                time_trend_abnormal[hour_key] += record_person_count
            else:
                time_trend_normal[hour_key] += record_person_count

        # Chart series: every defined behavior/location appears even when zero.
        chart_behavior_stats = [[b_type['label'], behavior_stats[b_type['label']]] for b_type in BEHAVIOR_TYPES]
        if behavior_stats.get('未知行为', 0) > 0:
            chart_behavior_stats.append(['未知行为', behavior_stats['未知行为']])

        chart_location_stats = [[loc, location_activity_stats.get(loc, 0)] for loc in CAMPUS_LOCATIONS]
        if location_activity_stats.get('未知区域', 0) > 0:
            chart_location_stats.append(['未知区域', location_activity_stats['未知区域']])

        # All 24 hours are always present in the trend series.
        all_hours = [f"{i:02d}:00" for i in range(24)]
        chart_time_trend = [[hour, time_trend_normal.get(hour, 0), time_trend_abnormal.get(hour, 0)] for hour in all_hours]

        stats = {
            'totalDetections': total_detections,
            'alerts': alerts_count,
            'safePersons': safe_persons_count,
            'devices': total_devices,
            'onlineDevices': online_devices,
            'behaviorStats': chart_behavior_stats,
            'locationStats': chart_location_stats,
            'timeTrend': chart_time_trend
        }
        logger.info("Stats: Retrieved stats.")
        return jsonify(stats)
    except Exception as e:
        logger.error(f"Stats: Failed to retrieve stats: {str(e)}", exc_info=True)
        return jsonify({'error': 'Failed to retrieve stats'}), 500

# ================= Training job state tracking =================
training_jobs = {}       # job_id -> job dict (status, progress, metrics, logs, timestamps)
training_queue = []      # FIFO of job_ids; the head is the currently-running job
training_thread = None   # the single worker thread executing the job at the queue head
training_thread_lock = threading.Lock() # guards queue/thread handoff so only one training thread runs

# ================= 增强版大模型训练接口 =================
@app.route('/api/train_model', methods=['POST'])
def train_model_route():
    """Queue a (simulated) model-training job and report its queue position.

    Expects JSON with `model_type`, `dataset_name`, `epochs`; `batch_size`
    and `imgsz` are optional. Emits SocketIO queue/status updates immediately.

    BUG FIX: a non-numeric `epochs` value previously raised an unhandled
    ValueError (HTTP 500); it is now rejected with a 400 response.
    """
    global training_jobs, training_queue

    data = request.get_json()
    if not data or not all(k in data for k in ['model_type', 'dataset_name', 'epochs']):
        logger.error("Train Model: Missing training parameters.")
        return jsonify({
            'success': False,
            'error': '缺少训练参数: 需要 model_type, dataset_name, epochs'
        }), 400

    # Extract and validate parameters.
    model_type = data.get('model_type', 'yolov8')  # default to yolov8
    dataset_name = data.get('dataset_name')
    try:
        epochs = int(data.get('epochs', 100))  # default epochs
        if epochs <= 0:
            raise ValueError("epochs must be positive")
    except (TypeError, ValueError):
        logger.error(f"Train Model: Invalid epochs value: {data.get('epochs')!r}")
        return jsonify({
            'success': False,
            'error': '无效的训练参数: epochs 必须为正整数'
        }), 400
    batch_size = data.get('batch_size', 16)
    imgsz = data.get('imgsz', 640)

    # Resource gate intentionally disabled in this simulation build.
    # if not _check_training_resources():
    #     return jsonify({
    #         'success': False,
    #         'error': '资源不足: GPU内存不足或系统内存低于阈值'
    #     }), 503 # Service Unavailable

    # Create the training job. Estimated duration: 10 simulated seconds/epoch.
    job_id = str(uuid.uuid4())
    start_time = datetime.datetime.now()
    estimated_duration_seconds = epochs * 10
    estimated_end_time = start_time + datetime.timedelta(seconds=estimated_duration_seconds)

    job = {
        'id': job_id,
        'model_type': model_type,
        'dataset_name': dataset_name,
        'epochs': epochs,
        'batch_size': batch_size,
        'imgsz': imgsz,
        'status': 'queued',  # initial status; runner flips to 'training'
        'progress': 0,
        'start_time': start_time.isoformat(),
        'estimated_end': estimated_end_time.isoformat(),
        'estimated_remaining': str(datetime.timedelta(seconds=estimated_duration_seconds)),
        'current_epoch': 0,
        'metrics': {  # per-epoch training metrics
            'loss': [],
            'accuracy': [],  # or mAP for detection models
            'val_loss': [],
            'val_accuracy': []
        },
        'logs': []  # per-epoch log messages for the job
    }

    # Enqueue and notify the frontend right away.
    training_queue.append(job_id)
    training_jobs[job_id] = job
    socketio.emit('training_queue_update', {'queue': [training_jobs[jid] for jid in training_queue]})
    socketio.emit('training_status_update', {'job': job})

    # Kick off the worker thread if it is idle.
    _start_next_training_job()

    logger.info(f"Train Model: Training job {job_id} created for {model_type} on {dataset_name}. Queue position: {len(training_queue)}")

    return jsonify({
        'success': True,
        'message': f'训练任务已加入队列 (ID: {job_id})',
        'job_id': job_id,
        'queue_position': len(training_queue)
    })

def _start_next_training_job():
    """Start a worker thread for the job at the head of the queue, if idle.

    Stale queue entries (missing jobs, or jobs already completed/failed/
    cancelled) are drained from the head first.

    BUG FIX: the original recursed into itself while still holding
    `training_thread_lock`; `threading.Lock` is non-reentrant, so any stale
    head entry caused a self-deadlock. The drain is now an iterative loop
    inside a single lock acquisition.
    """
    global training_queue, training_thread

    with training_thread_lock:
        if training_thread is not None and training_thread.is_alive():
            # A job is already running; the new one waits its turn.
            logger.info("Training thread already active. New job will wait in queue.")
            return

        # Drain invalid or already-finished jobs from the queue head, then
        # launch the first runnable one.
        while training_queue:
            next_job_id = training_queue[0]
            job = training_jobs.get(next_job_id)

            if job is None:
                logger.error(f"Job {next_job_id} not found in training_jobs. Removing from queue.")
                training_queue.pop(0)
                continue

            if job['status'] in ['completed', 'failed', 'cancelled']:
                logger.info(f"Job {next_job_id} already in final state ({job['status']}). Removing from queue.")
                training_queue.pop(0)
                continue

            training_thread = threading.Thread(target=_training_task_runner, args=(next_job_id,))
            training_thread.start()
            logger.info(f"Started training thread for job: {next_job_id}")
            return

        logger.info("Training queue is empty. No jobs to start.")


def _training_task_runner(job_id):
    """Worker-thread body: run the simulated training loop for *job_id*.

    Streams per-epoch metrics/logs over SocketIO, honors cooperative
    cancellation, persists a new model version on success, and hands the
    queue to the next job on exit.

    BUG FIXES vs. original:
    - `finally` called `_start_next_training_job()` while still holding the
      non-reentrant `training_thread_lock` -> guaranteed deadlock. The call
      now happens after the lock is released.
    - `training_thread` was never cleared, so the `is_alive()` guard in
      `_start_next_training_job` blocked every subsequent queued job.
    """
    global training_jobs, training_queue, current_settings, training_thread

    job = training_jobs[job_id]
    job['status'] = 'training'
    job['start_time'] = datetime.datetime.now().isoformat()  # actual start time
    socketio.emit('training_status_update', {'job': job})

    total_epochs = job['epochs']
    epoch_duration_seconds = 10  # simulated wall-clock time per epoch

    try:
        for epoch in range(1, total_epochs + 1):
            # Cooperative cancellation: /api/training/cancel flips the status.
            if job['status'] == 'cancelled':
                logger.info(f"Training job {job_id} cancelled during epoch {epoch}.")
                break

            job['current_epoch'] = epoch
            job['progress'] = int((epoch / total_epochs) * 100)

            # Remaining time is purely epoch-count based in this simulation.
            remaining_seconds = (total_epochs - epoch) * epoch_duration_seconds
            job['estimated_remaining'] = str(datetime.timedelta(seconds=int(remaining_seconds)))

            # Simulated metrics: loss decays, accuracy climbs, with jitter.
            # (Replace with real YOLO training calls and metric parsing.)
            loss = max(0.01, 0.2 * (1 - epoch/total_epochs) + 0.05 + random.uniform(-0.02, 0.02))
            accuracy = min(0.99, 0.7 + (epoch/total_epochs)*0.25 + random.uniform(-0.01, 0.01))
            val_loss = loss * random.uniform(1.05, 1.2)  # validation slightly worse
            val_accuracy = accuracy * random.uniform(0.95, 0.99)

            job['metrics']['loss'].append(round(loss, 4))
            job['metrics']['accuracy'].append(round(accuracy, 4))
            job['metrics']['val_loss'].append(round(val_loss, 4))
            job['metrics']['val_accuracy'].append(round(val_accuracy, 4))

            log_entry = {
                'time': datetime.datetime.now().strftime('%H:%M:%S'),
                'epoch': epoch,
                'message': f"Epoch {epoch}/{total_epochs} - Loss: {loss:.4f}, Acc: {accuracy:.4f}, Val Loss: {val_loss:.4f}, Val Acc: {val_accuracy:.4f}"
            }
            job['logs'].append(log_entry)
            socketio.emit('training_log', {'job_id': job_id, 'log': log_entry})  # real-time log streaming
            socketio.emit('training_status_update', {'job': job})

            # 3% simulated failure chance in the mid-run window (after epoch 5).
            if random.random() < 0.03 and epoch > 5 and epoch < total_epochs * 0.9:
                raise Exception(f"Simulated training failure due to gradient explosion at epoch {epoch}")

            time.sleep(epoch_duration_seconds)  # simulate training time

        # Reaching here with status still 'training' means a clean finish.
        if job['status'] == 'training':
            job['status'] = 'completed'
            job['progress'] = 100
            job['end_time'] = datetime.datetime.now().isoformat()

            # Publish a fresh model version and persist it in settings.
            new_model_version = f"{job['model_type'].replace('-pose','')}_Custom_v{random.randint(1,1000)}"
            current_settings['model_version'] = new_model_version
            _save_settings(current_settings)
            job['new_model_version'] = new_model_version

            load_yolo_model()  # reload so the new version takes effect

            logger.info(f"Train Model: Job {job_id} completed. New model: {new_model_version}")

    except Exception as e:
        job['status'] = 'failed'
        job['error'] = f"训练失败: {str(e)}"
        job['end_time'] = datetime.datetime.now().isoformat()
        logger.error(f"Train Model: Job {job_id} failed: {str(e)}", exc_info=True)
    finally:
        socketio.emit('training_status_update', {'job': job})  # final status push
        with training_thread_lock:
            if training_queue and training_queue[0] == job_id:  # pop only if still head
                training_queue.pop(0)
            training_thread = None  # this worker is done; allow a new one to start
            socketio.emit('training_queue_update', {'queue': [training_jobs[jid] for jid in training_queue]})
        # Must run OUTSIDE the lock: _start_next_training_job() acquires it.
        _start_next_training_job()


@app.route('/api/training/status/<job_id>', methods=['GET'])
def training_status_route(job_id):
    """Return the full state dict of a single training job, or 404."""
    found = training_jobs.get(job_id)
    if found is None:
        return jsonify({
            'success': False,
            'error': f'无效的任务ID: {job_id}'
        }), 404
    return jsonify({
        'success': True,
        'job': found
    })

@app.route('/api/training/cancel/<job_id>', methods=['POST'])
def cancel_training(job_id):
    """Cancel a queued or running training job.

    The running job polls its status once per epoch, so cancellation of an
    in-flight job takes effect at the next epoch boundary.
    """
    global training_queue, training_thread_lock

    target = training_jobs.get(job_id)
    if target is None:
        return jsonify({
            'success': False,
            'error': f'无效的任务ID: {job_id}'
        }), 404

    if target['status'] in ('completed', 'failed', 'cancelled'):
        return jsonify({
            'success': False,
            'error': f'任务已处于最终状态: {target["status"]}'
        }), 400

    # Flip the flag first; the training loop reacts on its next check.
    target['status'] = 'cancelled'
    target['end_time'] = datetime.datetime.now().isoformat()
    target['error'] = "任务已被用户取消。"
    logger.info(f"Train Model: Job {job_id} marked as cancelled by user.")
    socketio.emit('training_status_update', {'job': target})

    with training_thread_lock:
        if job_id in training_queue:
            try:
                training_queue.remove(job_id)
                logger.info(f"Train Model: Job {job_id} removed from queue.")
            except ValueError:
                pass  # raced with another removal; nothing to do
        socketio.emit('training_queue_update', {'queue': [training_jobs[jid] for jid in training_queue]})

    # If the cancelled job was the one running, the runner's cleanup path
    # advances the queue on its own.
    return jsonify({
        'success': True,
        'message': f'任务 {job_id} 已取消'
    })

@app.route('/api/training/queue', methods=['GET'])
def training_queue_status():
    """Return a frontend-friendly snapshot of the training queue."""
    snapshot = []
    for position, jid in enumerate(training_queue, start=1):
        details = training_jobs.get(jid, {})
        snapshot.append({
            'job_id': jid,
            'model_type': details.get('model_type'),
            'dataset_name': details.get('dataset_name'),
            'status': details.get('status', 'queued'),
            'progress': details.get('progress', 0),
            'current_epoch': details.get('current_epoch', 0),
            'epochs': details.get('epochs', 0),
            'estimated_remaining': details.get('estimated_remaining', 'N/A'),
            'queue_position': position,
        })

    # A job counts as active only while it is both running and still queued.
    running = sum(
        1 for jid, j in training_jobs.items()
        if j['status'] == 'training' and jid in training_queue
    )

    return jsonify({
        'success': True,
        'active_jobs': running,
        'queue': snapshot,
        'queue_length': len(training_queue),
    })


# ================= 千帆API连接健康检查接口 =================
@app.route('/api/qianfan_check', methods=['GET'])
def qianfan_check_route():
    """Health-check AppBuilder connectivity with one throwaway conversation."""
    logger.info("AppBuilder Check: Attempting to check AppBuilder API connectivity.")

    # Preconditions, cheapest first: SDK present, App ID configured, token set.
    if not APPBUILDER_SDK_AVAILABLE:
        return jsonify({'success': False, 'message': "AppBuilder SDK未安装，无法执行检查。"}), 500
    if not current_settings.get('app_builder_app_id'):
        return jsonify({'success': False, 'message': "AppBuilder App ID未配置，请在系统设置中配置。"}), 400
    if not os.environ.get("APPBUILDER_TOKEN"):
        return jsonify({'success': False, 'message': "APPBUILDER_TOKEN 环境变量未设置，无法连接AppBuilder。"}), 400

    try:
        # A temporary client + conversation exercises both the token and the
        # configured App ID in a single short round-trip.
        probe = appbuilder.AppBuilderClient(current_settings['app_builder_app_id'])
        conversation = probe.create_conversation()
        reply = probe.run(conversation, "Hello, are you there?", timeout=10)  # short timeout

        answer = ""
        if reply and reply.content and reply.content.answer:
            answer = reply.content.answer

        if answer:
            return jsonify({'success': True, 'message': "AppBuilder AI助手连接和应用正常。", 'response_snippet': answer[:50] + "..."}), 200
        return jsonify({'success': False, 'message': f"AppBuilder AI连接成功但响应为空或异常，可能App ID配置不正确或应用内容问题: {answer}"}), 500
    except appbuilder.AppBuilderServerException as e:
        return jsonify({'success': False, 'message': f"AppBuilder AI连接或应用失败: {e.message}. 请检查App ID和Token。"}), 500
    except requests.exceptions.RequestException as e:
        return jsonify({'success': False, 'message': f"AppBuilder AI连接失败: 网络或请求错误 - {str(e)}。请检查网络连接。"}), 500
    except Exception as e:
        logger.error(f"AppBuilder Check: Unexpected error: {e}", exc_info=True)
        return jsonify({'success': False, 'message': f"AppBuilder AI助手检查异常: {str(e)}。请检查服务器日志。"}), 500


# ================= 系统实时指标接口 =================
@app.route('/api/system_metrics', methods=['GET'])
def get_system_metrics_route():
    """Report host resource usage plus AI-client readiness flags."""
    try:
        sample_window = 0.5  # seconds over which network throughput is sampled
        cpu_percent = psutil.cpu_percent(interval=None)  # non-blocking snapshot
        memory_percent = psutil.virtual_memory().percent
        storage_percent = psutil.disk_usage('/').percent  # root filesystem

        # Measure real throughput by diffing counters across the window.
        before = psutil.net_io_counters()
        time.sleep(sample_window)
        after = psutil.net_io_counters()

        sent = after.bytes_sent - before.bytes_sent
        received = after.bytes_recv - before.bytes_recv
        rate_mbps = (sent + received) * 8 / (sample_window * 1024 * 1024)
        assumed_link_mbps = 100  # percentage is relative to an assumed 100 Mbps link
        network_percent = min(100, int(rate_mbps / assumed_link_mbps * 100))

        payload = {
            'cpu': cpu_percent,
            'memory': memory_percent,
            'storage': storage_percent,
            'network': network_percent,  # derived, not a direct OS metric
            'appbuilder_client_initialized': appbuilder_client is not None,
            'qianfan_token_available': _cached_qianfan_access_token is not None and time.time() < _qianfan_token_expiry_time
        }

        logger.debug(f"System Metrics: Retrieved {payload}")
        return jsonify(payload)
    except Exception as e:
        logger.error(f"System Metrics: Failed to retrieve system metrics: {str(e)}", exc_info=True)
        return jsonify({'error': 'Failed to retrieve system metrics: ' + str(e)}), 500

# ================= 静态文件服务 =================
@app.route('/uploads/<filename>')
def uploaded_file_serve_route(filename):
    """Serve a previously uploaded file from the configured upload directory."""
    upload_dir = app.config['UPLOAD_FOLDER']
    return send_from_directory(upload_dir, filename)

@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def serve_react_app(path):
    """Serve the React build; unknown paths fall back to index.html (SPA routing)."""
    candidate = os.path.join(app.static_folder, path)
    if path and os.path.exists(candidate):
        return send_from_directory(app.static_folder, path)
    return send_from_directory(app.static_folder, 'index.html')

# ================= 其他端点 =================
@app.route('/api/status')
def health_check_route():
    """Liveness endpoint: reports version, feature list and subsystem readiness."""
    logger.info("Status: Health check requested.")
    service_names = [
        "YOLOv8-Pose", "AppBuilder-Agent", "AI-Interpretation", "AI-ProactiveAlerts",
        "AI-Recommendations", "AI-Chatbot", "Campus-Security-API", "Settings-Persistence",
        "Live-Stream-API-Sim", "History-Persistence", "Realtime-Logs", "Device-Management",
        "Person-Management", "Stats-Calculations", "Model-Train-Sim", "AppBuilder-HealthCheck",
        "System-Metrics", "Image-Understanding"
    ]
    payload = {
        "status": "running",
        "version": "5.2.0-enhanced",
        "services": service_names,
        "model_loaded": model is not None,
        "appbuilder_client_initialized": appbuilder_client is not None,
        "current_settings": current_settings  # handy for debugging
    }
    return jsonify(payload), 200

# ================= WebSocket 事件处理 =================
@socketio.on('connect')
def handle_connect():
    """Acknowledge a new WebSocket client over the log channel."""
    logger.info("WebSocket: Client connected.")
    # Initial page state is fetched over REST; here we only confirm the socket.
    socketio.emit('log_message', {'data': 'INFO - WebSocket connection established.'})

@socketio.on('disconnect')
def handle_disconnect():
    """Log WebSocket client disconnects; there is no per-client state to tear down."""
    logger.info("WebSocket: Client disconnected.")


# ================= 启动应用 =================
if __name__ == '__main__':
    # For local development, you can set environment variables here
    # In production, set these in your server environment
    # !!! IMPORTANT: Replace with your actual AppBuilder and Baidu Cloud API keys !!!
    # SECURITY NOTE(review): real-looking credentials are committed below as
    # fallback defaults. They should be rotated and removed from source
    # control; rely on the environment (or a secrets manager) instead.
    # AppBuilder token (the environment variable wins; the literal is only a fallback)
    os.environ["APPBUILDER_TOKEN"] = os.environ.get("APPBUILDER_TOKEN", "bce-v3/ALTAK-4aM6cuurTA0w0nZlHAR4x/fc6fbe506cd7e6a9a3ef2e12a3454ceba868f703") 
    # Baidu Cloud Access Key ID (AK)
    os.environ["BAIDU_API_KEY"] = os.environ.get("BAIDU_API_KEY", "ALTAKit3MYRfEV0UoqOntaUggR")         
    # Baidu Cloud Secret Access Key (SK)
    os.environ["BAIDU_SECRET_KEY"] = os.environ.get("BAIDU_SECRET_KEY", "469c6dd9696646b99e127c6413d62c77") 

    with app.app_context(): # Create an application context
        # First run: write the default settings file so later reads succeed.
        if not os.path.exists(SETTINGS_FILE):
            _save_settings(DEFAULT_SETTINGS) # Save defaults if no settings file
            logger.info(f"Created default {SETTINGS_FILE}.")

        db.create_all() # Create database tables if they don't exist
        logger.info("Database tables checked/created.")

        # Eagerly initialize the heavy dependencies at startup so the first
        # request does not pay the cost.
        load_yolo_model() # Load the model at startup
        initialize_appbuilder_client() # Initialize AppBuilder client at startup
        get_qianfan_access_token() # Attempt to get Qianfan token at startup as well

        # Populate mock data if DB is empty (for first run)
        if Device.query.count() == 0:
            logger.info("Initializing mock devices...")
            mock_devices = []
            for i in range(1, 6): # Create 5 mock devices
                # Random status/location drawn from the predefined catalogs.
                status_obj = random.choice(DEVICE_STATUS_TYPES)
                location_val = random.choice(CAMPUS_LOCATIONS)
                mock_devices.append(Device(
                    id=f"device_{i}",
                    name=f"摄像头_{i:02d}",
                    ip=f"192.168.1.{100 + i}",
                    location=location_val,
                    status_value=status_obj['value'],
                    status_label=status_obj['label'],
                    status_color=status_obj['color'],
                    lastActive=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                    device_type='camera'
                ))
            db.session.add_all(mock_devices)
            db.session.commit()
            logger.info(f"Added {len(mock_devices)} mock devices.")

        if Person.query.count() == 0:
            logger.info("Initializing mock persons...")
            mock_persons = []
            for i in range(1, 11): # Create 10 mock persons
                behavior_obj = random.choice(BEHAVIOR_TYPES)
                location_val = random.choice(CAMPUS_LOCATIONS)
                mock_persons.append(Person(
                    name=f"学生_{i:02d}",
                    studentId=f"S{10000 + i}",
                    department=random.choice(['计算机工程系', '机电系', '化工系', '土木系', '经管系', '艺术设计学院', '外国语学院']),
                    behavior_label=behavior_obj['label'],
                    behavior_value=behavior_obj['value'],
                    behavior_color=behavior_obj['color'],
                    location=location_val,
                    time=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                    confidence='1.00' # Assume high confidence for mock data
                ))
            db.session.add_all(mock_persons)
            db.session.commit()
            logger.info(f"Added {len(mock_persons)} mock persons.")

        if HistoryRecord.query.count() == 0:
            logger.info("Initializing mock history records...")
            mock_history = []
            # Define weights for more realistic behavior distribution
            behavior_weights = [ # Corresponds to BEHAVIOR_TYPES order
                0.50, # normal
                0.05, # danger
                0.10, # suspicious
                0.03, # fall
                0.07, # run
                0.05, # crowd
                0.10, # stand
                0.08, # walk
                0.02  # bend
            ]
            if len(behavior_weights) != len(BEHAVIOR_TYPES): # Safety check
                logger.error("Behavior weights length does not match BEHAVIOR_TYPES length. Using uniform distribution.")
                behavior_weights = None

            # Generate a base64 placeholder image (a tiny transparent PNG)
            PLACEHOLDER_B64_IMAGE = ""

            for i in range(20): # Create 20 mock history records
                timestamp = (datetime.datetime.now() - datetime.timedelta(minutes=i*10)).strftime('%Y-%m-%d %H:%M:%S')
                num_persons = random.randint(0, 5)
                loc = random.choice(CAMPUS_LOCATIONS)

                current_persons_data = []
                is_abnormal_occurrence = False
                if num_persons > 0:
                    for p_idx in range(num_persons):
                        behavior_obj = random.choices(BEHAVIOR_TYPES, weights=behavior_weights, k=1)[0]
                        if behavior_obj['value'] not in ['normal', 'stand', 'walk']: # Check for common abnormal behaviors
                            is_abnormal_occurrence = True
                        current_persons_data.append({
                            "id": f"mock_person_{i}_{p_idx}",
                            "confidence": f"{random.uniform(0.7, 0.99):.2f}",
                            "behavior": behavior_obj,
                            "safety_info": SAFETY_INFO.get(behavior_obj['label'], SAFETY_INFO['未知']),
                            "box_coords": [random.randint(10,50),random.randint(10,50),random.randint(100,150),random.randint(150,250)], # Random box
                            "keypoints_coords": [] # Empty for mock data
                        })

                # Simulate crowd detection for some records
                if num_persons >= 4 and random.random() < 0.3: # 30% chance if 4 or more people
                    is_abnormal_occurrence = True
                    if '聚集' not in [p['behavior']['label'] for p in current_persons_data]:
                        # Crowd ("聚集") scenario: stamp each mocked person with the crowd
                        # behavior object and its safety advisory so the seeded history
                        # contains aggregation alerts. NOTE(review): `i` presumably comes
                        # from the enclosing per-person loop above this window — confirm.
                        behavior_for_record = get_behavior_obj_by_label('聚集')
                        current_persons_data.append({
                            "id": f"mock_person_{i}_crowd", # Special ID marking this as scenario data
                            "confidence": "0.95",
                            "behavior": behavior_for_record,
                            "safety_info": SAFETY_INFO['聚集'],
                            "box_coords": [10,10,200,300], # Example crowd bounding box (x1,y1,x2,y2 assumed)
                            "keypoints_coords": []
                        })

                # Defaults when no AI analysis runs; overwritten by the branches below.
                safety_text = "未进行AI分析。"
                error_msg = None
                record_type = 'detection'

                # Run real AI analysis on the mock data only when all three prerequisites
                # hold: SDK importable, interpretation enabled in settings, token present.
                if APPBUILDER_SDK_AVAILABLE and current_settings.get('ai_interpret_results', True) and os.environ.get("APPBUILDER_TOKEN"):
                    # Summarize the mocked scene (location, headcount, non-"未知" behaviors)
                    # into a Chinese prompt for the safety-situation analysis agent.
                    mock_prompt = f"场景位于{loc}，检测到{num_persons}人，行为包括：{', '.join([p['behavior']['label'] for p in current_persons_data if p['behavior']['label'] != '未知'])}。请分析安全态势。"
                    mock_analysis_result = call_appbuilder_agent(mock_prompt, None) # None => temporary conversation
                    # The agent reports failures in-band via these marker substrings.
                    if "AI助手服务错误" in mock_analysis_result or "AI助手调用失败" in mock_analysis_result:
                        error_msg = f"AI分析服务错误: {mock_analysis_result}"
                        safety_text = "AI分析失败。请检查AppBuilder配置。"
                    else:
                        # Truncate long analyses to 200 chars for storage/display.
                        safety_text = mock_analysis_result[:200] + "..." if len(mock_analysis_result) > 200 else mock_analysis_result
                elif not APPBUILDER_SDK_AVAILABLE:
                    safety_text = "AI分析功能需要AppBuilder SDK，但SDK未就绪。"
                    error_msg = "AppBuilder SDK not available."
                elif not os.environ.get("APPBUILDER_TOKEN"):
                    safety_text = "AI分析功能需要APPBUILDER_TOKEN环境变量，但未设置。"
                    error_msg = "APPBUILDER_TOKEN not set."
                else: # AI interpretation explicitly disabled in system settings
                    safety_text = "AI分析功能已在系统设置中关闭。"


                # Persist one mock detection record. Device assignment is a random pick
                # from all devices (or None when the table is empty). NOTE(review): this
                # issues two queries per iteration — acceptable for one-off seeding.
                mock_history.append(HistoryRecord(
                    timestamp=timestamp,
                    person_count=num_persons,
                    original_image_b64=PLACEHOLDER_B64_IMAGE,
                    result_image_b64=PLACEHOLDER_B64_IMAGE,
                    safety_analysis=safety_text,
                    error_message=error_msg, # Stores a potential AppBuilder error for the UI
                    persons_json=json.dumps(current_persons_data),
                    device_id=random.choice([d.id for d in Device.query.all()]) if Device.query.count() > 0 else None,
                    location=loc,
                    record_type=record_type
                ))

            # Seed a handful of mock multimodal image-interpretation history rows
            # so that record_type filtering can be exercised in the UI.
            interpretation_samples = [
                "图片显示一个校园场景，有学生在草坪上。环境宜人。",
                "图像是图书馆内部，看起来很安静，没有异常。",
                "这张图片捕捉到校门口的交通情况，人流车流正常有序。"
            ]
            for idx in range(5):
                # Spread timestamps backwards in ~10-minute steps from "now".
                record_time = datetime.datetime.now() - datetime.timedelta(minutes=idx*10 + 1)
                chosen_text = random.choice(interpretation_samples)
                mock_history.append(HistoryRecord(
                    timestamp=record_time.strftime('%Y-%m-%d %H:%M:%S'),
                    person_count=0,
                    original_image_b64=PLACEHOLDER_B64_IMAGE,
                    result_image_b64=PLACEHOLDER_B64_IMAGE,
                    safety_analysis=f"图像理解：问题“图片里有什么？”\n结果：{chosen_text}",
                    error_message=None,
                    persons_json="[]",
                    device_id=None,  # interpretation records are not tied to a device
                    location="多模态图像理解",
                    record_type='image_interpretation'
                ))

            # Flush every accumulated mock record (detections + interpretations) at once.
            db.session.add_all(mock_history)
            db.session.commit()
            logger.info(f"Added {len(mock_history)} mock history records.")

    logger.info("系统启动中...")
    # Start the SocketIO-wrapped Flask server. Host/port/debug are overridable via
    # environment variables so deployments need no code edits; the defaults below
    # reproduce the original hard-coded development configuration exactly.
    # NOTE(review): debug=True plus allow_unsafe_werkzeug=True are development-only
    # settings — set FLASK_DEBUG=0 (and front with a real WSGI server) in production.
    socketio.run(
        app,
        host=os.environ.get('HOST', '0.0.0.0'),
        port=int(os.environ.get('PORT', '8000')),
        debug=os.environ.get('FLASK_DEBUG', '1') == '1',
        allow_unsafe_werkzeug=True  # needed by newer Werkzeug when SocketIO falls back to its dev server
    )