"""
仪表板模块
提供前端页面和统计看板功能
"""
from flask import Blueprint, render_template, request, jsonify
from flask_login import login_required, current_user
from datetime import datetime, timedelta
import sys
import os

# 添加项目根目录到路径
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))

from services.shared.models import db
from services.shared.models.user import User
from services.shared.models.task import Task, TaskExecution
from services.shared.models.system import SystemLog
from services.shared.models.enums import TaskStatus, TaskType
from services.shared.utils import format_duration, create_response
from services.shared.database import get_db_stats
from services.shared.log_utils import log_system_event
from services.shared.logger import get_logger
from services.web.auth import admin_required

# 初始化日志记录器
logger = get_logger('web.dashboard')

# 创建蓝图
dashboard_bp = Blueprint('dashboard', __name__)


@dashboard_bp.route('/overview')
@login_required
def overview():
    """Render the dashboard overview page (login required)."""
    logger.info(
        f"用户 {current_user.username} 访问概览页面 - IP: {request.remote_addr}"
    )
    return render_template('dashboard/overview.html')


@dashboard_bp.route('/tasks')
@login_required
def tasks():
    """Render the task management page (login required)."""
    logger.info(
        f"用户 {current_user.username} 访问任务管理页面 - IP: {request.remote_addr}"
    )
    return render_template('dashboard/tasks.html')


@dashboard_bp.route('/tasks/new')
@login_required
def new_task():
    """Render an empty task form for creating a new task."""
    logger.info(
        f"用户 {current_user.username} 访问新建任务页面 - IP: {request.remote_addr}"
    )
    # task=None signals "create" mode to the shared task_form template.
    return render_template('dashboard/task_form.html', task=None)


@dashboard_bp.route('/tasks/<int:task_id>/edit')
@login_required
def edit_task(task_id):
    """Render the edit form for a task owned by the current user.

    Responds with the 403 error page when the task belongs to another user.
    """
    logger.info(
        f"用户 {current_user.username} 访问编辑任务页面 - 任务ID: {task_id} - IP: {request.remote_addr}"
    )
    task = Task.query.get_or_404(task_id)

    # Owner-only access: every user may edit only their own tasks.
    if task.user_id != current_user.id:
        logger.warning(
            f"用户 {current_user.username} 尝试编辑非自己的任务 - 任务ID: {task_id} - IP: {request.remote_addr}"
        )
        return render_template('errors/403.html'), 403

    # Serialize to a dict so environment_variables/dependencies reach the
    # template already parsed.
    return render_template('dashboard/task_form.html', task=task.to_dict())


@dashboard_bp.route('/tasks/<int:task_id>/detail')
@login_required
def task_detail(task_id):
    """Render the detail page for a task owned by the current user.

    Responds with the 403 error page when the task belongs to another user.
    """
    logger.info(
        f"用户 {current_user.username} 访问任务详情页面 - 任务ID: {task_id} - IP: {request.remote_addr}"
    )
    task = Task.query.get_or_404(task_id)

    # Owner-only access: every user may view only their own tasks.
    if task.user_id != current_user.id:
        logger.warning(
            f"用户 {current_user.username} 尝试查看非自己的任务详情 - 任务ID: {task_id} - IP: {request.remote_addr}"
        )
        return render_template('errors/403.html'), 403

    return render_template('dashboard/task_detail.html', task=task)


@dashboard_bp.route('/executions')
@login_required
def executions():
    """Render the execution history page (login required)."""
    logger.info(
        f"用户 {current_user.username} 访问执行记录页面 - IP: {request.remote_addr}"
    )
    return render_template('dashboard/executions.html')


@dashboard_bp.route('/scripts')
@login_required
def scripts():
    """Render the script management page (login required)."""
    logger.info(
        f"用户 {current_user.username} 访问脚本管理页面 - IP: {request.remote_addr}"
    )
    return render_template('dashboard/scripts.html')


@dashboard_bp.route('/script-editor/<int:script_id>')
@login_required
def script_editor(script_id):
    """Render the script editor page for the given script id."""
    logger.info(
        f"用户 {current_user.username} 访问脚本编辑器页面 - 脚本ID: {script_id} - IP: {request.remote_addr}"
    )
    return render_template('dashboard/script_editor.html', script_id=script_id)


@dashboard_bp.route('/recycle')
@login_required
def recycle():
    """Render the recycle-bin management page (login required)."""
    logger.info(
        f"用户 {current_user.username} 访问回收站管理页面 - IP: {request.remote_addr}"
    )
    return render_template('dashboard/recycle.html')


@dashboard_bp.route('/dependencies')
@login_required
def dependencies():
    """Render the dependency management page (login required)."""
    logger.info(
        f"用户 {current_user.username} 访问依赖管理页面 - IP: {request.remote_addr}"
    )
    return render_template('dashboard/dependencies.html')


@dashboard_bp.route('/logs')
@login_required
@admin_required
def logs():
    """Render the system log page with headline counts (admin only).

    Supplies the template with total/error/warning log counts gathered
    from a single grouped query on SystemLog.
    """
    logger.info(
        f"管理员 {current_user.username} 访问系统日志页面 - IP: {request.remote_addr}"
    )
    from sqlalchemy import func

    # Per-level counts via one GROUP BY query.
    level_rows = db.session.query(
        SystemLog.level,
        func.count(SystemLog.id).label('count')
    ).group_by(SystemLog.level).all()
    level_counts = {row.level: row.count for row in level_rows}

    # Headline stats for the template.
    stats = {
        'total_logs': SystemLog.query.count(),
        'error_logs': level_counts.get('ERROR', 0),
        'warning_logs': level_counts.get('WARNING', 0),
    }

    return render_template('dashboard/logs.html', stats=stats)


@dashboard_bp.route('/users')
@login_required
@admin_required
def users():
    """Render the user management page (admin only)."""
    logger.info(
        f"管理员 {current_user.username} 访问用户管理页面 - IP: {request.remote_addr}"
    )
    return render_template('dashboard/users.html')


@dashboard_bp.route('/settings')
@login_required
@admin_required
def settings():
    """Render the settings page (admin only)."""
    logger.info(
        f"管理员 {current_user.username} 访问设置页面 - IP: {request.remote_addr}"
    )
    return render_template('dashboard/settings.html')


@dashboard_bp.route('/about')
@login_required
def about():
    """Render the "about this project" page (login required)."""
    logger.info(
        f"用户 {current_user.username} 访问关于项目页面 - IP: {request.remote_addr}"
    )
    return render_template('dashboard/about.html')


@dashboard_bp.route('/notification-config')
@login_required
def notification_config():
    """Render the notification configuration page (login required)."""
    logger.info(
        f"用户 {current_user.username} 访问通知配置页面 - IP: {request.remote_addr}"
    )
    return render_template('dashboard/notification_config.html')


@dashboard_bp.route('/global-parameters')
@login_required
def global_parameters():
    """Render the global (shared) parameter configuration page."""
    logger.info(
        f"用户 {current_user.username} 访问公共参数配置页面 - IP: {request.remote_addr}"
    )
    return render_template('dashboard/global_parameters.html')


# API接口用于前端AJAX调用
@dashboard_bp.route('/api/stats/dashboard')
@login_required
def api_dashboard_stats():
    """仪表板统计数据

    Returns a JSON payload with: basic counts, per-status and per-type
    breakdowns, a 7-day execution trend (oldest day first), the 10 most
    recent executions, the next 5 scheduled tasks, and basic system info.
    All queries are scoped to the current user's own tasks.

    Fixes vs. previous revision: removed unused ``week_ago``/``month_ago``
    locals and a dead ``upcoming_tasks = []`` assignment; replaced the
    non-idiomatic ``Task.is_enabled == True`` filter; builds the trend
    oldest-first instead of reversing afterwards.
    """
    logger.debug(f"用户 {current_user.username} 请求仪表板统计数据 - IP: {request.remote_addr}")
    from sqlalchemy import and_
    from services.shared.utils import get_next_run_time

    # Time anchors use local time, consistent with the rest of this module.
    now = datetime.now()
    today = now.replace(hour=0, minute=0, second=0, microsecond=0)

    # Base queries - every user only ever sees their own data.
    task_query = Task.query.filter_by(user_id=current_user.id)
    execution_query = TaskExecution.query.join(Task).filter(Task.user_id == current_user.id)

    # Basic counts.
    stats = {
        'total_tasks': task_query.count(),
        'active_tasks': task_query.filter_by(is_enabled=True).count(),
        'total_executions': execution_query.count(),
        'today_executions': execution_query.filter(TaskExecution.started_at >= today).count()
    }

    # Execution-status breakdown (one count query per enum member).
    status_stats = {
        status.value: execution_query.filter(TaskExecution.status == status).count()
        for status in TaskStatus
    }

    # Task-type breakdown.
    type_stats = {
        task_type.value: task_query.filter_by(task_type=task_type).count()
        for task_type in TaskType
    }

    # Last-7-days execution trend, built oldest day first.
    daily_stats = []
    for offset in range(6, -1, -1):
        day_start = today - timedelta(days=offset)
        day_end = day_start + timedelta(days=1)
        in_day = and_(
            TaskExecution.started_at >= day_start,
            TaskExecution.started_at < day_end
        )
        daily_stats.append({
            'date': day_start.strftime('%Y-%m-%d'),
            'executions': execution_query.filter(in_day).count(),
            'success': execution_query.filter(
                in_day,
                TaskExecution.status == TaskStatus.SUCCESS
            ).count()
        })

    # Ten most recent executions.
    recent_executions = execution_query.order_by(TaskExecution.started_at.desc()).limit(10).all()

    # Upcoming tasks: compute the next run time for every enabled cron task,
    # then keep the five soonest.
    task_with_next_run = []
    for task in task_query.filter_by(is_enabled=True).all():
        if task.cron_expression:
            next_run = get_next_run_time(task.cron_expression, now)
            if next_run and next_run > now:
                task_with_next_run.append({'task': task, 'next_run_at': next_run})

    task_with_next_run.sort(key=lambda item: item['next_run_at'])
    upcoming_tasks = [item['task'] for item in task_with_next_run[:5]]

    system_info = {
        'python_version': sys.version.split()[0],
        'database_stats': get_db_stats()
    }

    # Serialize recent executions defensively (status/timestamps may be None).
    serialized_recent_executions = []
    for execution in recent_executions:
        duration_str = None
        if execution.duration:
            if isinstance(execution.duration, timedelta):
                # Convert timedelta to whole seconds, rendered as a string.
                duration_str = str(int(execution.duration.total_seconds()))
            else:
                duration_str = str(execution.duration)

        task_name = execution.task.name if execution.task else 'Unknown'

        serialized_recent_executions.append({
            'id': execution.id,
            'task_id': execution.task_id,
            'task_name': task_name,
            'status': execution.status.value if execution.status else 'unknown',
            'started_at': execution.started_at.isoformat() if execution.started_at else None,
            'completed_at': execution.completed_at.isoformat() if execution.completed_at else None,
            'duration': duration_str
        })

    # Serialize upcoming tasks; recompute next run to keep the value fresh.
    serialized_upcoming_tasks = []
    for task in upcoming_tasks:
        next_run = get_next_run_time(task.cron_expression, now) if task.cron_expression else None
        serialized_upcoming_tasks.append({
            'id': task.id,
            'name': task.name,
            'task_type': task.task_type.value if task.task_type else 'unknown',
            'next_run_at': next_run.isoformat() if next_run else None,
            'is_enabled': task.is_enabled
        })

    return jsonify(create_response(True, '获取成功', {
        'basic_stats': stats,
        'status_stats': status_stats,
        'type_stats': type_stats,
        'daily_trend': daily_stats,
        'recent_executions': serialized_recent_executions,
        'upcoming_tasks': serialized_upcoming_tasks,
        'system_info': system_info
    }))


@dashboard_bp.route('/api/logs')
@login_required
def api_get_logs():
    """获取系统日志

    Paginated JSON listing of SystemLog rows with optional filters:
    level / module / action / resource_type / minimum severity, a single
    date or a start/end date range, free-text search (message + details)
    and tag search.  Non-admin users only see their own logs.

    Fix vs. previous revision: the repeated local
    ``from datetime import datetime as dt`` imports shadowed the
    module-level ``datetime`` class and were removed.
    """
    logger.debug(f"用户 {current_user.username} 请求系统日志 - IP: {request.remote_addr}")

    try:
        # Parameter validation with safe defaults.
        page = max(1, request.args.get('page', 1, type=int))
        per_page = min(100, max(10, request.args.get('per_page', 20, type=int)))  # 限制每页最大100条
        level = request.args.get('level')
        module = request.args.get('module')
        action = request.args.get('action')
        resource_type = request.args.get('resource_type')
        severity = request.args.get('severity', type=int)
        date = request.args.get('date')
        search = request.args.get('search')
        tags = request.args.get('tags')
        start_date = request.args.get('start_date')
        end_date = request.args.get('end_date')

        query = SystemLog.query

        # Non-admins may only read their own logs.
        if not current_user.is_admin:
            logger.debug(f"非管理员用户 {current_user.username} 只能查看自己的日志")
            query = query.filter(SystemLog.user_id == current_user.id)

        # Apply indexed equality filters first.
        if level:
            query = query.filter(SystemLog.level == level)

        if module:
            query = query.filter(SystemLog.module == module)

        if action:
            query = query.filter(SystemLog.action == action)

        if resource_type:
            query = query.filter(SystemLog.resource_type == resource_type)

        if severity is not None:
            query = query.filter(SystemLog.severity >= severity)

        # A start/end range takes precedence over a single-date filter.
        if start_date and end_date:
            try:
                start_dt = datetime.strptime(start_date, '%Y-%m-%d')
                end_dt = datetime.strptime(end_date, '%Y-%m-%d').replace(hour=23, minute=59, second=59)
                query = query.filter(
                    SystemLog.created_at >= start_dt,
                    SystemLog.created_at <= end_dt
                )
            except ValueError:
                # Invalid range is logged and ignored rather than erroring.
                logger.warning(f"无效的日期范围: {start_date} - {end_date}")
        elif date:
            try:
                filter_date = datetime.strptime(date, '%Y-%m-%d').date()
                query = query.filter(
                    db.func.date(SystemLog.created_at) == filter_date
                )
            except ValueError:
                logger.warning(f"无效的日期格式: {date}")

        # Free-text search: at least 2 chars, term capped at 100 chars.
        if search and len(search.strip()) >= 2:
            search_term = search.strip()[:100]
            query = query.filter(
                db.or_(
                    SystemLog.message.contains(search_term),
                    SystemLog.details.contains(search_term)
                )
            )

        # Tag search: term capped at 50 chars, matched as a quoted JSON element.
        if tags:
            tags_term = tags.strip()[:50]
            query = query.filter(
                SystemLog.tags.contains(f'"{tags_term}"')
            )

        # Paginate on the indexed created_at ordering.
        pagination = query.order_by(SystemLog.created_at.desc())\
            .paginate(page=page, per_page=per_page, error_out=False)

        # Serialize; message text is truncated to keep responses small.
        logs = []
        for log in pagination.items:
            log_dict = {
                'id': log.id,
                'level': log.level,
                'module': log.module,
                'message': log.message[:500] if log.message else '',  # 限制消息长度
                'action': log.action,
                'resource_type': log.resource_type,
                'resource_id': log.resource_id,
                'severity': log.severity,
                'execution_time': log.execution_time,
                'status_code': log.status_code,
                'timestamp': log.created_at.isoformat(),
                'username': log.user.username if log.user else None,
                'ip_address': log.ip_address,
                'tags': log.tags
            }
            logs.append(log_dict)

        return jsonify(create_response(True, '获取成功', {
            'logs': logs,
            'pagination': {
                'page': page,
                'per_page': per_page,
                'total': pagination.total,
                'pages': pagination.pages,
                'has_prev': pagination.has_prev,
                'has_next': pagination.has_next
            },
            'filters_applied': {
                'level': level,
                'module': module,
                'action': action,
                'resource_type': resource_type,
                'severity': severity,
                'date': date,
                'search': search,
                'tags': tags
            }
        }))

    except Exception as e:
        logger.error(f"获取日志失败: {str(e)}", exc_info=True)
        return jsonify(create_response(False, f'获取日志失败: {str(e)}', {})), 500


@dashboard_bp.route('/api/logs/<int:log_id>')
@login_required
def api_get_log_detail(log_id):
    """获取日志详情

    Returns the full serialized log entry.  Non-admin users may only view
    their own entries (403 otherwise); a missing id yields 404.

    Fix: ``get_or_404`` raises a werkzeug ``NotFound``, which is an
    ``Exception`` subclass — previously it sat inside the broad
    ``except Exception`` and a missing log came back as a 500 instead of
    a 404.  The lookup and permission check now run before the try block.
    """
    logger.debug(f"用户 {current_user.username} 请求日志详情 - 日志ID: {log_id} - IP: {request.remote_addr}")

    # Outside the try so the 404 HTTPException propagates to Flask.
    log = SystemLog.query.get_or_404(log_id)

    # 权限检查
    if not current_user.is_admin and log.user_id != current_user.id:
        logger.warning(f"用户 {current_user.username} 尝试访问无权限的日志详情 - 日志ID: {log_id}")
        return jsonify(create_response(False, '权限不足')), 403

    try:
        log_dict = log.to_dict()
        log_dict['timestamp'] = log.created_at.isoformat()
        log_dict['username'] = log.user.username if log.user else None

        return jsonify(create_response(True, '获取成功', log_dict))

    except Exception as e:
        logger.error(f"获取日志详情失败: {str(e)}", exc_info=True)
        return jsonify(create_response(False, f'获取日志详情失败: {str(e)}', {})), 500


@dashboard_bp.route('/api/logs/batch', methods=['POST'])
@login_required
@admin_required
def api_batch_log_operations():
    """批量日志操作（仅管理员）

    JSON body: ``{operation: 'delete'|'export', log_ids: [...], filters: {...}}``.
    ``delete`` removes rows by explicit ids, or by filters when no ids are
    given; ``export`` returns up to 10000 serialized rows matching the same
    selection.

    Fixes: removed the redundant local ``from datetime import datetime as dt``
    imports; malformed filter dates now return 400 instead of a generic 500.
    """
    logger.info(f"管理员 {current_user.username} 请求批量日志操作 - IP: {request.remote_addr}")

    try:
        data = request.get_json()
        if not data:
            return jsonify(create_response(False, '请求数据不能为空', {})), 400

        operation = data.get('operation')
        log_ids = data.get('log_ids', [])
        filters = data.get('filters', {})

        if not operation:
            return jsonify(create_response(False, '操作类型不能为空', {})), 400

        if operation == 'delete':
            if log_ids:
                # Delete by explicit ids.
                deleted_count = SystemLog.query.filter(SystemLog.id.in_(log_ids)).delete(synchronize_session=False)
            else:
                # Delete by filter.  NOTE(review): with no ids and an empty
                # filters dict this deletes ALL logs - confirm intended.
                query = SystemLog.query
                if filters.get('level'):
                    query = query.filter(SystemLog.level == filters['level'])
                if filters.get('module'):
                    query = query.filter(SystemLog.module == filters['module'])
                if filters.get('before_date'):
                    try:
                        before_date = datetime.strptime(filters['before_date'], '%Y-%m-%d')
                    except ValueError:
                        return jsonify(create_response(False, '无效的日期格式', {})), 400
                    query = query.filter(SystemLog.created_at < before_date)

                deleted_count = query.delete(synchronize_session=False)

            db.session.commit()
            log_system_event('INFO', 'web.dashboard', f'管理员 {current_user.username} 批量删除了 {deleted_count} 条日志')

            return jsonify(create_response(True, f'成功删除 {deleted_count} 条日志', {'deleted_count': deleted_count}))

        elif operation == 'export':
            query = SystemLog.query
            if log_ids:
                query = query.filter(SystemLog.id.in_(log_ids))
            else:
                # Apply filter conditions.
                if filters.get('level'):
                    query = query.filter(SystemLog.level == filters['level'])
                if filters.get('module'):
                    query = query.filter(SystemLog.module == filters['module'])
                if filters.get('start_date') and filters.get('end_date'):
                    try:
                        start_date = datetime.strptime(filters['start_date'], '%Y-%m-%d')
                        end_date = datetime.strptime(filters['end_date'], '%Y-%m-%d').replace(hour=23, minute=59, second=59)
                    except ValueError:
                        return jsonify(create_response(False, '无效的日期格式', {})), 400
                    query = query.filter(SystemLog.created_at >= start_date, SystemLog.created_at <= end_date)

            # Cap the export at 10000 rows, newest first.
            logs = query.order_by(SystemLog.created_at.desc()).limit(10000).all()

            export_data = []
            for log in logs:
                export_data.append({
                    'id': log.id,
                    'timestamp': log.created_at.isoformat(),
                    'level': log.level,
                    'module': log.module,
                    'action': log.action,
                    'message': log.message,
                    'username': log.user.username if log.user else '系统',
                    'ip_address': log.ip_address,
                    'resource_type': log.resource_type,
                    'resource_id': log.resource_id
                })

            return jsonify(create_response(True, f'成功导出 {len(export_data)} 条日志', {'logs': export_data}))

        else:
            return jsonify(create_response(False, '不支持的操作类型', {})), 400

    except Exception as e:
        logger.error(f"批量日志操作失败: {str(e)}", exc_info=True)
        db.session.rollback()
        return jsonify(create_response(False, f'批量操作失败: {str(e)}', {})), 500


@dashboard_bp.route('/api/logs/search/advanced', methods=['POST'])
@login_required
def api_advanced_log_search():
    """高级日志搜索

    JSON body: ``{conditions: [{field, operator, value}, ...],
    logic_operator: 'AND'|'OR', page, per_page, sort_field, sort_order}``.
    Supported fields: message, level, module, created_at, severity.
    Non-admin users are restricted to their own logs.

    Fixes: removed the redundant local ``from datetime import datetime as dt``
    import; ``page`` is now clamped to >= 1 for consistency with /api/logs.
    """
    logger.debug(f"用户 {current_user.username} 请求高级日志搜索 - IP: {request.remote_addr}")

    try:
        data = request.get_json()
        if not data:
            return jsonify(create_response(False, '搜索条件不能为空', {})), 400

        query = SystemLog.query

        # Permission scoping: non-admins only see their own logs.
        if not current_user.is_admin:
            query = query.filter(SystemLog.user_id == current_user.id)

        conditions = data.get('conditions', [])
        logic_operator = data.get('logic_operator', 'AND')  # AND 或 OR

        from sqlalchemy import and_, or_

        search_filters = []
        for condition in conditions:
            field = condition.get('field')
            operator = condition.get('operator')
            value = condition.get('value')

            # NOTE(review): this also skips falsy-but-valid values
            # (e.g. severity 0, empty string) - confirm acceptable.
            if not all([field, operator, value]):
                continue

            # Translate each condition into a SQLAlchemy filter.
            if field == 'message':
                if operator == 'contains':
                    search_filters.append(SystemLog.message.contains(value))
                elif operator == 'equals':
                    search_filters.append(SystemLog.message == value)
                elif operator == 'starts_with':
                    search_filters.append(SystemLog.message.startswith(value))
            elif field == 'level':
                if operator == 'equals':
                    search_filters.append(SystemLog.level == value)
                elif operator == 'in':
                    search_filters.append(SystemLog.level.in_(value if isinstance(value, list) else [value]))
            elif field == 'module':
                if operator == 'equals':
                    search_filters.append(SystemLog.module == value)
                elif operator == 'contains':
                    search_filters.append(SystemLog.module.contains(value))
            elif field == 'created_at':
                if operator == 'after':
                    date_value = datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
                    search_filters.append(SystemLog.created_at > date_value)
                elif operator == 'before':
                    date_value = datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
                    search_filters.append(SystemLog.created_at < date_value)
                elif operator == 'between':
                    start_date = datetime.strptime(value[0], '%Y-%m-%d %H:%M:%S')
                    end_date = datetime.strptime(value[1], '%Y-%m-%d %H:%M:%S')
                    search_filters.append(and_(SystemLog.created_at >= start_date, SystemLog.created_at <= end_date))
            elif field == 'severity':
                if operator == 'equals':
                    search_filters.append(SystemLog.severity == int(value))
                elif operator == 'greater_than':
                    search_filters.append(SystemLog.severity > int(value))
                elif operator == 'less_than':
                    search_filters.append(SystemLog.severity < int(value))

        # Combine the collected filters with the requested logic operator.
        if search_filters:
            if logic_operator == 'OR':
                query = query.filter(or_(*search_filters))
            else:
                query = query.filter(and_(*search_filters))

        # Pagination parameters (page clamped like /api/logs).
        page = max(1, data.get('page', 1))
        per_page = min(100, max(10, data.get('per_page', 20)))

        # Sorting: only created_at and level are supported; others ignored.
        sort_field = data.get('sort_field', 'created_at')
        sort_order = data.get('sort_order', 'desc')

        if sort_field == 'created_at':
            if sort_order == 'asc':
                query = query.order_by(SystemLog.created_at.asc())
            else:
                query = query.order_by(SystemLog.created_at.desc())
        elif sort_field == 'level':
            if sort_order == 'asc':
                query = query.order_by(SystemLog.level.asc())
            else:
                query = query.order_by(SystemLog.level.desc())

        pagination = query.paginate(page=page, per_page=per_page, error_out=False)

        # Serialize results; message text truncated to 500 chars.
        logs = []
        for log in pagination.items:
            log_dict = {
                'id': log.id,
                'level': log.level,
                'module': log.module,
                'message': log.message[:500] if log.message else '',
                'action': log.action,
                'resource_type': log.resource_type,
                'severity': log.severity,
                'timestamp': log.created_at.isoformat(),
                'username': log.user.username if log.user else None,
                'ip_address': log.ip_address
            }
            logs.append(log_dict)

        return jsonify(create_response(True, '搜索成功', {
            'logs': logs,
            'pagination': {
                'page': page,
                'per_page': per_page,
                'total': pagination.total,
                'pages': pagination.pages,
                'has_prev': pagination.has_prev,
                'has_next': pagination.has_next
            },
            'search_summary': {
                'conditions_count': len(conditions),
                'logic_operator': logic_operator,
                'results_count': pagination.total
            }
        }))

    except Exception as e:
        logger.error(f"高级日志搜索失败: {str(e)}", exc_info=True)
        return jsonify(create_response(False, f'搜索失败: {str(e)}', {})), 500


@dashboard_bp.route('/api/logs/clear', methods=['POST'])
@login_required
@admin_required
def api_clear_logs():
    """清理系统日志（仅管理员）

    JSON body: ``{type: 'all' | '<days>'}``; 'all' wipes every log, a
    numeric string deletes logs older than that many days (default '7').

    Fixes: a missing/invalid JSON body previously made ``data.get`` crash
    with an AttributeError (unhandled, outside the try); a non-numeric
    type now returns 400 instead of a generic 500; removed the redundant
    local ``from datetime import timedelta``.
    """
    logger.warning(f"管理员 {current_user.username} 请求清理系统日志 - IP: {request.remote_addr}")

    # Guard against an absent or non-JSON body (destructive endpoint).
    data = request.get_json(silent=True)
    if data is None:
        return jsonify(create_response(False, '请求数据不能为空')), 400
    clear_type = data.get('type', '7')

    try:
        if clear_type == 'all':
            # Wipe every log row.
            logger.warning(f"管理员 {current_user.username} 清理所有系统日志")
            deleted_count = SystemLog.query.delete()
        else:
            try:
                days = int(clear_type)
            except (TypeError, ValueError):
                return jsonify(create_response(False, '无效的清理类型')), 400

            # Delete rows older than the cutoff.
            # NOTE(review): uses utcnow() while the rest of this module
            # compares created_at against datetime.now() - confirm which
            # timezone created_at is stored in.
            logger.warning(f"管理员 {current_user.username} 清理 {days} 天前的系统日志")
            cutoff_date = datetime.utcnow() - timedelta(days=days)
            deleted_count = SystemLog.query.filter(
                SystemLog.created_at < cutoff_date
            ).delete()

        db.session.commit()
        logger.info(f"管理员 {current_user.username} 成功清理了 {deleted_count} 条系统日志")

        # Record the cleanup as a system event; failures here must not
        # undo or mask the (already committed) deletion.
        try:
            log_system_event(
                level='INFO',
                module='web.dashboard',
                message=f'管理员 {current_user.username} 清理了 {deleted_count} 条日志记录',
                details=f'清理类型: {clear_type}, 清理数量: {deleted_count}',
                action='CLEAR_LOGS',
                resource_type='SYSTEM_LOG',
                tags=['admin_operation', 'log_cleanup']
            )
        except Exception as log_error:
            logger.warning(f"记录清理操作日志失败: {log_error}")

        return jsonify(create_response(True, f'成功清理 {deleted_count} 条日志记录'))

    except Exception as e:
        db.session.rollback()
        return jsonify(create_response(False, f'清理日志失败: {str(e)}')), 500


@dashboard_bp.route('/api/logs/stats')
@login_required
def api_log_stats():
    """获取日志统计信息

    JSON summary of log activity: total/today/week/month counts, per-level,
    per-module, per-action and per-resource-type breakdowns (top 10 each),
    error/warning counts, a 7-day trend, top users (admins only) and the
    average recorded execution time.  Non-admins only see their own logs.

    Fix: the average-execution-time query previously passed a bare Python
    ``True`` into ``and_()`` as a stand-in filter for admins, relying on
    SQLAlchemy's implicit coercion; it is now an explicit conditional filter.
    """
    logger.debug(f"用户 {current_user.username} 请求日志统计信息 - IP: {request.remote_addr}")

    try:
        from sqlalchemy import func, and_

        base_query = SystemLog.query

        # Non-admins only see statistics over their own logs.
        if not current_user.is_admin:
            base_query = base_query.filter(SystemLog.user_id == current_user.id)

        # Time anchors (local time, consistent with the rest of the module).
        now = datetime.now()
        today_start = now.replace(hour=0, minute=0, second=0, microsecond=0)
        week_start = now - timedelta(days=7)
        month_start = now - timedelta(days=30)

        # Headline counts.
        total_logs = base_query.count()
        today_logs = base_query.filter(SystemLog.created_at >= today_start).count()
        week_logs = base_query.filter(SystemLog.created_at >= week_start).count()
        month_logs = base_query.filter(SystemLog.created_at >= month_start).count()

        # Per-level breakdown (single grouped query).
        level_stats_query = db.session.query(
            SystemLog.level,
            func.count(SystemLog.id).label('count')
        )
        if not current_user.is_admin:
            level_stats_query = level_stats_query.filter(SystemLog.user_id == current_user.id)
        level_stats = level_stats_query.group_by(SystemLog.level).all()

        # Per-module breakdown (top 10).
        module_stats_query = db.session.query(
            SystemLog.module,
            func.count(SystemLog.id).label('count')
        )
        if not current_user.is_admin:
            module_stats_query = module_stats_query.filter(SystemLog.user_id == current_user.id)
        module_stats = module_stats_query.group_by(SystemLog.module)\
            .order_by(func.count(SystemLog.id).desc()).limit(10).all()

        # Per-action breakdown (top 10, NULL actions excluded).
        action_stats_query = db.session.query(
            SystemLog.action,
            func.count(SystemLog.id).label('count')
        ).filter(SystemLog.action.isnot(None))
        if not current_user.is_admin:
            action_stats_query = action_stats_query.filter(SystemLog.user_id == current_user.id)
        action_stats = action_stats_query.group_by(SystemLog.action)\
            .order_by(func.count(SystemLog.id).desc()).limit(10).all()

        # Per-resource-type breakdown (top 10, NULLs excluded).
        resource_stats_query = db.session.query(
            SystemLog.resource_type,
            func.count(SystemLog.id).label('count')
        ).filter(SystemLog.resource_type.isnot(None))
        if not current_user.is_admin:
            resource_stats_query = resource_stats_query.filter(SystemLog.user_id == current_user.id)
        resource_stats = resource_stats_query.group_by(SystemLog.resource_type)\
            .order_by(func.count(SystemLog.id).desc()).limit(10).all()

        # Error counts (several severities count as "error").
        error_levels = ['ERROR', 'CRITICAL', 'ALERT', 'EMERGENCY']
        error_query = base_query.filter(SystemLog.level.in_(error_levels))
        error_logs = error_query.count()
        recent_errors = error_query.filter(SystemLog.created_at >= today_start).count()

        # Warning counts.
        warning_query = base_query.filter(SystemLog.level == 'WARNING')
        warning_logs = warning_query.count()
        recent_warnings = warning_query.filter(SystemLog.created_at >= today_start).count()

        # 7-day trend, oldest day first.
        trend_data = []
        for i in range(7):
            date = now - timedelta(days=6-i)
            next_date = date + timedelta(days=1)

            day_query = base_query.filter(
                and_(
                    SystemLog.created_at >= date.replace(hour=0, minute=0, second=0, microsecond=0),
                    SystemLog.created_at < next_date.replace(hour=0, minute=0, second=0, microsecond=0)
                )
            )

            day_count = day_query.count()
            day_errors = day_query.filter(SystemLog.level.in_(error_levels)).count()
            day_warnings = day_query.filter(SystemLog.level == 'WARNING').count()

            trend_data.append({
                'date': date.strftime('%Y-%m-%d'),
                'total': day_count,
                'errors': day_errors,
                'warnings': day_warnings
            })

        # Most active users (admin-only view).
        top_users = []
        if current_user.is_admin:
            top_users_query = db.session.query(
                User.username,
                func.count(SystemLog.id).label('count')
            ).join(SystemLog, User.id == SystemLog.user_id)\
             .group_by(User.username)\
             .order_by(func.count(SystemLog.id).desc()).limit(5)
            top_users = [{'username': user.username, 'count': user.count} for user in top_users_query.all()]

        # Average execution time over logs that recorded one; explicit
        # conditional scoping instead of and_(..., True).
        avg_query = db.session.query(func.avg(SystemLog.execution_time))\
            .filter(SystemLog.execution_time.isnot(None))
        if not current_user.is_admin:
            avg_query = avg_query.filter(SystemLog.user_id == current_user.id)
        avg_execution_time = avg_query.scalar()

        return jsonify(create_response(True, '获取成功', {
            'summary': {
                'total_logs': total_logs,
                'today_logs': today_logs,
                'week_logs': week_logs,
                'month_logs': month_logs,
                'error_logs': error_logs,
                'warning_logs': warning_logs,
                'recent_errors': recent_errors,
                'recent_warnings': recent_warnings,
                'avg_execution_time': round(avg_execution_time, 2) if avg_execution_time else None
            },
            'level_stats': [{'level': stat.level, 'count': stat.count} for stat in level_stats],
            'module_stats': [{'module': stat.module, 'count': stat.count} for stat in module_stats],
            'action_stats': [{'action': stat.action, 'count': stat.count} for stat in action_stats],
            'resource_stats': [{'resource_type': stat.resource_type, 'count': stat.count} for stat in resource_stats],
            'trend_data': trend_data,
            'top_users': top_users
        }))

    except Exception as e:
        logger.error(f"获取日志统计失败: {str(e)}", exc_info=True)
        return jsonify(create_response(False, f'获取统计信息失败: {str(e)}', {})), 500





@dashboard_bp.route('/api/logs/export')
@login_required
def api_export_logs():
    """Export system logs as a CSV attachment.

    Query parameters:
        level:  exact log level to include
        module: exact module name to include
        date:   'YYYY-MM-DD'; rows created on that day only
        search: substring match against the log message

    Non-admin users only receive their own rows. Output is capped at
    10000 rows, newest first.
    """
    logger.info(f"用户 {current_user.username} 请求导出系统日志 - IP: {request.remote_addr}")
    from flask import make_response
    import csv
    import io

    # Collect filter parameters from the query string
    level = request.args.get('level')
    module = request.args.get('module')
    date = request.args.get('date')
    search = request.args.get('search')

    query = SystemLog.query

    # Non-admin users may only export their own logs
    if not current_user.is_admin:
        logger.debug(f"非管理员用户 {current_user.username} 只能导出自己的日志")
        query = query.filter_by(user_id=current_user.id)

    # Apply optional filters
    if level:
        query = query.filter_by(level=level)

    if module:
        query = query.filter_by(module=module)

    if date:
        try:
            # `datetime` (the class) is already imported at module level;
            # no need to re-import it locally.
            filter_date = datetime.strptime(date, '%Y-%m-%d').date()
            query = query.filter(
                db.func.date(SystemLog.created_at) == filter_date
            )
        except ValueError:
            # Silently ignore a malformed date instead of failing the export
            pass

    if search:
        query = query.filter(
            SystemLog.message.contains(search)
        )

    # Cap the export at 10000 rows, newest first
    logs = query.order_by(SystemLog.created_at.desc()).limit(10000).all()

    # Build the CSV in memory
    output = io.StringIO()
    writer = csv.writer(output)

    # Header row (kept in Chinese: it is user-facing export content)
    writer.writerow(['时间', '级别', '模块', '用户', 'IP地址', '消息', '详细信息'])

    for log in logs:
        writer.writerow([
            # Guard against NULL timestamps so one bad row cannot abort the export
            log.created_at.strftime('%Y-%m-%d %H:%M:%S') if log.created_at else '',
            log.level,
            log.module,
            log.user.username if log.user else '系统',
            log.ip_address or '',
            log.message,
            log.details or ''
        ])

    # Fix: actually emit a UTF-8 BOM so spreadsheet apps (Excel) detect the
    # encoding and render Chinese text correctly. The previous header value
    # 'utf-8-sig' is a Python codec name, not a valid HTTP charset, and no
    # BOM was ever written.
    response = make_response('\ufeff' + output.getvalue())
    response.headers['Content-Type'] = 'text/csv; charset=utf-8'
    response.headers['Content-Disposition'] = f'attachment; filename=system_logs_{datetime.now().strftime("%Y%m%d_%H%M%S")}.csv'

    return response


@dashboard_bp.route('/api/system/status')
@login_required
def api_system_status():
    """Report health of the web app, database, scheduler and executor.

    Returns JSON with host system info and a per-service
    'healthy'/'error' status map. On any failure responds with HTTP 500.
    """
    logger.debug(f"用户 {current_user.username} 请求系统状态信息 - IP: {request.remote_addr}")
    try:
        from services.shared.utils import get_system_info
        from services.shared.service_client import get_service_registry
        system_info = get_system_info()

        # Probe database connectivity with a trivial query
        try:
            db.session.execute(db.text('SELECT 1'))
            db_status = 'healthy'
        except Exception:
            db_status = 'error'

        # Ask the service registry to health-check the other services
        service_registry = get_service_registry()
        health_status = service_registry.health_check_all()

        # Aggregate per-service status; missing registry entries count as errors
        services_status = {
            'web': 'healthy',  # this process is serving the request
            'database': db_status,
            'scheduler': 'healthy' if health_status.get('scheduler', False) else 'error',
            'executor': 'healthy' if health_status.get('executor', False) else 'error'
        }

        return jsonify(create_response(True, '获取成功', {
            'system_info': system_info,
            'services_status': services_status,
            'uptime': 'unknown'  # TODO: implement uptime tracking
        }))

    except Exception as e:
        # Fix: log the failure with a traceback, matching the error-handling
        # style of the other API handlers in this module, instead of
        # returning a 500 with no server-side trace.
        logger.error(f"获取系统状态失败: {str(e)}", exc_info=True)
        return jsonify(create_response(False, f'获取系统状态失败: {str(e)}')), 500


@dashboard_bp.route('/api/stats/execution_trend')
@login_required
def api_execution_trend():
    """Daily task-execution trend over the last ``days`` calendar days.

    Query parameter ``days`` (int, default 30). Admins see all executions;
    other users only see executions of their own tasks. Each entry carries
    total / success / failed counts and a success-rate percentage.
    """
    logger.debug(f"用户 {current_user.username} 请求执行趋势统计 - IP: {request.remote_addr}")
    days = request.args.get('days', 30, type=int)

    from sqlalchemy import and_

    now = datetime.now()
    # Fix: anchor buckets to local midnight so each entry covers exactly one
    # calendar day. Previously start_date inherited the current time of day,
    # so a row labelled e.g. '2024-01-15' mixed executions from two dates
    # and "today" was split across two buckets.
    today_start = datetime.combine(now.date(), datetime.min.time())
    start_date = today_start - timedelta(days=days - 1)

    # Scope the base query: admins see everything, others only their own tasks
    if current_user.is_admin:
        execution_query = TaskExecution.query
    else:
        execution_query = TaskExecution.query.join(Task).filter(Task.user_id == current_user.id)

    def _count(day_start, day_end, *extra_filters):
        """Count executions started within [day_start, day_end) matching extra filters."""
        return execution_query.filter(
            and_(
                TaskExecution.started_at >= day_start,
                TaskExecution.started_at < day_end,
                *extra_filters
            )
        ).count()

    # NOTE(review): this issues 3 COUNT queries per day (fine for small
    # `days`); switch to a single GROUP BY date/status query if it gets hot.
    trend_data = []
    for offset in range(days):
        day = start_date + timedelta(days=offset)
        next_day = day + timedelta(days=1)

        day_total = _count(day, next_day)
        day_success = _count(day, next_day, TaskExecution.status == TaskStatus.SUCCESS)
        day_failed = _count(day, next_day, TaskExecution.status == TaskStatus.FAILED)

        trend_data.append({
            'date': day.strftime('%Y-%m-%d'),
            'total': day_total,
            'success': day_success,
            'failed': day_failed,
            'success_rate': round(day_success / day_total * 100, 2) if day_total > 0 else 0
        })

    return jsonify(create_response(True, '获取成功', {
        'trend_data': trend_data,
        'period': f'{days}天'
    }))