#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
复盘分析系统
实现运营数据分析、性能评估和优化建议
"""

import time
import logging
import threading
from typing import Dict, List, Optional, Any, Tuple
from dataclasses import dataclass
from datetime import datetime, timedelta
import json
import statistics
from pathlib import Path

logger = logging.getLogger(__name__)


@dataclass
class PerformanceMetrics:
    """Point-in-time snapshot of system performance indicators.

    NOTE(review): this dataclass is not constructed anywhere in this module;
    units below are inferred from the analyzer's dict output — confirm.
    """
    timestamp: float            # unix timestamp of the snapshot
    response_time_avg: float    # mean task response time (presumably seconds)
    response_time_p95: float    # 95th-percentile response time
    response_time_p99: float    # 99th-percentile response time
    throughput: float           # tasks processed per unit time (likely per hour)
    error_rate: float           # failed / total tasks, in [0, 1]
    cpu_utilization: float      # CPU usage — confirm fraction (0-1) vs percent
    memory_utilization: float   # memory usage — same caveat
    gpu_utilization: float      # GPU usage — same caveat


@dataclass
class CostAnalysis:
    """Aggregated cost figures for one analysis period."""
    period: str             # label of the analysis window (e.g. 'daily')
    total_cost: float       # overall spend for the period
    gpu_cost: float         # GPU portion of the spend
    traffic_cost: float     # network-traffic portion of the spend
    cost_per_task: float    # spend divided by number of processed tasks
    cost_efficiency: float  # tasks per unit of spend (inverse of cost_per_task)
    cost_trend: str  # "increasing", "decreasing", "stable"


@dataclass
class UserBehaviorAnalysis:
    """User-behavior statistics for one analysis period."""
    period: str                     # label of the analysis window
    total_users: int                # distinct users seen in the window
    active_users: int               # users with at least one task
    avg_tasks_per_user: float       # mean task count per user
    user_retention_rate: float      # NOTE(review): never computed in this module — confirm source
    peak_usage_hours: List[int]     # hours of day (0-23) with elevated usage
    usage_patterns: Dict[str, Any]  # free-form pattern stats (see _analyze_usage_patterns)


class DataAnalyzer:
    """Operational data analyzer.

    Reads JSON-lines log files from a directory and derives performance,
    cost and user-behavior statistics.
    """

    def __init__(self, structured_logger, log_dir: str = "logs"):
        """
        Create an analyzer.

        Args:
            structured_logger: structured log writer used for audit events
            log_dir: directory containing the JSON-lines log files
        """
        self.structured_logger = structured_logger
        self.log_dir = Path(log_dir)

        # Supported analysis window lengths, in seconds.
        self.analysis_intervals = {
            'hourly': 3600,
            'daily': 86400,
            'weekly': 604800,
            'monthly': 2592000,
        }

        # Cached analysis results plus a lock guarding concurrent access.
        self.analysis_cache: Dict[str, Any] = {}
        self.cache_lock = threading.Lock()

        logger.info("数据分析器已初始化")
    
    def analyze_task_processing_performance(self, start_time: float, end_time: float) -> Dict[str, Any]:
        """
        分析任务处理性能
        
        Args:
            start_time: 开始时间戳
            end_time: 结束时间戳
            
        Returns:
            性能分析结果
        """
        try:
            # 从日志文件读取任务处理数据
            task_logs = self._read_task_logs(start_time, end_time)
            
            if not task_logs:
                return {"error": "没有找到任务处理日志"}
            
            # 计算性能指标
            response_times = [log.get('processing_duration', 0) for log in task_logs if log.get('processing_duration')]
            wait_times = [log.get('wait_duration', 0) for log in task_logs if log.get('wait_duration')]
            
            # 成功率统计
            total_tasks = len(task_logs)
            successful_tasks = len([log for log in task_logs if log.get('status') == 'success'])
            error_rate = (total_tasks - successful_tasks) / total_tasks if total_tasks > 0 else 0
            
            # 响应时间统计
            response_time_avg = statistics.mean(response_times) if response_times else 0
            response_time_p95 = self._calculate_percentile(response_times, 95) if response_times else 0
            response_time_p99 = self._calculate_percentile(response_times, 99) if response_times else 0
            
            # 等待时间统计
            wait_time_avg = statistics.mean(wait_times) if wait_times else 0
            wait_time_p95 = self._calculate_percentile(wait_times, 95) if wait_times else 0
            
            # 吞吐量计算
            duration_hours = (end_time - start_time) / 3600
            throughput = total_tasks / duration_hours if duration_hours > 0 else 0
            
            # 按服务器统计
            server_stats = self._analyze_server_performance(task_logs)
            
            # 按地域统计
            region_stats = self._analyze_region_performance(task_logs)
            
            # 按GPU类型统计
            gpu_stats = self._analyze_gpu_performance(task_logs)
            
            return {
                "period": {
                    "start_time": start_time,
                    "end_time": end_time,
                    "duration_hours": duration_hours
                },
                "overall_metrics": {
                    "total_tasks": total_tasks,
                    "successful_tasks": successful_tasks,
                    "error_rate": error_rate,
                    "throughput_tasks_per_hour": throughput
                },
                "response_time_metrics": {
                    "average": response_time_avg,
                    "p95": response_time_p95,
                    "p99": response_time_p99
                },
                "wait_time_metrics": {
                    "average": wait_time_avg,
                    "p95": wait_time_p95
                },
                "server_performance": server_stats,
                "region_performance": region_stats,
                "gpu_performance": gpu_stats
            }
            
        except Exception as e:
            logger.error(f"分析任务处理性能失败: {e}")
            return {"error": str(e)}
    
    def analyze_cost_efficiency(self, start_time: float, end_time: float) -> Dict[str, Any]:
        """
        分析成本效率
        
        Args:
            start_time: 开始时间戳
            end_time: 结束时间戳
            
        Returns:
            成本分析结果
        """
        try:
            # 从日志文件读取成本数据
            cost_logs = self._read_cost_logs(start_time, end_time)
            
            if not cost_logs:
                return {"error": "没有找到成本日志"}
            
            # 计算总成本
            total_gpu_cost = sum(log.get('gpu_cost', 0) for log in cost_logs)
            total_traffic_cost = sum(log.get('traffic_cost', 0) for log in cost_logs)
            total_cost = sum(log.get('total_cost', 0) for log in cost_logs)
            
            # 计算任务数量
            task_logs = self._read_task_logs(start_time, end_time)
            total_tasks = len(task_logs)
            
            # 成本效率指标
            cost_per_task = total_cost / total_tasks if total_tasks > 0 else 0
            cost_efficiency = total_tasks / total_cost if total_cost > 0 else 0
            
            # 成本趋势分析
            cost_trend = self._analyze_cost_trend(cost_logs)
            
            # 按地域成本分析
            region_cost_analysis = self._analyze_region_costs(cost_logs)
            
            # 按GPU类型成本分析
            gpu_cost_analysis = self._analyze_gpu_costs(cost_logs)
            
            return {
                "period": {
                    "start_time": start_time,
                    "end_time": end_time,
                    "duration_hours": (end_time - start_time) / 3600
                },
                "cost_summary": {
                    "total_cost": total_cost,
                    "gpu_cost": total_gpu_cost,
                    "traffic_cost": total_traffic_cost,
                    "gpu_cost_ratio": total_gpu_cost / total_cost if total_cost > 0 else 0,
                    "traffic_cost_ratio": total_traffic_cost / total_cost if total_cost > 0 else 0
                },
                "efficiency_metrics": {
                    "total_tasks": total_tasks,
                    "cost_per_task": cost_per_task,
                    "cost_efficiency": cost_efficiency
                },
                "cost_trend": cost_trend,
                "region_cost_analysis": region_cost_analysis,
                "gpu_cost_analysis": gpu_cost_analysis
            }
            
        except Exception as e:
            logger.error(f"分析成本效率失败: {e}")
            return {"error": str(e)}
    
    def analyze_user_behavior(self, start_time: float, end_time: float) -> Dict[str, Any]:
        """
        分析用户行为
        
        Args:
            start_time: 开始时间戳
            end_time: 结束时间戳
            
        Returns:
            用户行为分析结果
        """
        try:
            # 从日志文件读取用户数据
            task_logs = self._read_task_logs(start_time, end_time)
            
            if not task_logs:
                return {"error": "没有找到用户数据"}
            
            # 用户统计
            user_tasks = {}
            user_ips = {}
            
            for log in task_logs:
                user_id = log.get('user_id', '')
                client_ip = log.get('client_ip', '')
                
                if user_id:
                    if user_id not in user_tasks:
                        user_tasks[user_id] = []
                    user_tasks[user_id].append(log)
                
                if client_ip:
                    if client_ip not in user_ips:
                        user_ips[client_ip] = []
                    user_ips[client_ip].append(log)
            
            # 计算用户指标
            total_users = len(user_tasks)
            active_users = len([user for user, tasks in user_tasks.items() if len(tasks) > 0])
            avg_tasks_per_user = sum(len(tasks) for tasks in user_tasks.values()) / total_users if total_users > 0 else 0
            
            # 使用时间分析
            usage_hours = self._analyze_usage_hours(task_logs)
            peak_hours = self._find_peak_usage_hours(usage_hours)
            
            # 用户行为模式
            usage_patterns = self._analyze_usage_patterns(user_tasks)
            
            # IP分析
            ip_analysis = self._analyze_ip_usage(user_ips)
            
            return {
                "period": {
                    "start_time": start_time,
                    "end_time": end_time,
                    "duration_hours": (end_time - start_time) / 3600
                },
                "user_metrics": {
                    "total_users": total_users,
                    "active_users": active_users,
                    "user_activity_rate": active_users / total_users if total_users > 0 else 0,
                    "avg_tasks_per_user": avg_tasks_per_user
                },
                "usage_analysis": {
                    "peak_usage_hours": peak_hours,
                    "usage_distribution": usage_hours
                },
                "behavior_patterns": usage_patterns,
                "ip_analysis": ip_analysis
            }
            
        except Exception as e:
            logger.error(f"分析用户行为失败: {e}")
            return {"error": str(e)}
    
    def generate_optimization_recommendations(self, performance_data: Dict, cost_data: Dict, user_data: Dict) -> List[Dict[str, Any]]:
        """
        Derive actionable optimization suggestions from the analysis sections.

        Args:
            performance_data: output of analyze_task_processing_performance
            cost_data: output of analyze_cost_efficiency
            user_data: output of analyze_user_behavior

        Returns:
            List of recommendation dicts (type / priority / title /
            description / expected_improvement); may be empty.
        """
        suggestions: List[Dict[str, Any]] = []

        try:
            # Error rate above 5% warrants an investigation.
            if performance_data.get('overall_metrics', {}).get('error_rate', 0) > 0.05:
                suggestions.append({
                    "type": "performance",
                    "priority": "high",
                    "title": "降低错误率",
                    "description": f"当前错误率为 {performance_data['overall_metrics']['error_rate']:.2%}，建议检查服务器状态和任务处理逻辑",
                    "expected_improvement": "错误率降低到2%以下",
                })

            # Average response time above 5 seconds.
            if performance_data.get('response_time_metrics', {}).get('average', 0) > 5:
                suggestions.append({
                    "type": "performance",
                    "priority": "high",
                    "title": "优化响应时间",
                    "description": f"当前平均响应时间为 {performance_data['response_time_metrics']['average']:.2f}秒，建议优化任务处理流程",
                    "expected_improvement": "响应时间降低到3秒以下",
                })

            # Per-task cost above the 0.1 threshold.
            if cost_data.get('efficiency_metrics', {}).get('cost_per_task', 0) > 0.1:
                suggestions.append({
                    "type": "cost",
                    "priority": "medium",
                    "title": "降低任务成本",
                    "description": f"当前每任务成本为 {cost_data['efficiency_metrics']['cost_per_task']:.4f}元，建议优化资源使用",
                    "expected_improvement": "任务成本降低20%",
                })

            # Capacity advice when peak usage hours were detected.
            peak_hours = user_data.get('usage_analysis', {}).get('peak_usage_hours')
            if peak_hours:
                suggestions.append({
                    "type": "capacity",
                    "priority": "medium",
                    "title": "优化峰值时段容量",
                    "description": f"检测到峰值使用时段: {peak_hours}，建议在此时段增加服务器容量",
                    "expected_improvement": "减少用户等待时间",
                })

            # Flag servers running below 30% utilization.
            for server_id, stats in (performance_data.get('server_performance') or {}).items():
                if stats.get('utilization_rate', 0) < 0.3:
                    suggestions.append({
                        "type": "resource",
                        "priority": "low",
                        "title": f"优化服务器 {server_id} 利用率",
                        "description": f"服务器利用率仅为 {stats['utilization_rate']:.2%}，建议重新分配任务或考虑缩容",
                        "expected_improvement": "提高资源利用率",
                    })

        except Exception as e:
            logger.error(f"生成优化建议失败: {e}")
            suggestions.append({
                "type": "system",
                "priority": "high",
                "title": "系统错误",
                "description": f"生成优化建议时发生错误: {str(e)}",
                "expected_improvement": "修复系统错误",
            })

        return suggestions
    
    def _read_task_logs(self, start_time: float, end_time: float) -> List[Dict[str, Any]]:
        """读取任务处理日志"""
        logs = []
        
        try:
            # 遍历日志文件
            for log_file in self.log_dir.glob("task_processing_*.log"):
                with open(log_file, 'r', encoding='utf-8') as f:
                    for line in f:
                        try:
                            log_entry = json.loads(line.strip())
                            log_time = datetime.fromisoformat(log_entry['timestamp'].replace('Z', '+00:00')).timestamp()
                            
                            if start_time <= log_time <= end_time:
                                logs.append(log_entry.get('data', {}))
                        except (json.JSONDecodeError, ValueError):
                            continue
        except Exception as e:
            logger.error(f"读取任务日志失败: {e}")
        
        return logs
    
    def _read_cost_logs(self, start_time: float, end_time: float) -> List[Dict[str, Any]]:
        """读取成本日志"""
        logs = []
        
        try:
            # 遍历日志文件
            for log_file in self.log_dir.glob("cost_traffic_*.log"):
                with open(log_file, 'r', encoding='utf-8') as f:
                    for line in f:
                        try:
                            log_entry = json.loads(line.strip())
                            log_time = datetime.fromisoformat(log_entry['timestamp'].replace('Z', '+00:00')).timestamp()
                            
                            if start_time <= log_time <= end_time:
                                logs.append(log_entry.get('data', {}))
                        except (json.JSONDecodeError, ValueError):
                            continue
        except Exception as e:
            logger.error(f"读取成本日志失败: {e}")
        
        return logs
    
    def _calculate_percentile(self, data: List[float], percentile: int) -> float:
        """
        Return the nearest-rank percentile of *data*.

        Nearest-rank definition: the smallest sample value such that at least
        ``percentile`` percent of the data is <= it. The previous
        implementation indexed with ``int(n * p / 100)``, which overshoots by
        one rank (e.g. the 50th percentile of [1, 2, 3, 4] came out as 3
        instead of 2).

        Args:
            data: sample values, in any order
            percentile: integer percentile in [0, 100]

        Returns:
            The percentile value, or 0.0 for an empty sample.
        """
        if not data:
            return 0.0

        ordered = sorted(data)
        n = len(ordered)
        # ceil(n * percentile / 100) - 1, computed with integer arithmetic,
        # clamped to a valid index.
        rank = (n * percentile + 99) // 100 - 1
        return ordered[max(0, min(rank, n - 1))]
    
    def _analyze_server_performance(self, task_logs: List[Dict]) -> Dict[str, Any]:
        """Aggregate per-server task counts, success rate and timing averages."""
        per_server: Dict[str, Dict[str, Any]] = {}

        for rec in task_logs:
            sid = rec.get('server_id', '')
            if not sid:
                continue
            bucket = per_server.setdefault(sid, {
                'total_tasks': 0,
                'successful_tasks': 0,
                'total_processing_time': 0,
                'total_wait_time': 0,
            })
            bucket['total_tasks'] += 1
            if rec.get('status') == 'success':
                bucket['successful_tasks'] += 1
            if rec.get('processing_duration'):
                bucket['total_processing_time'] += rec['processing_duration']
            if rec.get('wait_duration'):
                bucket['total_wait_time'] += rec['wait_duration']

        # Derive the per-server ratios once counting is done.
        for bucket in per_server.values():
            done = bucket['total_tasks']
            if done > 0:
                bucket['success_rate'] = bucket['successful_tasks'] / done
                bucket['avg_processing_time'] = bucket['total_processing_time'] / done
                bucket['avg_wait_time'] = bucket['total_wait_time'] / done
                # Rough utilization proxy assuming a 60-second budget per task
                # — TODO confirm this constant against real task SLAs.
                bucket['utilization_rate'] = bucket['total_processing_time'] / (done * 60)

        return per_server
    
    def _analyze_region_performance(self, task_logs: List[Dict]) -> Dict[str, Any]:
        """Aggregate per-region task counts, success rate and mean processing time."""
        per_region: Dict[str, Dict[str, Any]] = {}

        for rec in task_logs:
            region = rec.get('region', '')
            if not region:
                continue
            bucket = per_region.setdefault(region, {
                'total_tasks': 0,
                'successful_tasks': 0,
                'total_processing_time': 0,
            })
            bucket['total_tasks'] += 1
            if rec.get('status') == 'success':
                bucket['successful_tasks'] += 1
            if rec.get('processing_duration'):
                bucket['total_processing_time'] += rec['processing_duration']

        # Derive the per-region ratios once counting is done.
        for bucket in per_region.values():
            if bucket['total_tasks'] > 0:
                bucket['success_rate'] = bucket['successful_tasks'] / bucket['total_tasks']
                bucket['avg_processing_time'] = bucket['total_processing_time'] / bucket['total_tasks']

        return per_region
    
    def _analyze_gpu_performance(self, task_logs: List[Dict]) -> Dict[str, Any]:
        """Aggregate per-GPU-type task counts, success rate and mean processing time."""
        per_gpu: Dict[str, Dict[str, Any]] = {}

        for rec in task_logs:
            gpu = rec.get('gpu_type', '')
            if not gpu:
                continue
            bucket = per_gpu.setdefault(gpu, {
                'total_tasks': 0,
                'successful_tasks': 0,
                'total_processing_time': 0,
            })
            bucket['total_tasks'] += 1
            if rec.get('status') == 'success':
                bucket['successful_tasks'] += 1
            if rec.get('processing_duration'):
                bucket['total_processing_time'] += rec['processing_duration']

        # Derive the per-GPU ratios once counting is done.
        for bucket in per_gpu.values():
            if bucket['total_tasks'] > 0:
                bucket['success_rate'] = bucket['successful_tasks'] / bucket['total_tasks']
                bucket['avg_processing_time'] = bucket['total_processing_time'] / bucket['total_tasks']

        return per_gpu
    
    def _analyze_cost_trend(self, cost_logs: List[Dict]) -> str:
        """Classify the cost trend by comparing the first and last samples.

        NOTE(review): the entries here are the 'data' payloads produced by
        _read_cost_logs and may not contain a 'timestamp' key, in which case
        the sort is a no-op — confirm the payload schema.
        """
        if len(cost_logs) < 2:
            return "insufficient_data"

        ordered = sorted(cost_logs, key=lambda rec: rec.get('timestamp', 0))
        earliest = ordered[0].get('total_cost', 0)
        latest = ordered[-1].get('total_cost', 0)

        # More than a ±10% move between the endpoints counts as a trend.
        if latest > earliest * 1.1:
            return "increasing"
        if latest < earliest * 0.9:
            return "decreasing"
        return "stable"
    
    def _analyze_region_costs(self, cost_logs: List[Dict]) -> Dict[str, Any]:
        """Sum the per-region cost breakdown across all cost records."""
        totals: Dict[str, float] = {}

        for rec in cost_logs:
            for region, amount in rec.get('region_breakdown', {}).items():
                totals[region] = totals.get(region, 0) + amount

        return totals
    
    def _analyze_gpu_costs(self, cost_logs: List[Dict]) -> Dict[str, Any]:
        """
        Sum GPU cost per GPU type.

        Bug fix: the entries in *cost_logs* are already the 'data' payloads
        (``_read_cost_logs`` appends ``log_entry.get('data', {})``), so the
        previous extra ``.get('data', {})`` indirection always produced
        ``{'unknown': 0}``. Read the fields directly, consistent with
        ``_analyze_region_costs``.

        Args:
            cost_logs: cost payload dicts with optional 'gpu_type'/'gpu_cost'

        Returns:
            Mapping of GPU type (or 'unknown') to summed cost; {} on error.
        """
        try:
            gpu_costs: Dict[str, float] = {}

            for rec in cost_logs:
                gpu_type = rec.get('gpu_type', 'unknown')
                gpu_costs[gpu_type] = gpu_costs.get(gpu_type, 0) + rec.get('gpu_cost', 0)

            return gpu_costs
        except Exception as e:
            logger.error(f"分析GPU成本失败: {e}")
            return {}
    
    def _analyze_usage_hours(self, task_logs: List[Dict]) -> Dict[int, int]:
        """Histogram of task submissions by hour of day (0-23)."""
        histogram: Dict[int, int] = {}

        for rec in task_logs:
            stamp = rec.get('submit_time')
            if not stamp:
                continue
            try:
                if isinstance(stamp, str):
                    moment = datetime.fromisoformat(stamp.replace('Z', '+00:00'))
                else:
                    # Numeric timestamps are rendered in the *local* timezone
                    # — TODO confirm that matches how they were produced.
                    moment = datetime.fromtimestamp(stamp)
            except (ValueError, TypeError):
                continue
            histogram[moment.hour] = histogram.get(moment.hour, 0) + 1

        return histogram
    
    def _find_peak_usage_hours(self, usage_hours: Dict[int, int]) -> List[int]:
        """Return the hours whose usage exceeds 1.5x the mean hourly usage, sorted."""
        if not usage_hours:
            return []

        # Peak = strictly more than 1.5 times the average hourly count.
        threshold = (sum(usage_hours.values()) / len(usage_hours)) * 1.5
        return sorted(hour for hour, count in usage_hours.items() if count > threshold)
    
    def _analyze_usage_patterns(self, user_tasks: Dict[str, List[Dict]]) -> Dict[str, Any]:
        """
        Classify users by activity level and estimate average session length.

        Users are bucketed as heavy (>10 tasks), light (<=3 tasks) or regular
        (otherwise). A user's session length is the span between their
        earliest and latest 'submit_time' (ISO string or numeric timestamp).

        Robustness fix: the min()/max() calls now sit inside the per-user
        try block, so a single user whose submit_time values mix strings and
        numbers (which makes the comparison raise TypeError) no longer aborts
        the whole analysis — that user's session is simply skipped.

        Args:
            user_tasks: mapping of user id to that user's task records

        Returns:
            Dict with heavy/light/regular user counts and the mean session
            duration in seconds (0 if no session could be measured).
        """
        patterns: Dict[str, Any] = {
            'heavy_users': 0,    # users with more than 10 tasks
            'light_users': 0,    # users with at most 3 tasks
            'regular_users': 0,  # everyone in between
            'avg_session_duration': 0,
        }

        session_durations: List[float] = []

        for tasks in user_tasks.values():
            count = len(tasks)
            if count > 10:
                patterns['heavy_users'] += 1
            elif count <= 3:
                patterns['light_users'] += 1
            else:
                patterns['regular_users'] += 1

            # A session needs at least two tasks to have a measurable span.
            if count < 2:
                continue
            try:
                first_task = min(tasks, key=lambda t: t.get('submit_time', 0))
                last_task = max(tasks, key=lambda t: t.get('submit_time', 0))

                if isinstance(first_task.get('submit_time'), str):
                    first_time = datetime.fromisoformat(first_task['submit_time'].replace('Z', '+00:00')).timestamp()
                    last_time = datetime.fromisoformat(last_task['submit_time'].replace('Z', '+00:00')).timestamp()
                else:
                    first_time = first_task.get('submit_time', 0)
                    last_time = last_task.get('submit_time', 0)

                duration = last_time - first_time
                if duration > 0:
                    session_durations.append(duration)
            except (ValueError, TypeError):
                continue

        if session_durations:
            patterns['avg_session_duration'] = statistics.mean(session_durations)

        return patterns
    
    def _analyze_ip_usage(self, user_ips: Dict[str, List[Dict]]) -> Dict[str, Any]:
        """Per-IP task counts, with the ten busiest IPs highlighted."""
        distribution = {ip: len(tasks) for ip, tasks in user_ips.items()}
        busiest = sorted(distribution.items(), key=lambda item: item[1], reverse=True)

        return {
            'unique_ips': len(user_ips),
            'top_ips': busiest[:10],  # ten most active IPs
            'ip_distribution': distribution,
        }


class ReportGenerator:
    """Builds daily / weekly / monthly operational reports from a DataAnalyzer."""

    def __init__(self, data_analyzer: DataAnalyzer, structured_logger):
        """
        Create a report generator.

        Args:
            data_analyzer: analyzer used to compute the report sections
            structured_logger: structured log writer for audit events
        """
        self.data_analyzer = data_analyzer
        self.structured_logger = structured_logger

        logger.info("报告生成器已初始化")
    
    def generate_daily_report(self, date: str) -> Dict[str, Any]:
        """
        Build the daily report for *date* ('YYYY-MM-DD').

        Returns:
            The report dict, or {"error": ...} on failure.
        """
        try:
            # NOTE(review): strptime yields local midnight and the day is
            # assumed to be exactly 24h (ignores DST) — confirm acceptable.
            window_start = datetime.strptime(date, '%Y-%m-%d').timestamp()
            window_end = window_start + 86400

            # Run the three analyses, then derive recommendations from them.
            performance = self.data_analyzer.analyze_task_processing_performance(window_start, window_end)
            costs = self.data_analyzer.analyze_cost_efficiency(window_start, window_end)
            users = self.data_analyzer.analyze_user_behavior(window_start, window_end)
            advice = self.data_analyzer.generate_optimization_recommendations(performance, costs, users)

            # NOTE(review): datetime.utcnow() is deprecated since Python 3.12;
            # consider datetime.now(timezone.utc) while keeping the "Z" suffix.
            report = {
                "report_type": "daily",
                "date": date,
                "generated_at": datetime.utcnow().isoformat() + "Z",
                "summary": {
                    "total_tasks": performance.get('overall_metrics', {}).get('total_tasks', 0),
                    "success_rate": 1 - performance.get('overall_metrics', {}).get('error_rate', 0),
                    "avg_response_time": performance.get('response_time_metrics', {}).get('average', 0),
                    "total_cost": costs.get('cost_summary', {}).get('total_cost', 0),
                    "active_users": users.get('user_metrics', {}).get('active_users', 0),
                },
                "performance_analysis": performance,
                "cost_analysis": costs,
                "user_analysis": users,
                "recommendations": advice,
            }

            # Audit-log that the report was produced.
            self.structured_logger.log_alert(
                alert_type="report_generated",
                severity="info",
                message=f"每日报告已生成: {date}",
                details={
                    "report_type": "daily",
                    "date": date,
                    "total_tasks": report['summary']['total_tasks'],
                    "recommendations_count": len(advice),
                },
            )

            return report

        except Exception as e:
            logger.error(f"生成每日报告失败: {e}")
            return {"error": str(e)}
    
    def generate_weekly_report(self, week_start_date: str) -> Dict[str, Any]:
        """
        Build the weekly report starting at *week_start_date* ('YYYY-MM-DD').

        Returns:
            The report dict, or {"error": ...} on failure.
        """
        try:
            # NOTE(review): local-midnight start and a flat 7*86400s window
            # (ignores DST shifts) — confirm acceptable.
            window_start = datetime.strptime(week_start_date, '%Y-%m-%d').timestamp()
            window_end = window_start + 604800

            # Run the three analyses, then derive recommendations from them.
            performance = self.data_analyzer.analyze_task_processing_performance(window_start, window_end)
            costs = self.data_analyzer.analyze_cost_efficiency(window_start, window_end)
            users = self.data_analyzer.analyze_user_behavior(window_start, window_end)
            advice = self.data_analyzer.generate_optimization_recommendations(performance, costs, users)

            return {
                "report_type": "weekly",
                "week_start_date": week_start_date,
                "generated_at": datetime.utcnow().isoformat() + "Z",
                "summary": {
                    "total_tasks": performance.get('overall_metrics', {}).get('total_tasks', 0),
                    "success_rate": 1 - performance.get('overall_metrics', {}).get('error_rate', 0),
                    "avg_response_time": performance.get('response_time_metrics', {}).get('average', 0),
                    "total_cost": costs.get('cost_summary', {}).get('total_cost', 0),
                    "active_users": users.get('user_metrics', {}).get('active_users', 0),
                },
                "performance_analysis": performance,
                "cost_analysis": costs,
                "user_analysis": users,
                "recommendations": advice,
            }

        except Exception as e:
            logger.error(f"生成每周报告失败: {e}")
            return {"error": str(e)}
    
    def generate_monthly_report(self, month: str) -> Dict[str, Any]:
        """
        Build the monthly report for *month* ('YYYY-MM').

        Returns:
            The report dict, or {"error": ...} on failure.
        """
        try:
            # The window runs from the 1st of the month to the 1st of the
            # next month (rolling over the year in December).
            year_str, month_str = month.split('-')
            year, month_num = int(year_str), int(month_str)
            window_start = datetime(year, month_num, 1).timestamp()
            if month_num == 12:
                window_end = datetime(year + 1, 1, 1).timestamp()
            else:
                window_end = datetime(year, month_num + 1, 1).timestamp()

            # Run the three analyses, then derive recommendations from them.
            performance = self.data_analyzer.analyze_task_processing_performance(window_start, window_end)
            costs = self.data_analyzer.analyze_cost_efficiency(window_start, window_end)
            users = self.data_analyzer.analyze_user_behavior(window_start, window_end)
            advice = self.data_analyzer.generate_optimization_recommendations(performance, costs, users)

            return {
                "report_type": "monthly",
                "month": month,
                "generated_at": datetime.utcnow().isoformat() + "Z",
                "summary": {
                    "total_tasks": performance.get('overall_metrics', {}).get('total_tasks', 0),
                    "success_rate": 1 - performance.get('overall_metrics', {}).get('error_rate', 0),
                    "avg_response_time": performance.get('response_time_metrics', {}).get('average', 0),
                    "total_cost": costs.get('cost_summary', {}).get('total_cost', 0),
                    "active_users": users.get('user_metrics', {}).get('active_users', 0),
                },
                "performance_analysis": performance,
                "cost_analysis": costs,
                "user_analysis": users,
                "recommendations": advice,
            }

        except Exception as e:
            logger.error(f"生成每月报告失败: {e}")
            return {"error": str(e)}


# Module-level singletons, populated by initialize_analysis_system().
data_analyzer = None
report_generator = None


def initialize_analysis_system(structured_logger, log_dir: str = "logs"):
    """
    Initialize the review/analysis system and bind the module-level singletons.

    Args:
        structured_logger: structured log writer shared by both components
        log_dir: directory containing the JSON-lines log files

    Returns:
        The (data_analyzer, report_generator) pair.
    """
    global data_analyzer, report_generator

    analyzer = DataAnalyzer(structured_logger, log_dir)
    reporter = ReportGenerator(analyzer, structured_logger)
    data_analyzer, report_generator = analyzer, reporter

    return data_analyzer, report_generator


if __name__ == "__main__":
    # Smoke test: exercise the pipeline with a stand-in structured logger.
    logging.basicConfig(level=logging.INFO)

    class MockLogger:
        """Minimal stand-in for the structured logger."""
        def log_alert(self, **kwargs):
            print(f"Alert: {kwargs}")

    # Wire up an analyzer and a report generator against the default log dir.
    analyzer = DataAnalyzer(MockLogger())
    generator = ReportGenerator(analyzer, MockLogger())

    # Produce today's report and dump it as pretty-printed JSON.
    today = datetime.now().strftime('%Y-%m-%d')
    report = generator.generate_daily_report(today)
    print(f"每日报告: {json.dumps(report, indent=2, ensure_ascii=False)}")

    print("复盘分析系统测试完成")
