"""
报告生成模块
"""

import csv
import json
import logging
from collections import Counter
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Any

class ReportGenerator:
    """Generates JSON and CSV reports for image-cropping runs.

    All report files are written beneath ``output/reports``; the directory
    is created on construction if it does not already exist.
    """

    def __init__(self, config: Dict[str, Any]):
        """Initialize the report generator.

        Args:
            config: System configuration dict; its ``target_sizes``,
                ``processing`` and ``output`` sections are echoed into
                summary reports.
        """
        self.config = config
        self.logger = logging.getLogger('image_cropping_system')

        # Reports go to a fixed directory; create it eagerly so every
        # generate_* method can assume it exists.
        self.reports_dir = Path('output/reports')
        self.reports_dir.mkdir(parents=True, exist_ok=True)

    def generate_summary_report(self, processing_results: List[Dict[str, Any]],
                              processing_stats: Dict[str, Any]) -> Dict[str, Any]:
        """Generate and save a JSON summary report for a processing run.

        Args:
            processing_results: Per-file result dicts; keys such as
                ``processing_status``, ``cropped_files`` and ``size_category``
                are read tolerantly via ``.get``.
            processing_stats: Aggregate statistics, embedded verbatim.

        Returns:
            The report dict that was written to disk, or ``{}`` on error.
        """
        try:
            # Capture "now" once so the report body's timestamp and the
            # report file name always agree (previously datetime.now() was
            # called twice and could straddle a second boundary).
            now = datetime.now()

            # Success/failure counts.
            total_files = len(processing_results)
            successful_files = sum(1 for r in processing_results
                                 if r.get('processing_status') == 'success')
            failed_files = total_files - successful_files

            # Crop statistics: total crop count and distribution by size name.
            total_crops = 0
            size_distribution: Counter = Counter()
            for result in processing_results:
                cropped_files = result.get('cropped_files', [])
                total_crops += len(cropped_files)
                size_distribution.update(cf.get('size_name', 'unknown')
                                         for cf in cropped_files)

            # Distribution of input files across size categories.
            size_category_stats = Counter(result.get('size_category', 'unknown')
                                          for result in processing_results)

            report = {
                'timestamp': now.isoformat(),
                'summary': {
                    'total_files': total_files,
                    'successful_files': successful_files,
                    'failed_files': failed_files,
                    'success_rate': successful_files / total_files if total_files > 0 else 0,
                    'total_crops': total_crops,
                    'average_crops_per_file': total_crops / total_files if total_files > 0 else 0
                },
                # Plain dicts so the report JSON-serializes predictably.
                'size_distribution': dict(size_distribution),
                'size_category_distribution': dict(size_category_stats),
                'processing_stats': processing_stats,
                'configuration': {
                    'target_sizes': self.config.get('target_sizes', {}),
                    'processing': self.config.get('processing', {}),
                    'output': self.config.get('output', {})
                },
                'detailed_results': processing_results
            }

            # Persist the report with a second-resolution timestamped name.
            report_file = self.reports_dir / f'summary_report_{now.strftime("%Y%m%d_%H%M%S")}.json'
            with open(report_file, 'w', encoding='utf-8') as f:
                json.dump(report, f, ensure_ascii=False, indent=2)

            self.logger.info(f"摘要报告已生成: {report_file}")
            return report

        except Exception as e:
            # logger.exception preserves the traceback; logger.error would not.
            self.logger.exception(f"生成摘要报告时发生错误: {e}")
            return {}

    def generate_csv_report(self, processing_results: List[Dict[str, Any]]) -> str:
        """Generate a CSV report with one row per processed file.

        Args:
            processing_results: Per-file result dicts; missing keys fall back
                to empty/zero defaults.

        Returns:
            Path of the written CSV file as a string, or ``""`` on error.
        """
        try:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            csv_file = self.reports_dir / f'detailed_report_{timestamp}.csv'

            with open(csv_file, 'w', newline='', encoding='utf-8') as f:
                writer = csv.writer(f)

                # Header row (user-facing column titles, kept verbatim).
                headers = [
                    '原始文件名', '处理后文件名', '宽度', '高度', '宽高比', 
                    '尺寸分类', '内容区域', '置信度', '识别状态', '裁剪状态',
                    '裁剪文件数', '处理时间'
                ]
                writer.writerow(headers)

                # One data row per result; every field read tolerantly.
                for result in processing_results:
                    row = [
                        result.get('original_name', ''),
                        result.get('processed_name', ''),
                        result.get('width', 0),
                        result.get('height', 0),
                        f"{result.get('aspect_ratio', 0):.2f}",
                        result.get('size_category', ''),
                        str(result.get('content_bbox', [])),
                        f"{result.get('confidence', 0):.2f}",
                        result.get('recognition_status', ''),
                        result.get('cropping_status', ''),
                        len(result.get('cropped_files', [])),
                        result.get('processing_time', 0)
                    ]
                    writer.writerow(row)

            self.logger.info(f"CSV报告已生成: {csv_file}")
            return str(csv_file)

        except Exception as e:
            # logger.exception preserves the traceback; logger.error would not.
            self.logger.exception(f"生成CSV报告时发生错误: {e}")
            return ""

    def generate_error_report(self, error_log: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Generate and save a JSON report summarizing logged errors.

        Args:
            error_log: List of error record dicts; ``error_type`` and
                ``severity`` default to ``'unknown'`` when missing.

        Returns:
            The error-report dict, or ``{}`` when ``error_log`` is empty or
            report generation itself fails.
        """
        try:
            if not error_log:
                return {}

            timestamp = datetime.now().isoformat()

            # Tally error types and severities. Using .get(...) instead of
            # direct indexing so a partially-filled error record cannot raise
            # a KeyError while building an *error* report.
            error_types = Counter(e.get('error_type', 'unknown') for e in error_log)
            severity_distribution = Counter(e.get('severity', 'unknown') for e in error_log)

            error_report = {
                'timestamp': timestamp,
                'total_errors': len(error_log),
                'error_types': dict(error_types),
                'severity_distribution': dict(severity_distribution),
                'errors': error_log
            }

            # Persist the error report.
            error_report_file = self.reports_dir / f'error_report_{datetime.now().strftime("%Y%m%d_%H%M%S")}.json'
            with open(error_report_file, 'w', encoding='utf-8') as f:
                json.dump(error_report, f, ensure_ascii=False, indent=2)

            self.logger.info(f"错误报告已生成: {error_report_file}")
            return error_report

        except Exception as e:
            # logger.exception preserves the traceback; logger.error would not.
            self.logger.exception(f"生成错误报告时发生错误: {e}")
            return {}

    def generate_performance_report(self, performance_stats: Dict[str, Any]) -> Dict[str, Any]:
        """Generate and save a JSON performance report with analysis.

        Args:
            performance_stats: Raw performance statistics; also fed to the
                efficiency/bottleneck/recommendation helpers.

        Returns:
            The performance-report dict, or ``{}`` on error.
        """
        try:
            timestamp = datetime.now().isoformat()

            performance_report = {
                'timestamp': timestamp,
                'performance_stats': performance_stats,
                'analysis': {
                    'efficiency': self._analyze_efficiency(performance_stats),
                    'bottlenecks': self._identify_bottlenecks(performance_stats),
                    'recommendations': self._generate_recommendations(performance_stats)
                }
            }

            # Persist the performance report.
            performance_report_file = self.reports_dir / f'performance_report_{datetime.now().strftime("%Y%m%d_%H%M%S")}.json'
            with open(performance_report_file, 'w', encoding='utf-8') as f:
                json.dump(performance_report, f, ensure_ascii=False, indent=2)

            self.logger.info(f"性能报告已生成: {performance_report_file}")
            return performance_report

        except Exception as e:
            # logger.exception preserves the traceback; logger.error would not.
            self.logger.exception(f"生成性能报告时发生错误: {e}")
            return {}

    def _analyze_efficiency(self, stats: Dict[str, Any]) -> Dict[str, Any]:
        """Compute a 0-100 efficiency score from task and timing statistics.

        Returns a dict with ``overall_efficiency``, ``task_completion_rate``
        and ``average_processing_time`` (consistent key set on all paths).
        """
        total_tasks = stats.get('total_tasks', 0)
        completed_tasks = stats.get('completed_tasks', 0)
        average_time = stats.get('average_processing_time', 0)

        if total_tasks == 0:
            # Same key set as the normal path so consumers can rely on it
            # (previously 'average_processing_time' was missing here).
            return {'overall_efficiency': 0, 'task_completion_rate': 0,
                    'average_processing_time': average_time}

        completion_rate = completed_tasks / total_tasks

        # Base efficiency score (0-100) from completion rate alone.
        efficiency_score = min(100, completion_rate * 100)

        if average_time > 0:
            # Scale by processing speed, using 10 seconds per task as the
            # baseline; the factor is floored at 0.1 so very slow runs still
            # score above zero.
            time_factor = max(0.1, 10 / average_time)
            efficiency_score *= time_factor

        return {
            'overall_efficiency': min(100, efficiency_score),
            'task_completion_rate': completion_rate,
            'average_processing_time': average_time
        }

    def _identify_bottlenecks(self, stats: Dict[str, Any]) -> List[str]:
        """Return human-readable bottleneck warnings derived from stats."""
        bottlenecks = []

        # Failure rate above 10% suggests systematic errors.
        total_tasks = stats.get('total_tasks', 0)
        failed_tasks = stats.get('failed_tasks', 0)

        if total_tasks > 0 and failed_tasks / total_tasks > 0.1:
            bottlenecks.append("高失败率，建议检查错误日志")

        # Memory usage above 1 GB (value assumed to be in MB).
        memory_usage = stats.get('memory_usage', 0)
        if memory_usage > 1024:
            bottlenecks.append("内存使用过高，建议优化内存管理")

        # CPU usage above 80%.
        cpu_usage = stats.get('cpu_usage', 0)
        if cpu_usage > 80:
            bottlenecks.append("CPU使用率过高，建议优化算法或增加线程")

        # Average per-task processing time above 30 seconds.
        average_time = stats.get('average_processing_time', 0)
        if average_time > 30:
            bottlenecks.append("平均处理时间过长，建议优化处理流程")

        return bottlenecks

    def _generate_recommendations(self, stats: Dict[str, Any]) -> List[str]:
        """Return tuning recommendations derived from stats; never empty."""
        recommendations = []

        # Completion rate below 90% points at error handling / retries.
        completion_rate = stats.get('completed_tasks', 0) / max(1, stats.get('total_tasks', 1))

        if completion_rate < 0.9:
            recommendations.append("提高错误处理和重试机制的健壮性")

        # Memory above 512 (assumed MB): suggest tighter memory management.
        memory_usage = stats.get('memory_usage', 0)
        if memory_usage > 512:
            recommendations.append("考虑实现更积极的垃圾回收策略")

        # Average time above 10 seconds: suggest batching/algorithm work.
        average_time = stats.get('average_processing_time', 0)
        if average_time > 10:
            recommendations.append("考虑增加批处理大小或优化算法")

        # Always return at least one item.
        if not recommendations:
            recommendations.append("系统运行良好，继续保持当前配置")

        return recommendations