"""
改进效果跟踪服务实现

提供改进效果的跟踪和评估功能，包括：
- 改进前后对比
- 效果量化评估
- 跟踪报告生成
- 趋势分析
"""
import math
import statistics
from typing import List, Dict, Any, Optional, Tuple
from datetime import datetime, timedelta
from dataclasses import asdict

from ..models.analysis_models import (
    QualityAssessment, Improvement, EffectReport, QualityIssue
)


class ImprovementTrackingService:
    """Service that tracks and evaluates the effect of quality improvements.

    Capabilities:
    - before/after comparison via metric snapshots
    - quantitative scoring of improvement effectiveness
    - Markdown tracking-report generation
    - trend analysis and simple linear forecasting
    """

    def __init__(self):
        # Relative weights for aggregating effect-evaluation metrics.
        # NOTE(review): these weights are currently not referenced anywhere
        # in this class — confirm whether they are used by callers.
        self.metric_weights = {
            'overall_score': 0.3,
            'dimension_scores': 0.4,
            'issue_reduction': 0.2,
            'user_satisfaction': 0.1
        }
        
        # Thresholds (fractions, compared against effectiveness/100) used to
        # classify how strong an improvement is.
        self.effect_thresholds = {
            'significant': 0.15,  # >= 15% counts as a significant improvement
            'moderate': 0.08,     # 8-15% is a moderate improvement
            'minor': 0.03,        # 3-8% is a minor improvement
            'negligible': 0.03    # below 3% is negligible
            # NOTE(review): the 'negligible' key is never read;
            # _categorize_effects falls through to its 'negligible' bucket
            # for anything below the 'minor' threshold.
        }
        
        # Declarative definition of the metric groups being tracked.
        # NOTE(review): only documents the schema; the computation in
        # _calculate_baseline_metrics derives its keys independently.
        self.tracking_metrics = {
            'quality_metrics': [
                'overall_score',
                'methodology_score',
                'novelty_score',
                'impact_score',
                'reproducibility_score',
                'presentation_score',
                'ethics_score'
            ],
            'issue_metrics': [
                'total_issues',
                'critical_issues',
                'high_priority_issues',
                'resolved_issues'
            ],
            'process_metrics': [
                'implementation_time',
                'resource_usage',
                'completion_rate'
            ]
        }
    
    async def create_baseline_snapshot(self, assessment: QualityAssessment, 
                                     issues: List[QualityIssue],
                                     metadata: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Create a baseline snapshot of the current quality state.

        Args:
            assessment: quality assessment providing overall and per-dimension
                scores, strengths and weaknesses.
            issues: currently known quality issues.
            metadata: optional extra data stored verbatim on the snapshot.

        Returns:
            A snapshot dict containing timestamp, assessment summary, issue
            summary and derived ``baseline_metrics``; an empty dict on error.
        """
        try:
            snapshot = {
                'timestamp': datetime.now(),
                'type': 'baseline',
                'quality_assessment': {
                    'overall_score': assessment.overall_score,
                    # copy() so later mutation of the assessment does not
                    # retroactively change the snapshot
                    'dimension_scores': assessment.dimension_scores.copy(),
                    'strengths': assessment.strengths.copy(),
                    'weaknesses': assessment.weaknesses.copy(),
                    'grade': assessment.get_grade()
                },
                'issues_summary': {
                    'total_issues': len(issues),
                    'by_severity': self._count_issues_by_severity(issues),
                    'by_category': self._count_issues_by_category(issues),
                    'issue_details': [asdict(issue) for issue in issues]
                },
                'metadata': metadata or {}
            }
            
            # Derive the flat numeric metrics used for before/after comparison.
            baseline_metrics = await self._calculate_baseline_metrics(assessment, issues)
            snapshot['baseline_metrics'] = baseline_metrics
            
            return snapshot
            
        except Exception as e:
            # Best-effort: swallow errors and return an empty snapshot.
            print(f"创建基线快照失败: {e}")
            return {}
    
    async def track_improvement_effect(self, improvement_id: str,
                                     before_snapshot: Dict[str, Any],
                                     after_assessment: QualityAssessment,
                                     after_issues: List[QualityIssue],
                                     implementation_data: Optional[Dict[str, Any]] = None) -> EffectReport:
        """Compare a baseline snapshot with the post-improvement state.

        Args:
            improvement_id: identifier of the improvement being tracked.
            before_snapshot: snapshot produced by ``create_baseline_snapshot``.
            after_assessment: quality assessment after the improvement.
            after_issues: remaining quality issues after the improvement.
            implementation_data: optional dict; ``cost_efficiency`` and
                ``time_efficiency`` (defaulting to 1.0) scale the overall
                effectiveness.

        Returns:
            An ``EffectReport`` with before/after metrics, per-metric
            improvement percentages and an overall effectiveness score.
        """
        try:
            # Build the "after" snapshot with the same machinery as the
            # baseline. NOTE(review): the snapshot's top-level 'type' stays
            # 'baseline'; only the metadata records 'after_improvement' —
            # confirm this is intended.
            after_snapshot = await self.create_baseline_snapshot(
                after_assessment, after_issues, 
                {'type': 'after_improvement', 'improvement_id': improvement_id}
            )
            
            # Start from an empty report and fill it in below.
            effect_report = EffectReport(
                improvement_id=improvement_id,
                before_metrics={},
                after_metrics={},
                improvement_percentage={},
                overall_effectiveness=0.0
            )
            
            # Extract the flat metric dicts computed for each snapshot.
            before_metrics = before_snapshot.get('baseline_metrics', {})
            after_metrics = after_snapshot.get('baseline_metrics', {})
            
            effect_report.before_metrics = before_metrics
            effect_report.after_metrics = after_metrics
            
            # Per-metric percentage deltas (mutates effect_report in place).
            await self._calculate_improvement_percentages(effect_report)
            
            # Weighted aggregate effectiveness, optionally scaled by cost/time.
            effect_report.overall_effectiveness = await self._calculate_overall_effectiveness(
                effect_report, implementation_data
            )
            
            return effect_report
            
        except Exception as e:
            print(f"跟踪改进效果失败: {e}")
            # NOTE(review): assumes EffectReport's remaining fields have
            # defaults — confirm against the dataclass definition.
            return EffectReport(improvement_id=improvement_id)
    
    async def _calculate_baseline_metrics(self, assessment: QualityAssessment, 
                                        issues: List[QualityIssue]) -> Dict[str, float]:
        """Flatten an assessment plus its issues into a numeric metric dict.

        Keys produced: ``overall_score``, ``<dimension>_score``,
        ``total_issues``, ``<severity>_issues``, ``<category>_issues``,
        ``issue_severity_score`` and score-distribution statistics.
        """
        metrics = {}
        
        # Quality metrics: overall plus one entry per dimension.
        metrics['overall_score'] = assessment.overall_score
        for dimension, score in assessment.dimension_scores.items():
            metrics[f'{dimension}_score'] = score
        
        # Issue-count metrics.
        metrics['total_issues'] = len(issues)
        
        severity_counts = self._count_issues_by_severity(issues)
        for severity, count in severity_counts.items():
            metrics[f'{severity}_issues'] = count
        
        category_counts = self._count_issues_by_category(issues)
        for category, count in category_counts.items():
            metrics[f'{category}_issues'] = count
        
        # Average severity weight across all issues (0 when there are none);
        # unknown severities default to 0.5.
        severity_weights = {'critical': 1.0, 'high': 0.8, 'medium': 0.6, 'low': 0.4}
        total_severity_score = sum(
            severity_weights.get(issue.severity, 0.5) for issue in issues
        )
        metrics['issue_severity_score'] = total_severity_score / len(issues) if issues else 0
        
        # Distribution statistics over the dimension scores.
        if assessment.dimension_scores:
            scores = list(assessment.dimension_scores.values())
            metrics['score_variance'] = statistics.variance(scores) if len(scores) > 1 else 0
            metrics['min_dimension_score'] = min(scores)
            metrics['max_dimension_score'] = max(scores)
        
        return metrics
    
    def _count_issues_by_severity(self, issues: List[QualityIssue]) -> Dict[str, int]:
        """Count issues per known severity; unknown severities are ignored."""
        counts = {'critical': 0, 'high': 0, 'medium': 0, 'low': 0}
        for issue in issues:
            if issue.severity in counts:
                counts[issue.severity] += 1
        return counts
    
    def _count_issues_by_category(self, issues: List[QualityIssue]) -> Dict[str, int]:
        """Count issues grouped by their ``issue_type`` attribute."""
        counts = {}
        for issue in issues:
            category = issue.issue_type
            counts[category] = counts.get(category, 0) + 1
        return counts
    
    async def _calculate_improvement_percentages(self, effect_report: EffectReport) -> None:
        """Fill ``effect_report.improvement_percentage`` for shared metrics.

        Positive values always mean "better": for issue/severity metrics a
        decrease counts as improvement, for score metrics an increase does.
        Metrics present only on one side are skipped.
        """
        for metric in effect_report.before_metrics:
            if metric in effect_report.after_metrics:
                before_value = effect_report.before_metrics[metric]
                after_value = effect_report.after_metrics[metric]
                
                if before_value != 0:
                    # For issue counts / severity score, a reduction is good.
                    if 'issues' in metric or 'severity_score' in metric:
                        improvement = ((before_value - after_value) / before_value) * 100
                    else:
                        # For quality scores, an increase is good.
                        improvement = ((after_value - before_value) / before_value) * 100
                    
                    effect_report.improvement_percentage[metric] = improvement
                else:
                    # Zero-denominator case.
                    # NOTE(review): an issue metric rising from 0 to >0 is
                    # recorded here as +100% (an improvement) — that looks
                    # inverted for 'issues' metrics; confirm intent.
                    if after_value > 0:
                        effect_report.improvement_percentage[metric] = 100.0
                    else:
                        effect_report.improvement_percentage[metric] = 0.0
    
    async def _calculate_overall_effectiveness(self, effect_report: EffectReport,
                                             implementation_data: Optional[Dict[str, Any]]) -> float:
        """Aggregate per-metric improvements into one 0-100 effectiveness score.

        Quality-score metrics are weighted 0.8 (overall) / 0.6 (dimensions);
        issue metrics 0.7 (critical) / 0.5. When ``implementation_data`` is
        given, the result is scaled by the geometric mean of its
        ``cost_efficiency`` and ``time_efficiency`` factors.
        """
        if not effect_report.improvement_percentage:
            return 0.0
        
        # Collect weighted per-metric improvement values.
        weighted_improvements = []
        
        # Quality-score improvements (highest weights).
        quality_metrics = ['overall_score'] + [f'{dim}_score' for dim in 
                          ['methodology', 'novelty', 'impact', 'reproducibility', 'presentation', 'ethics']]
        
        for metric in quality_metrics:
            if metric in effect_report.improvement_percentage:
                improvement = effect_report.improvement_percentage[metric]
                weight = 0.8 if metric == 'overall_score' else 0.6
                weighted_improvements.append(improvement * weight)
        
        # Issue-reduction improvements (medium weights).
        issue_metrics = [key for key in effect_report.improvement_percentage.keys() if 'issues' in key]
        for metric in issue_metrics:
            improvement = effect_report.improvement_percentage[metric]
            weight = 0.7 if 'critical' in metric else 0.5
            weighted_improvements.append(improvement * weight)
        
        # Weighted average across the collected terms.
        if weighted_improvements:
            overall_effectiveness = sum(weighted_improvements) / len(weighted_improvements)
            
            # Scale by implementation cost/time efficiency (geometric mean).
            if implementation_data:
                cost_factor = implementation_data.get('cost_efficiency', 1.0)
                time_factor = implementation_data.get('time_efficiency', 1.0)
                overall_effectiveness *= (cost_factor * time_factor) ** 0.5
            
            return max(0.0, min(100.0, overall_effectiveness))  # clamp to 0-100%
        
        return 0.0
    
    async def generate_tracking_report(self, effect_reports: List[EffectReport],
                                     time_period: Optional[Tuple[datetime, datetime]] = None) -> str:
        """Render a Markdown (Chinese-language) tracking report.

        Args:
            effect_reports: reports to summarize; a placeholder string is
                returned when empty.
            time_period: optional (start, end) range shown in the header.

        Returns:
            The full report as a newline-joined string.
        """
        if not effect_reports:
            return "无改进效果数据可报告。"
        
        report_lines = [
            "# 改进效果跟踪报告",
            f"生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
            f"跟踪改进数量: {len(effect_reports)}",
            ""
        ]
        
        # Optional tracked time range.
        if time_period:
            start_date, end_date = time_period
            report_lines.extend([
                f"跟踪时间范围: {start_date.strftime('%Y-%m-%d')} 至 {end_date.strftime('%Y-%m-%d')}",
                ""
            ])
        
        # Aggregate statistics section.
        overall_stats = await self._calculate_overall_statistics(effect_reports)
        report_lines.extend([
            "## 整体效果统计",
            f"- 平均改进效果: {overall_stats['average_effectiveness']:.1f}%",
            f"- 最佳改进效果: {overall_stats['best_effectiveness']:.1f}%",
            f"- 显著改进数量: {overall_stats['significant_improvements']}个",
            f"- 改进成功率: {overall_stats['success_rate']:.1f}%",
            ""
        ])
        
        # Effect-category breakdown section.
        effect_categories = await self._categorize_effects(effect_reports)
        report_lines.extend([
            "## 改进效果分类",
        ])
        
        for category, reports in effect_categories.items():
            if reports:
                # Chinese display names for the category keys.
                category_names = {
                    'significant': '显著改进',
                    'moderate': '中等改进',
                    'minor': '轻微改进',
                    'negligible': '微小改进',
                    'negative': '负面效果'
                }
                category_name = category_names.get(category, category)
                report_lines.append(f"- {category_name}: {len(reports)}个")
        
        report_lines.append("")
        
        # Detailed analysis section.
        report_lines.extend([
            "## 详细改进分析",
        ])
        
        # Top 5 improvements by overall effectiveness.
        best_improvements = sorted(effect_reports, 
                                 key=lambda x: x.overall_effectiveness, 
                                 reverse=True)[:5]
        
        report_lines.append("### 最佳改进效果 (前5个)")
        for i, report in enumerate(best_improvements, 1):
            report_lines.extend([
                f"\n**{i}. 改进ID: {report.improvement_id}**",
                f"- 整体效果: {report.overall_effectiveness:.1f}%",
                f"- 主要改进指标:"
            ])
            
            # Top 3 positively-improved metrics for this report.
            top_improvements = sorted(
                [(k, v) for k, v in report.improvement_percentage.items() if v > 0],
                key=lambda x: x[1], reverse=True
            )[:3]
            
            for metric, improvement in top_improvements:
                report_lines.append(f"  - {metric}: +{improvement:.1f}%")
        
        # Trend section (only meaningful with more than one report).
        if len(effect_reports) > 1:
            trend_analysis = await self._analyze_improvement_trends(effect_reports)
            report_lines.extend([
                "",
                "## 改进趋势分析",
                f"- 效果趋势: {trend_analysis['trend_direction']}",
                f"- 平均改进率: {trend_analysis['average_improvement_rate']:.1f}%/次",
                f"- 效果稳定性: {trend_analysis['stability']}",
            ])
        
        # Recommendations / summary section.
        recommendations = await self._generate_recommendations(effect_reports)
        report_lines.extend([
            "",
            "## 建议和总结",
        ])
        
        for recommendation in recommendations:
            report_lines.append(f"- {recommendation}")
        
        return "\n".join(report_lines)
    
    async def _calculate_overall_statistics(self, effect_reports: List[EffectReport]) -> Dict[str, float]:
        """Compute summary statistics over the reports' effectiveness scores.

        Returns keys: average/best/worst effectiveness, standard deviation,
        count of significant improvements and the positive-effect success
        rate (percent). Empty dict for empty input.
        """
        if not effect_reports:
            return {}
        
        effectiveness_scores = [report.overall_effectiveness for report in effect_reports]
        
        stats = {
            'average_effectiveness': statistics.mean(effectiveness_scores),
            'best_effectiveness': max(effectiveness_scores),
            'worst_effectiveness': min(effectiveness_scores),
            'effectiveness_std': statistics.stdev(effectiveness_scores) if len(effectiveness_scores) > 1 else 0
        }
        
        # Count improvements above the 'significant' threshold (threshold is
        # a fraction, scores are percentages — hence the *100).
        significant_count = sum(1 for score in effectiveness_scores 
                              if score >= self.effect_thresholds['significant'] * 100)
        stats['significant_improvements'] = significant_count
        
        # Success rate = share of improvements with a positive effect.
        positive_count = sum(1 for score in effectiveness_scores if score > 0)
        stats['success_rate'] = (positive_count / len(effectiveness_scores)) * 100
        
        return stats
    
    async def _categorize_effects(self, effect_reports: List[EffectReport]) -> Dict[str, List[EffectReport]]:
        """Bucket reports by effect strength using ``effect_thresholds``."""
        categories = {
            'significant': [],
            'moderate': [],
            'minor': [],
            'negligible': [],
            'negative': []
        }
        
        for report in effect_reports:
            effectiveness = report.overall_effectiveness / 100  # to fraction
            
            if effectiveness < 0:
                categories['negative'].append(report)
            elif effectiveness >= self.effect_thresholds['significant']:
                categories['significant'].append(report)
            elif effectiveness >= self.effect_thresholds['moderate']:
                categories['moderate'].append(report)
            elif effectiveness >= self.effect_thresholds['minor']:
                categories['minor'].append(report)
            else:
                categories['negligible'].append(report)
        
        return categories
    
    async def _analyze_improvement_trends(self, effect_reports: List[EffectReport]) -> Dict[str, Any]:
        """Analyze the trend of effectiveness over the report sequence.

        Returns a dict with trend direction (Chinese label), linear slope,
        mean step-to-step improvement rate, a stability label, and variance.
        """
        # Order by improvement_id as a proxy for chronology.
        # NOTE(review): assumes improvement_id sorts chronologically —
        # confirm the id format.
        sorted_reports = sorted(effect_reports, key=lambda x: x.improvement_id)
        
        effectiveness_scores = [report.overall_effectiveness for report in sorted_reports]
        
        # Trend direction from a simple linear-regression slope; |slope| <= 1
        # is treated as stable.
        if len(effectiveness_scores) >= 2:
            # Simple linear trend over the index sequence.
            x_values = list(range(len(effectiveness_scores)))
            slope = self._calculate_slope(x_values, effectiveness_scores)
            
            if slope > 1:
                trend_direction = "上升趋势"
            elif slope < -1:
                trend_direction = "下降趋势"
            else:
                trend_direction = "稳定趋势"
        else:
            trend_direction = "数据不足"
            slope = 0
        
        # Mean of consecutive differences as the average improvement rate.
        if len(effectiveness_scores) > 1:
            improvements = [effectiveness_scores[i] - effectiveness_scores[i-1] 
                          for i in range(1, len(effectiveness_scores))]
            average_improvement_rate = statistics.mean(improvements)
        else:
            average_improvement_rate = 0
        
        # Stability label from the variance of the scores.
        if len(effectiveness_scores) > 2:
            variance = statistics.variance(effectiveness_scores)
            if variance < 25:  # variance below 25
                stability = "高稳定性"
            elif variance < 100:
                stability = "中等稳定性"
            else:
                stability = "低稳定性"
        else:
            stability = "数据不足"
        
        return {
            'trend_direction': trend_direction,
            'slope': slope,
            'average_improvement_rate': average_improvement_rate,
            'stability': stability,
            'variance': statistics.variance(effectiveness_scores) if len(effectiveness_scores) > 1 else 0
        }
    
    def _calculate_slope(self, x_values: List[float], y_values: List[float]) -> float:
        """Ordinary least-squares slope; 0 for degenerate input."""
        if len(x_values) != len(y_values) or len(x_values) < 2:
            return 0
        
        n = len(x_values)
        sum_x = sum(x_values)
        sum_y = sum(y_values)
        sum_xy = sum(x * y for x, y in zip(x_values, y_values))
        sum_x2 = sum(x * x for x in x_values)
        
        # Guard against a zero denominator (all x values identical).
        denominator = n * sum_x2 - sum_x * sum_x
        if denominator == 0:
            return 0
        
        slope = (n * sum_xy - sum_x * sum_y) / denominator
        return slope
    
    async def _generate_recommendations(self, effect_reports: List[EffectReport]) -> List[str]:
        """Derive (Chinese-language) recommendations from the reports."""
        recommendations = []
        
        if not effect_reports:
            return ["无足够数据生成建议"]
        
        # Overall-effectiveness based advice.
        avg_effectiveness = statistics.mean([r.overall_effectiveness for r in effect_reports])
        
        if avg_effectiveness < 10:
            recommendations.append("整体改进效果较低，建议重新评估改进策略的有效性")
        elif avg_effectiveness > 50:
            recommendations.append("改进效果显著，建议继续推广成功的改进方法")
        
        # High spread in effectiveness suggests inconsistent methods.
        effectiveness_scores = [r.overall_effectiveness for r in effect_reports]
        if len(effectiveness_scores) > 1:
            variance = statistics.variance(effectiveness_scores)
            if variance > 400:  # high variance
                recommendations.append("改进效果差异较大，建议分析成功案例的关键因素")
        
        # Flag improvements with negative effect.
        negative_effects = [r for r in effect_reports if r.overall_effectiveness < 0]
        if negative_effects:
            recommendations.append(f"发现{len(negative_effects)}个负面效果，建议深入分析原因并调整策略")
        
        # Group by improvement type to find the best-performing type.
        improvement_types = {}
        for report in effect_reports:
            # Type is taken as the prefix of improvement_id before the first
            # '_' (simplified heuristic).
            improvement_type = report.improvement_id.split('_')[0] if '_' in report.improvement_id else 'unknown'
            if improvement_type not in improvement_types:
                improvement_types[improvement_type] = []
            improvement_types[improvement_type].append(report.overall_effectiveness)
        
        # Recommend the type whose average beats the overall average.
        if improvement_types:
            type_averages = {t: statistics.mean(scores) for t, scores in improvement_types.items()}
            best_type = max(type_averages.items(), key=lambda x: x[1])
            if best_type[1] > avg_effectiveness:
                recommendations.append(f"'{best_type[0]}'类型的改进效果最佳，建议优先采用")
        
        # Generic advice when the sample size is small.
        if len(effect_reports) < 5:
            recommendations.append("建议收集更多改进数据以获得更准确的效果分析")
        
        return recommendations
    
    async def compare_improvement_methods(self, effect_reports: List[EffectReport],
                                        grouping_key: str = 'improvement_type') -> Dict[str, Any]:
        """Group reports and compare per-group effectiveness statistics.

        Args:
            effect_reports: reports to compare.
            grouping_key: only ``'improvement_type'`` produces real grouping
                (by improvement_id prefix); anything else collapses all
                reports into one ``'default'`` group.

        Returns:
            Mapping of group name to stats (count, avg/max/min effectiveness,
            success rate, std deviation, rank, and the member reports).
        """
        if not effect_reports:
            return {}
        
        # Group the reports by the requested key.
        groups = {}
        for report in effect_reports:
            # Simplified: derive the group from the improvement_id prefix.
            if grouping_key == 'improvement_type':
                group_key = report.improvement_id.split('_')[0] if '_' in report.improvement_id else 'unknown'
            else:
                group_key = 'default'
            
            if group_key not in groups:
                groups[group_key] = []
            groups[group_key].append(report)
        
        # Per-group statistics.
        comparison = {}
        for group_name, group_reports in groups.items():
            effectiveness_scores = [r.overall_effectiveness for r in group_reports]
            
            comparison[group_name] = {
                'count': len(group_reports),
                'average_effectiveness': statistics.mean(effectiveness_scores),
                'max_effectiveness': max(effectiveness_scores),
                'min_effectiveness': min(effectiveness_scores),
                'success_rate': sum(1 for score in effectiveness_scores if score > 0) / len(effectiveness_scores) * 100,
                'reports': group_reports
            }
            
            if len(effectiveness_scores) > 1:
                comparison[group_name]['std_deviation'] = statistics.stdev(effectiveness_scores)
            else:
                comparison[group_name]['std_deviation'] = 0
        
        # Rank groups by average effectiveness (1 = best).
        sorted_groups = sorted(comparison.items(), 
                             key=lambda x: x[1]['average_effectiveness'], 
                             reverse=True)
        
        for i, (group_name, stats) in enumerate(sorted_groups, 1):
            comparison[group_name]['rank'] = i
        
        return comparison
    
    async def predict_future_improvements(self, effect_reports: List[EffectReport],
                                        prediction_horizon: int = 3) -> Dict[str, Any]:
        """Forecast future effectiveness via simple linear extrapolation.

        Args:
            effect_reports: history; at least 3 reports are required.
            prediction_horizon: number of future points to predict.

        Returns:
            Dict with ``predictions`` (clamped to 0-100), a residual-based
            ``confidence`` in [0, 1], the trend slope, the horizon and the
            latest observed effectiveness; or an ``error`` key when the
            history is too short.
        """
        if len(effect_reports) < 3:
            return {'error': '需要至少3个历史数据点进行预测'}
        
        # Treat improvement_id ordering as the time axis.
        # NOTE(review): same chronology assumption as in trend analysis.
        sorted_reports = sorted(effect_reports, key=lambda x: x.improvement_id)
        effectiveness_scores = [r.overall_effectiveness for r in sorted_reports]
        
        # Fit a line y = slope*x + intercept over the index sequence.
        x_values = list(range(len(effectiveness_scores)))
        slope = self._calculate_slope(x_values, effectiveness_scores)
        intercept = statistics.mean(effectiveness_scores) - slope * statistics.mean(x_values)
        
        # Extrapolate the next `prediction_horizon` points.
        predictions = []
        for i in range(1, prediction_horizon + 1):
            next_x = len(effectiveness_scores) + i - 1
            predicted_value = slope * next_x + intercept
            predictions.append(max(0, min(100, predicted_value)))  # clamp to sane range
        
        # Confidence from mean absolute residual of the fit (scaled by 100).
        if len(effectiveness_scores) > 2:
            residuals = []
            for i, actual in enumerate(effectiveness_scores):
                predicted = slope * i + intercept
                residuals.append(abs(actual - predicted))
            
            avg_error = statistics.mean(residuals)
            confidence = max(0, min(1, 1 - (avg_error / 100)))  # error-based confidence
        else:
            confidence = 0.5
        
        return {
            'predictions': predictions,
            'confidence': confidence,
            'trend_slope': slope,
            'prediction_horizon': prediction_horizon,
            'base_effectiveness': effectiveness_scores[-1] if effectiveness_scores else 0
        }
    
    async def generate_improvement_insights(self, effect_reports: List[EffectReport]) -> List[str]:
        """Produce (Chinese-language) insight sentences from the reports."""
        insights = []
        
        if not effect_reports:
            return ["无足够数据生成洞察"]
        
        # Insight on the overall effectiveness level.
        effectiveness_scores = [r.overall_effectiveness for r in effect_reports]
        avg_effectiveness = statistics.mean(effectiveness_scores)
        
        if avg_effectiveness > 30:
            insights.append(f"改进效果整体良好，平均效果达到{avg_effectiveness:.1f}%")
        elif avg_effectiveness > 10:
            insights.append(f"改进效果中等，平均效果为{avg_effectiveness:.1f}%，仍有提升空间")
        else:
            insights.append(f"改进效果较低，平均效果仅为{avg_effectiveness:.1f}%，需要重新评估策略")
        
        # Insight on consistency (standard deviation of effectiveness).
        if len(effectiveness_scores) > 1:
            std_dev = statistics.stdev(effectiveness_scores)
            if std_dev < 10:
                insights.append("改进效果一致性高，说明改进方法稳定可靠")
            elif std_dev > 25:
                insights.append("改进效果差异较大，建议分析成功案例的关键因素")
        
        # Insight on success rate (effect above 5% counts as a success here).
        success_count = sum(1 for score in effectiveness_scores if score > 5)
        success_rate = success_count / len(effectiveness_scores) * 100
        
        if success_rate > 80:
            insights.append(f"改进成功率高达{success_rate:.1f}%，改进策略有效")
        elif success_rate < 50:
            insights.append(f"改进成功率仅为{success_rate:.1f}%，需要优化改进方法")
        
        # Insight on the single best case.
        best_report = max(effect_reports, key=lambda x: x.overall_effectiveness)
        if best_report.overall_effectiveness > 50:
            insights.append(f"最佳改进案例效果达到{best_report.overall_effectiveness:.1f}%，可作为标杆学习")
        
        # Collect per-metric improvement values across all reports.
        all_improvements = {}
        for report in effect_reports:
            for metric, improvement in report.improvement_percentage.items():
                if metric not in all_improvements:
                    all_improvements[metric] = []
                all_improvements[metric].append(improvement)
        
        # Highlight the metric with the strongest average improvement.
        metric_averages = {metric: statistics.mean(improvements) 
                          for metric, improvements in all_improvements.items() 
                          if len(improvements) > 0}
        
        if metric_averages:
            best_metric = max(metric_averages.items(), key=lambda x: x[1])
            if best_metric[1] > 20:
                insights.append(f"'{best_metric[0]}'指标改进效果最显著，平均提升{best_metric[1]:.1f}%")
        
        return insights