import sys
import os
from typing import Dict, List, Optional, Tuple
from datetime import datetime, timedelta
import statistics
import json
# Add the parent directory to sys.path so the top-level helper modules
# (DataCollectionTools, BehaviorAnalysisTools) imported below can be resolved
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from .DataModels import UserPerformanceMetrics, QuestionDifficultyMetrics
from .UserEvaluation import UserEvaluator
from .QuestionEvaluation import QuestionEvaluator
import DataCollectionTools as DCTools
import BehaviorAnalysisTools as BATools
class CompetitionSummarizer:
    """Builds end-of-competition summary reports from per-user performance
    metrics and per-question difficulty metrics."""

    def __init__(self, headers: dict = None):
        """Create a summarizer.

        Args:
            headers (dict, optional): HTTP headers forwarded to the
                data-collection APIs and to the delegated evaluators.
        """
        self.headers = headers
        # Per-user and per-question evaluation is delegated to these helpers.
        self.user_evaluator = UserEvaluator(headers)
        self.question_evaluator = QuestionEvaluator(headers)
        self.competition_cache = {}
        # Thresholds used to grade overall competition quality.
        self.quality_thresholds = {
            "participation": {"excellent": 0.8, "good": 0.6, "fair": 0.4},
            "completion": {"excellent": 0.7, "good": 0.5, "fair": 0.3},
            "difficulty_balance": {"excellent": 85, "good": 70, "fair": 55},
            "engagement": {"excellent": 80, "good": 60, "fair": 40},
        }
    def generate_competition_summary(self, competition_id: str = None) -> Dict:
        """
        生成比赛总结报告
        Args:
            competition_id (str, optional): 比赛ID，如果为None则分析整体表现
        Returns:
            Dict: 比赛总结报告
        """
        summary = {
            "competition_id": competition_id,
            "generation_time": datetime.now().isoformat(),
            "overview": {},
            "participant_analysis": {},
            "question_analysis": {},
            "performance_insights": {},
            "rankings": {},
            "recommendations": []
        }
        # 获取参赛者信息
        participants = self.get_participants(competition_id)
        questions = self.get_competition_questions(competition_id)
        # 基础统计
        summary["overview"] = self.calculate_overview_statistics(participants, questions)
        # 参赛者分析
        summary["participant_analysis"] = self.analyze_participants(participants)
        # 题目分析
        summary["question_analysis"] = self.analyze_competition_questions(questions)
        # 表现洞察
        summary["performance_insights"] = self.generate_performance_insights(participants, questions)
        # 排名分析
        summary["rankings"] = self.generate_rankings(participants)
        # 改进建议
        summary["recommendations"] = self.generate_competition_recommendations(summary)
        return summary
    def get_participants(self, competition_id: str = None) -> List[Dict]:
        """
        获取参赛者信息
        Args:
            competition_id (str, optional): 比赛ID
        Returns:
            List[Dict]: 参赛者信息列表
        """
        try:
            user_info = DCTools.get_user_information(headers=self.headers)
            participants = []
            for user in user_info:
                user_id = user.get("user_id")
                if user_id:
                    # 计算用户指标
                    metrics = self.user_evaluator.calculate_user_metrics(user_id)
                    participant_data = {
                        "user_id": user_id,
                        "metrics": metrics,
                        "competition_specific": self.get_competition_specific_data(user_id, competition_id)
                    }
                    participants.append(participant_data)
            return participants
        except Exception as e:
            print(f"获取参赛者信息失败: {e}")
            return []
    def get_competition_questions(self, competition_id: str = None) -> List[Dict]:
        """
        获取比赛题目信息
        Args:
            competition_id (str, optional): 比赛ID
        Returns:
            List[Dict]: 题目信息列表
        """
        try:
            question_list = DCTools.get_question_list(headers=self.headers)
            questions = []
            for question in question_list:
                question_id = question.get("question_id")
                if question_id:
                    # 计算题目难度指标
                    metrics = self.question_evaluator.calculate_question_difficulty(question_id)
                    question_data = {
                        "question_id": question_id,
                        "metrics": metrics,
                        "competition_specific": self.get_question_competition_data(question_id, competition_id)
                    }
                    questions.append(question_data)
            return questions
        except Exception as e:
            print(f"获取题目信息失败: {e}")
            return []
    def get_competition_specific_data(self, user_id: str, competition_id: str = None) -> Dict:
        """
        获取用户在特定比赛中的数据
        Args:
            user_id (str): 用户ID
            competition_id (str, optional): 比赛ID
        Returns:
            Dict: 比赛特定数据
        """
        # 这里可以根据实际比赛系统的API来获取比赛特定数据
        # 当前实现为占位符
        return {
            "start_time": None,
            "end_time": None,
            "submission_count": 0,
            "first_solve_time": None,
            "last_solve_time": None
        }
    def get_question_competition_data(self, question_id: str, competition_id: str = None) -> Dict:
        """
        获取题目在特定比赛中的数据
        Args:
            question_id (str): 题目ID
            competition_id (str, optional): 比赛ID
        Returns:
            Dict: 题目比赛数据
        """
        # 这里可以根据实际比赛系统的API来获取题目比赛数据
        # 当前实现为占位符
        return {
            "first_solve_time": None,
            "solve_count": 0,
            "attempt_count": 0,
            "category": "unknown"
        }
    def calculate_overview_statistics(self, participants: List[Dict], questions: List[Dict]) -> Dict:
        """
        计算概览统计
        Args:
            participants (List[Dict]): 参赛者信息
            questions (List[Dict]): 题目信息
        Returns:
            Dict: 概览统计
        """
        overview = {
            "total_participants": len(participants),
            "total_questions": len(questions),
            "overall_stats": {},
            "participation_stats": {},
            "question_stats": {}
        }
        if not participants:
            return overview
        # 计算整体统计
        solve_rates = []
        overall_scores = []
        total_attempts = 0
        total_solved = 0
        for participant in participants:
            metrics = participant["metrics"]
            solve_rates.append(metrics.solve_rate)
            overall_scores.append(metrics.overall_score)
            total_attempts += metrics.total_attempts
            total_solved += metrics.solved_questions
        overview["overall_stats"] = {
            "average_solve_rate": statistics.mean(solve_rates) * 100,
            "median_solve_rate": statistics.median(solve_rates) * 100,
            "average_overall_score": statistics.mean(overall_scores),
            "median_overall_score": statistics.median(overall_scores),
            "total_attempts": total_attempts,
            "total_solved": total_solved,
            "average_attempts_per_user": total_attempts / len(participants)
        }
        # 参与统计
        active_participants = sum(1 for p in participants if p["metrics"].total_attempts > 0)
        solving_participants = sum(1 for p in participants if p["metrics"].solved_questions > 0)
        overview["participation_stats"] = {
            "active_participants": active_participants,
            "solving_participants": solving_participants,
            "participation_rate": active_participants / len(participants) * 100,
            "solving_rate": solving_participants / len(participants) * 100
        }
        # 题目统计
        if questions:
            difficulty_distribution = {"Easy": 0, "Medium": 0, "Hard": 0, "Expert": 0}
            pass_rates = []
            for question in questions:
                metrics = question["metrics"]
                difficulty_distribution[metrics.difficulty_level] += 1
                pass_rates.append(metrics.pass_rate)
            overview["question_stats"] = {
                "difficulty_distribution": difficulty_distribution,
                "average_pass_rate": statistics.mean(pass_rates) * 100,
                "median_pass_rate": statistics.median(pass_rates) * 100,
                "most_common_difficulty": max(difficulty_distribution, key=difficulty_distribution.get)
            }
        return overview
    def analyze_participants(self, participants: List[Dict]) -> Dict:
        """
        分析参赛者表现
        Args:
            participants (List[Dict]): 参赛者信息
        Returns:
            Dict: 参赛者分析结果
        """
        analysis = {
            "performance_distribution": {},
            "skill_analysis": {},
            "engagement_analysis": {},
            "top_performers": [],
            "struggling_users": []
        }
        if not participants:
            return analysis
        # 性能分布分析
        performance_levels = {"A": 0, "B": 0, "C": 0, "D": 0}
        dimension_scores = {
            "efficiency": [],
            "consistency": [],
            "writeup_quality": [],
            "technical_skill": [],
            "progress_velocity": []
        }
        for participant in participants:
            metrics = participant["metrics"]
            # 性能等级分布
            grade = self.user_evaluator.get_performance_grade(metrics.overall_score)
            grade_letter = grade[0]  # 取A、B、C、D等级
            if grade_letter in performance_levels:
                performance_levels[grade_letter] += 1
            # 维度分数收集
            dimension_scores["efficiency"].append(metrics.efficiency_score)
            dimension_scores["consistency"].append(metrics.consistency_score)
            dimension_scores["writeup_quality"].append(metrics.writeup_quality_score)
            dimension_scores["technical_skill"].append(metrics.technical_skill_score)
            dimension_scores["progress_velocity"].append(metrics.progress_velocity_score)
        analysis["performance_distribution"] = performance_levels
        # 技能分析
        skill_stats = {}
        for dimension, scores in dimension_scores.items():
            skill_stats[dimension] = {
                "average": statistics.mean(scores),
                "median": statistics.median(scores),
                "std": statistics.stdev(scores) if len(scores) > 1 else 0,
                "min": min(scores),
                "max": max(scores)
            }
        analysis["skill_analysis"] = skill_stats
        # 参与度分析
        attempt_counts = [p["metrics"].total_attempts for p in participants]
        solve_counts = [p["metrics"].solved_questions for p in participants]
        analysis["engagement_analysis"] = {
            "average_attempts": statistics.mean(attempt_counts),
            "median_attempts": statistics.median(attempt_counts),
            "average_solves": statistics.mean(solve_counts),
            "median_solves": statistics.median(solve_counts),
            "high_engagement_users": sum(1 for count in attempt_counts if count > statistics.mean(attempt_counts) * 1.5),
            "low_engagement_users": sum(1 for count in attempt_counts if count < statistics.mean(attempt_counts) * 0.5)
        }
        # 顶尖表现者
        top_performers = sorted(participants, key=lambda p: p["metrics"].overall_score, reverse=True)[:5]
        analysis["top_performers"] = [
            {
                "user_id": p["user_id"],
                "overall_score": round(p["metrics"].overall_score, 2),
                "solve_rate": round(p["metrics"].solve_rate * 100, 2),
                "grade": self.user_evaluator.get_performance_grade(p["metrics"].overall_score)
            }
            for p in top_performers
        ]
        # 需要帮助的用户
        struggling_users = [p for p in participants if p["metrics"].overall_score < 40 or p["metrics"].solve_rate < 0.2]
        analysis["struggling_users"] = [
            {
                "user_id": p["user_id"],
                "overall_score": round(p["metrics"].overall_score, 2),
                "solve_rate": round(p["metrics"].solve_rate * 100, 2),
                "main_issues": self.identify_main_issues(p["metrics"])
            }
            for p in struggling_users
        ]
        return analysis
    def analyze_competition_questions(self, questions: List[Dict]) -> Dict:
        """
        分析比赛题目
        Args:
            questions (List[Dict]): 题目信息
        Returns:
            Dict: 题目分析结果
        """
        analysis = {
            "difficulty_balance": {},
            "question_performance": {},
            "problematic_questions": [],
            "well_designed_questions": [],
            "recommendations": []
        }
        if not questions:
            return analysis
        # 难度平衡分析
        question_ids = [q["question_id"] for q in questions]
        difficulty_analysis = self.question_evaluator.get_difficulty_distribution(question_ids)
        analysis["difficulty_balance"] = difficulty_analysis
        # 题目表现分析
        pass_rates = []
        attempt_averages = []
        difficulty_scores = []
        for question in questions:
            metrics = question["metrics"]
            pass_rates.append(metrics.pass_rate)
            attempt_averages.append(metrics.average_attempts)
            difficulty_scores.append(metrics.difficulty_score)
        analysis["question_performance"] = {
            "average_pass_rate": statistics.mean(pass_rates) * 100,
            "median_pass_rate": statistics.median(pass_rates) * 100,
            "pass_rate_std": statistics.stdev(pass_rates) * 100 if len(pass_rates) > 1 else 0,
            "average_attempts": statistics.mean(attempt_averages),
            "median_attempts": statistics.median(attempt_averages),
            "difficulty_variance": statistics.variance(difficulty_scores) if len(difficulty_scores) > 1 else 0
        }
        # 问题题目识别
        problematic_questions = []
        well_designed_questions = []
        for question in questions:
            metrics = question["metrics"]
            issues = []
            strengths = []
            # 识别问题
            if metrics.pass_rate < 0.05:
                issues.append("通过率过低")
            if metrics.pass_rate > 0.95:
                issues.append("通过率过高")
            if metrics.average_attempts > 10:
                issues.append("平均尝试次数过多")
            if metrics.completion_rate < 0.2:
                issues.append("完成率过低")
            # 识别优势
            if 0.2 <= metrics.pass_rate <= 0.8:
                strengths.append("通过率适中")
            if 1.5 <= metrics.average_attempts <= 4:
                strengths.append("尝试次数合理")
            if metrics.completion_rate > 0.6:
                strengths.append("完成率良好")
            if issues:
                problematic_questions.append({
                    "question_id": question["question_id"],
                    "difficulty_level": metrics.difficulty_level,
                    "issues": issues,
                    "pass_rate": round(metrics.pass_rate * 100, 2),
                    "average_attempts": round(metrics.average_attempts, 2)
                })
            if strengths and not issues:
                well_designed_questions.append({
                    "question_id": question["question_id"],
                    "difficulty_level": metrics.difficulty_level,
                    "strengths": strengths,
                    "pass_rate": round(metrics.pass_rate * 100, 2),
                    "average_attempts": round(metrics.average_attempts, 2)
                })
        analysis["problematic_questions"] = problematic_questions
        analysis["well_designed_questions"] = well_designed_questions
        return analysis
    def generate_performance_insights(self, participants: List[Dict], questions: List[Dict]) -> Dict:
        """
        生成表现洞察
        Args:
            participants (List[Dict]): 参赛者信息
            questions (List[Dict]): 题目信息
        Returns:
            Dict: 表现洞察
        """
        insights = {
            "key_findings": [],
            "trends": {},
            "correlations": {},
            "success_factors": [],
            "improvement_areas": []
        }
        if not participants or not questions:
            return insights
        # 关键发现
        key_findings = []
        # 参与度分析
        active_rate = sum(1 for p in participants if p["metrics"].total_attempts > 0) / len(participants)
        if active_rate < 0.5:
            key_findings.append(f"参与度较低，只有{active_rate:.1%}的用户有实际尝试")
        elif active_rate > 0.8:
            key_findings.append(f"参与度很高，{active_rate:.1%}的用户都有积极尝试")
        # 解题率分析
        solve_rates = [p["metrics"].solve_rate for p in participants]
        avg_solve_rate = statistics.mean(solve_rates)
        if avg_solve_rate < 0.3:
            key_findings.append(f"整体解题率较低({avg_solve_rate:.1%})，题目可能过于困难")
        elif avg_solve_rate > 0.7:
            key_findings.append(f"整体解题率很高({avg_solve_rate:.1%})，题目难度适中")
        # 难度分布分析
        difficulty_dist = {}
        for question in questions:
            level = question["metrics"].difficulty_level
            difficulty_dist[level] = difficulty_dist.get(level, 0) + 1
        if difficulty_dist.get("Easy", 0) > len(questions) * 0.5:
            key_findings.append("简单题目占比过高，可能缺乏挑战性")
        elif difficulty_dist.get("Expert", 0) > len(questions) * 0.3:
            key_findings.append("专家级题目较多，对初学者不够友好")
        insights["key_findings"] = key_findings
        # 趋势分析
        efficiency_scores = [p["metrics"].efficiency_score for p in participants]
        consistency_scores = [p["metrics"].consistency_score for p in participants]
        technical_scores = [p["metrics"].technical_skill_score for p in participants]
        insights["trends"] = {
            "efficiency_trend": self.analyze_score_trend(efficiency_scores),
            "consistency_trend": self.analyze_score_trend(consistency_scores),
            "technical_trend": self.analyze_score_trend(technical_scores)
        }
        # 相关性分析
        insights["correlations"] = self.analyze_correlations(participants)
        # 成功因素识别
        top_performers = sorted(participants, key=lambda p: p["metrics"].overall_score, reverse=True)[:len(participants)//4]
        success_factors = []
        if top_performers:
            avg_efficiency = statistics.mean([p["metrics"].efficiency_score for p in top_performers])
            avg_consistency = statistics.mean([p["metrics"].consistency_score for p in top_performers])
            avg_technical = statistics.mean([p["metrics"].technical_skill_score for p in top_performers])
            if avg_efficiency > 80:
                success_factors.append("高效率解题能力")
            if avg_consistency > 75:
                success_factors.append("稳定的表现一致性")
            if avg_technical > 85:
                success_factors.append("强大的技术能力")
        insights["success_factors"] = success_factors
        # 改进领域识别
        improvement_areas = []
        all_efficiency = [p["metrics"].efficiency_score for p in participants]
        all_consistency = [p["metrics"].consistency_score for p in participants]
        all_writeup = [p["metrics"].writeup_quality_score for p in participants]
        if statistics.mean(all_efficiency) < 60:
            improvement_areas.append("解题效率需要提升")
        if statistics.mean(all_consistency) < 55:
            improvement_areas.append("表现一致性有待改善")
        if statistics.mean(all_writeup) < 50:
            improvement_areas.append("题解质量需要加强")
        insights["improvement_areas"] = improvement_areas
        return insights
    def analyze_score_trend(self, scores: List[float]) -> Dict:
        """
        分析分数趋势
        Args:
            scores (List[float]): 分数列表
        Returns:
            Dict: 趋势分析结果
        """
        if not scores:
            return {"trend": "无数据", "average": 0, "distribution": {}}
        avg_score = statistics.mean(scores)
        # 分数分布
        distribution = {"低分(0-40)": 0, "中等(40-70)": 0, "高分(70-100)": 0}
        for score in scores:
            if score < 40:
                distribution["低分(0-40)"] += 1
            elif score < 70:
                distribution["中等(40-70)"] += 1
            else:
                distribution["高分(70-100)"] += 1
        # 趋势判断
        if avg_score >= 75:
            trend = "优秀"
        elif avg_score >= 60:
            trend = "良好"
        elif avg_score >= 45:
            trend = "一般"
        else:
            trend = "需要改进"
        return {
            "trend": trend,
            "average": round(avg_score, 2),
            "distribution": distribution,
            "std": statistics.stdev(scores) if len(scores) > 1 else 0
        }
    def analyze_correlations(self, participants: List[Dict]) -> Dict:
        """
        分析参赛者指标间的相关性
        Args:
            participants (List[Dict]): 参赛者信息
        Returns:
            Dict: 相关性分析结果
        """
        if len(participants) < 3:
            return {"correlations": [], "insights": []}
        # 提取各维度数据
        efficiency_scores = [p["metrics"].efficiency_score for p in participants]
        consistency_scores = [p["metrics"].consistency_score for p in participants]
        technical_scores = [p["metrics"].technical_skill_score for p in participants]
        writeup_scores = [p["metrics"].writeup_quality_score for p in participants]
        overall_scores = [p["metrics"].overall_score for p in participants]
        correlations = []
        insights = []
        # 计算关键相关性（简化的皮尔逊相关系数）
        def simple_correlation(x, y):
            if len(x) != len(y) or len(x) < 2:
                return 0
            n = len(x)
            sum_x = sum(x)
            sum_y = sum(y)
            sum_xy = sum(x[i] * y[i] for i in range(n))
            sum_x2 = sum(x[i] ** 2 for i in range(n))
            sum_y2 = sum(y[i] ** 2 for i in range(n))
            denominator = ((n * sum_x2 - sum_x ** 2) * (n * sum_y2 - sum_y ** 2)) ** 0.5
            if denominator == 0:
                return 0
            return (n * sum_xy - sum_x * sum_y) / denominator
        # 效率与技术能力相关性
        efficiency_technical_corr = simple_correlation(efficiency_scores, technical_scores)
        correlations.append({
            "variables": ["效率能力", "技术能力"],
            "correlation": round(efficiency_technical_corr, 3),
            "strength": self.interpret_correlation(efficiency_technical_corr)
        })
        # 一致性与整体表现相关性
        consistency_overall_corr = simple_correlation(consistency_scores, overall_scores)
        correlations.append({
            "variables": ["一致性表现", "整体表现"],
            "correlation": round(consistency_overall_corr, 3),
            "strength": self.interpret_correlation(consistency_overall_corr)
        })
        # 生成洞察
        if abs(efficiency_technical_corr) > 0.5:
            insights.append(f"效率能力与技术能力{'正相关' if efficiency_technical_corr > 0 else '负相关'}性较强")
        if abs(consistency_overall_corr) > 0.6:
            insights.append(f"一致性表现对整体表现有重要影响")
        return {
            "correlations": correlations,
            "insights": insights
        }
    def interpret_correlation(self, corr: float) -> str:
        """解释相关性强度"""
        abs_corr = abs(corr)
        if abs_corr >= 0.7:
            return "强相关"
        elif abs_corr >= 0.3:
            return "中等相关"
        elif abs_corr >= 0.1:
            return "弱相关"
        else:
            return "无相关"
    def generate_rankings(self, participants: List[Dict]) -> Dict:
        """
        生成排名分析
        Args:
            participants (List[Dict]): 参赛者信息
        Returns:
            Dict: 排名分析结果
        """
        rankings = {
            "overall_ranking": [],
            "dimension_rankings": {},
            "ranking_insights": []
        }
        if not participants:
            return rankings
        # 更新排名
        for i, participant in enumerate(participants):
            participant["metrics"].performance_rank = i + 1
        # 整体排名
        overall_sorted = sorted(participants, key=lambda p: p["metrics"].overall_score, reverse=True)
        rankings["overall_ranking"] = [
            {
                "rank": i + 1,
                "user_id": p["user_id"],
                "overall_score": round(p["metrics"].overall_score, 2),
                "grade": self.user_evaluator.get_performance_grade(p["metrics"].overall_score),
                "solve_rate": round(p["metrics"].solve_rate * 100, 2)
            }
            for i, p in enumerate(overall_sorted)
        ]
        # 各维度排名
        dimensions = ["efficiency_score", "consistency_score", "writeup_quality_score", 
                     "technical_skill_score", "progress_velocity_score"]
        dimension_names = ["效率能力", "一致性表现", "题解质量", "技术能力", "进步速度"]
        for dim, dim_name in zip(dimensions, dimension_names):
            dim_sorted = sorted(participants, key=lambda p: getattr(p["metrics"], dim), reverse=True)
            rankings["dimension_rankings"][dim_name] = [
                {
                    "rank": i + 1,
                    "user_id": p["user_id"],
                    "score": round(getattr(p["metrics"], dim), 2)
                }
                for i, p in enumerate(dim_sorted[:10])  # 只显示前10名
            ]
        # 排名洞察
        insights = []
        # 分析前三名的特征
        top_3 = overall_sorted[:3]
        if len(top_3) >= 3:
            top_3_avg_efficiency = statistics.mean([p["metrics"].efficiency_score for p in top_3])
            top_3_avg_technical = statistics.mean([p["metrics"].technical_skill_score for p in top_3])
            if top_3_avg_efficiency > 85:
                insights.append("前三名在效率能力上表现突出")
            if top_3_avg_technical > 80:
                insights.append("前三名具备强大的技术能力")
        # 分析排名分布
        if len(participants) >= 10:
            top_10_pct = len([p for p in participants if p["metrics"].overall_score >= 80]) / len(participants) * 100
            if top_10_pct > 20:
                insights.append(f"高分用户占比较高({top_10_pct:.1f}%)，整体水平不错")
            elif top_10_pct < 5:
                insights.append(f"高分用户占比较低({top_10_pct:.1f}%)，整体仍有提升空间")
        rankings["ranking_insights"] = insights
        return rankings
    def identify_main_issues(self, metrics: UserPerformanceMetrics) -> List[str]:
        """
        识别用户主要问题
        Args:
            metrics (UserPerformanceMetrics): 用户指标
        Returns:
            List[str]: 主要问题列表
        """
        issues = []
        if metrics.efficiency_score < 40:
            issues.append("解题效率低")
        if metrics.consistency_score < 40:
            issues.append("表现不稳定")
        if metrics.writeup_quality_score < 30:
            issues.append("题解质量差")
        if metrics.technical_skill_score < 35:
            issues.append("技术能力不足")
        if metrics.progress_velocity_score < 30:
            issues.append("学习进步缓慢")
        if metrics.solve_rate < 0.2:
            issues.append("解题成功率低")
        return issues
    def generate_competition_recommendations(self, summary: Dict) -> List[Dict]:
        """
        生成比赛改进建议
        Args:
            summary (Dict): 比赛总结
        Returns:
            List[Dict]: 改进建议列表
        """
        recommendations = []
        # 基于概览统计的建议
        overview = summary.get("overview", {})
        overall_stats = overview.get("overall_stats", {})
        participation_stats = overview.get("participation_stats", {})
        # 参与度建议
        if participation_stats.get("participation_rate", 0) < 60:
            recommendations.append({
                "category": "参与度",
                "priority": "高",
                "issue": "用户参与度较低",
                "suggestion": "增加题目吸引力，提供更清晰的指导，考虑设置激励机制",
                "expected_impact": "提高用户参与度和活跃度"
            })
        # 解题率建议
        if overall_stats.get("average_solve_rate", 0) < 30:
            recommendations.append({
                "category": "难度调整",
                "priority": "高",
                "issue": "整体解题率过低",
                "suggestion": "降低题目难度，增加提示信息，提供更多学习资源",
                "expected_impact": "提高解题成功率和用户满意度"
            })
        elif overall_stats.get("average_solve_rate", 0) > 80:
            recommendations.append({
                "category": "难度调整",
                "priority": "中",
                "issue": "整体解题率过高",
                "suggestion": "增加题目难度，添加更多挑战性题目",
                "expected_impact": "提高竞争性和挑战性"
            })
        # 基于题目分析的建议
        question_analysis = summary.get("question_analysis", {})
        difficulty_balance = question_analysis.get("difficulty_balance", {})
        balance_info = difficulty_balance.get("balance_score", 0)
        if balance_info < 70:
            recommendations.append({
                "category": "题目设计",
                "priority": "中",
                "issue": "题目难度分布不均衡",
                "suggestion": "调整题目难度分布，确保各难度等级的合理比例",
                "expected_impact": "改善用户体验，适应不同水平的参赛者"
            })
        # 基于问题题目的建议
        problematic_questions = question_analysis.get("problematic_questions", [])
        if len(problematic_questions) > len(summary.get("questions", [])) * 0.2:
            recommendations.append({
                "category": "题目质量",
                "priority": "高",
                "issue": f"发现{len(problematic_questions)}道问题题目",
                "suggestion": "审查并改进问题题目，修复通过率异常和用户体验问题",
                "expected_impact": "提高题目质量和用户满意度"
            })
        # 基于用户表现的建议
        participant_analysis = summary.get("participant_analysis", {})
        struggling_users = participant_analysis.get("struggling_users", [])
        if len(struggling_users) > len(summary.get("participants", [])) * 0.3:
            recommendations.append({
                "category": "用户支持",
                "priority": "中",
                "issue": "较多用户表现不佳",
                "suggestion": "提供更多学习资源、教程和个性化指导",
                "expected_impact": "帮助更多用户提升能力和参与度"
            })
        # 基于技能分析的建议
        skill_analysis = participant_analysis.get("skill_analysis", {})
        for dimension, stats in skill_analysis.items():
            if stats.get("average", 0) < 50:
                dimension_names = {
                    "efficiency": "效率能力",
                    "consistency": "一致性表现",
                    "writeup_quality": "题解质量",
                    "technical_skill": "技术能力",
                    "progress_velocity": "进步速度"
                }
                dim_name = dimension_names.get(dimension, dimension)
                recommendations.append({
                    "category": "技能提升",
                    "priority": "中",
                    "issue": f"用户在{dim_name}方面表现不佳",
                    "suggestion": f"针对{dim_name}提供专项训练和指导",
                    "expected_impact": f"提升用户{dim_name}水平"
                })
        # 按优先级排序
        priority_order = {"高": 3, "中": 2, "低": 1}
        recommendations.sort(key=lambda x: priority_order.get(x["priority"], 0), reverse=True)
        return recommendations
    def export_competition_report(self, summary: Dict, format: str = "json") -> str:
        """
        导出比赛报告
        Args:
            summary (Dict): 比赛总结
            format (str): 导出格式 ("json", "text")
        Returns:
            str: 导出的报告内容
        """
        if format == "json":
            return json.dumps(summary, indent=2, ensure_ascii=False)
        elif format == "text":
            return self.format_text_report(summary)
        else:
            raise ValueError("不支持的导出格式")
    def format_text_report(self, summary: Dict) -> str:
        """
        格式化文本报告
        Args:
            summary (Dict): 比赛总结
        Returns:
            str: 格式化的文本报告
        """
        report = []
        report.append("=" * 60)
        report.append("比赛总结报告")
        report.append("=" * 60)
        report.append(f"生成时间: {summary.get('generation_time', 'N/A')}")
        report.append(f"比赛ID: {summary.get('competition_id', 'N/A')}")
        report.append("")
        # 概览信息
        overview = summary.get("overview", {})
        report.append("## 概览统计")
        report.append(f"总参赛者: {overview.get('total_participants', 0)}")
        report.append(f"总题目数: {overview.get('total_questions', 0)}")
        overall_stats = overview.get("overall_stats", {})
        if overall_stats:
            report.append(f"平均解题率: {overall_stats.get('average_solve_rate', 0):.1f}%")
            report.append(f"平均总分: {overall_stats.get('average_overall_score', 0):.1f}")
            report.append(f"总尝试次数: {overall_stats.get('total_attempts', 0)}")
        report.append("")
        # 排名信息
        rankings = summary.get("rankings", {})
        overall_ranking = rankings.get("overall_ranking", [])
        if overall_ranking:
            report.append("## 前10名排名")
            for i, user in enumerate(overall_ranking[:10]):
                report.append(f"{i+1}. 用户{user['user_id']} - {user['overall_score']:.1f}分 ({user['grade']})")
        report.append("")
        # 改进建议
        recommendations = summary.get("recommendations", [])
        if recommendations:
            report.append("## 改进建议")
            for i, rec in enumerate(recommendations):
                report.append(f"{i+1}. 【{rec['category']}】{rec['issue']}")
                report.append(f"   建议: {rec['suggestion']}")
                report.append(f"   预期影响: {rec['expected_impact']}")
                report.append("")
        return "\n".join(report)
