import re
import sys
import os
sys.path.append("/home/fepi/JudgeAgent")
import DataCollectionTools as DCTools
from datetime import datetime, timedelta
from typing import Optional, List
import json
import google.generativeai as genai
from dotenv import load_dotenv
def get_question_ac_status(user_id:str, question_id:str, headers:dict=None) -> Optional[dict]:
    """
    Check if a specific question is accepted (AC) by a user.

    Args:
        user_id (str): The ID of the user.
        question_id (str): The ID of the question.
        headers (dict, optional): Headers to include in the request.

    Returns:
        dict: {"user_id", "question_id", "is_ac", "ac_time"} where "ac_time"
        is the earliest accepted submission time as a datetime when is_ac is
        True, otherwise None. A dict is always returned, never None.
    """
    # One "not accepted" result shared by every failure path, so the shape
    # of the return value is consistent everywhere.
    not_ac = {
        "user_id": user_id,
        "question_id": question_id,
        "is_ac": False,
        "ac_time": None
    }
    if not user_id or not question_id:
        return not_ac
    try:
        submissions = DCTools.get_submissions(
            params={"user_id": user_id, "question_id": question_id},
            headers=headers
        )
        if not submissions:
            return not_ac
        earliest = None  # earliest accepted time, as a datetime
        for submission in submissions:
            if submission.get("is_correct", 0) != 1:
                continue
            raw_time = submission.get("submission_time", None)
            if not raw_time:
                continue
            # Parse each timestamp individually and compare datetimes.
            # The previous implementation took min() over raw strings and
            # parsed only at the end, which (a) is only correct when every
            # record uses one uniform ISO format and (b) let a single
            # malformed timestamp discard all valid accepted submissions.
            try:
                parsed = datetime.fromisoformat(raw_time)
            except ValueError as e:
                print(f"Error parsing datetime {raw_time}: {e}")
                continue
            if earliest is None or parsed < earliest:
                earliest = parsed
        if earliest is not None:
            return {
                "user_id": user_id,
                "question_id": question_id,
                "is_ac": True,
                "ac_time": earliest
            }
    except Exception as e:
        print(f"Error in get_question_ac_status for user {user_id}, question {question_id}: {e}")
    return not_ac
def get_question_resolution_time(user_id:str,question_id:str,headers:dict=None) -> Optional[dict]:
    """
    Get the resolution time for a specific question by a user.

    Args:
        user_id (str): The ID of the user.
        question_id (str): The ID of the question.
        headers (dict, optional): Headers to include in the request.

    Returns:
        dict: Resolution-time information for the user/question pair, or
        None when inputs are missing, no start time exists, or the lookup fails.
    """
    if not (user_id and question_id):
        return None
    # Result reported whenever the question was started but never solved.
    unsolved = {
        "user_id": user_id,
        "question_id": question_id,
        "resolution_time": None,
        "is_solved": False
    }
    try:
        # When did the user start working on this question?
        start_raw = DCTools.get_start_task_time(
            params={"user_id": user_id, "question_id": question_id},
            headers=headers
        )
        if not start_raw:
            return None
        started_at = datetime.fromisoformat(start_raw)
        # Was the question accepted, and if so, when?
        status = get_question_ac_status(user_id, question_id, headers)
        if not status.get("is_ac", False):
            return unsolved
        accepted_at = status.get("ac_time", None)
        if not accepted_at:
            return unsolved
        elapsed = accepted_at - started_at
        # Guard against bad data / clock skew producing a negative duration.
        if elapsed.total_seconds() < 0:
            print(f"Warning: Negative resolution time for user {user_id}, question {question_id}")
            return unsolved
        return {
            "user_id": user_id,
            "question_id": question_id,
            "resolution_time": elapsed,
            "is_solved": True,
            "start_time": started_at,
            "ac_time": accepted_at
        }
    except Exception as e:
        print(f"Error in get_question_resolution_time for user {user_id}, question {question_id}: {e}")
        return None
def get_question_attempt_count(user_id:str,question_id:str,headers:dict=None) -> int:
    """
    Get the number of attempts made by a user on a specific question.

    Args:
        user_id (str): The ID of the user.
        question_id (str): The ID of the question.
        headers (dict, optional): Headers to include in the request.

    Returns:
        int: Number of submissions the user made for the question; 0 when
        inputs are missing or the lookup fails.
    """
    if not (user_id and question_id):
        return 0
    try:
        records = DCTools.get_submissions(
            params={"user_id": user_id, "question_id": question_id},
            headers=headers
        )
    except Exception as e:
        print(f"Error in get_question_attempt_count for user {user_id}, question {question_id}: {e}")
        return 0
    return len(records) if records else 0
def get_average_question_resolution_time(user_id:str,headers:dict=None) -> Optional[timedelta]:
    """
    Get the average resolution time over all questions solved by a user.

    Args:
        user_id (str): The ID of the user.
        headers (dict, optional): Headers to include in the request.

    Returns:
        timedelta: Average resolution time across solved questions, or None
        when the user has no solved questions or the lookup fails.
    """
    if not user_id:
        return None
    try:
        questions = DCTools.get_question_list(params={"user_id": user_id}, headers=headers)
        if not questions:
            return None
        solved_durations = []
        for entry in questions:
            qid = entry.get("question_id", None)
            if qid is None:
                continue
            info = get_question_resolution_time(user_id, qid, headers)
            if not (info and info.get("is_solved", False)):
                continue
            duration = info.get("resolution_time", None)
            # Only positive durations contribute to the average.
            if duration and duration.total_seconds() > 0:
                solved_durations.append(duration)
        if not solved_durations:
            return None
        # Average in seconds, re-wrapped as a timedelta.
        mean_seconds = sum(d.total_seconds() for d in solved_durations) / len(solved_durations)
        return timedelta(seconds=mean_seconds)
    except Exception as e:
        print(f"Error in get_average_question_resolution_time for user {user_id}: {e}")
        return None
def get_question_pass_rate(question_id:str, headers:dict=None) -> float:
    """
    Get the pass rate for a specific question across all users.

    Args:
        question_id (str): The ID of the question.
        headers (dict, optional): Headers to include in the request.

    Returns:
        float: Fraction of users who solved the question, or 0.0 when there
        are no users/submissions or the lookup fails.
    """
    if not question_id:
        return 0.0
    try:
        users = DCTools.get_user_information(params=None, headers=headers)
        if not users:
            return 0.0
        user_ids = [u.get("user_id") for u in users if u.get("user_id")]
        if not user_ids:
            return 0.0
        # Count users whose AC status reports the question as solved.
        accepted = sum(
            1 for uid in user_ids
            if (status := get_question_ac_status(uid, question_id, headers))
            and status.get("is_ac", False)
        )
        return accepted / len(user_ids)
    except Exception as e:
        print(f"Error in get_question_pass_rate for question {question_id}: {e}")
        return 0.0
def get_writeup_report(user_id:str, question_id:str, headers:dict=None) -> dict:
    """
    Get an LLM-generated evaluation report for a user's CTF writeup.

    Loads Gemini credentials (and an optional proxy) from a `.env` file in
    the parent directory, fetches the user's writeup text, and asks the
    model to score it. Falls back to a hard-coded sample writeup when no
    text can be fetched.

    Args:
        user_id (str): The ID of the user.
        question_id (str): The ID of the question.
        headers (dict, optional): Headers to include in the request.

    Returns:
        dict: The parsed JSON report (missing fields are filled with 0 or
        []), or an empty dict on any configuration, API, or parsing failure.
    """
    if not user_id or not question_id:
        return {}
    try:
        # Resolve the .env file relative to this module's parent directory.
        current_dir = os.path.dirname(__file__)
        project_root = os.path.abspath(os.path.join(current_dir, '..'))
        env_path = os.path.join(project_root, ".env")
        if not os.path.exists(env_path):
            print(f"Warning: .env file not found at {env_path}")
            return {} 
        load_dotenv(env_path)
        # Optional proxy setup for reaching the Gemini API.
        PROXY = os.getenv("PROXY")
        if PROXY:
            os.environ["http_proxy"] = PROXY
            os.environ["https_proxy"] = PROXY 
            os.environ["NO_PROXY"] = "127.0.0.1,localhost"
        API_KEY = os.getenv("GEMINI_API_KEY")
        if not API_KEY:
            print("Warning: GEMINI_API_KEY not found in environment variables")
            return {}
        genai.configure(api_key=API_KEY)
        # Prompt defining the exact JSON schema the model must return.
        prompt = """
            You are a cybersecurity expert tasked with evaluating a CTF (Capture The Flag) writeup.
            Given the following writeup text, analyze it and return a detailed report in JSON format.
            Be objective and only rely on factual content presented in the text. Do not make up missing details.
            The JSON object should contain the following fields:
            {
            "used_tools": [string],                        # List of tools used (e.g., "Burp Suite", "Hydra")
            "attack_chain": [string],                      # Ordered list of main attack steps taken
            "vulnerabilities": [                           # Key vulnerabilities exploited
                {
                "type": string,                            # e.g., "SQL Injection", "Access Control Bypass"
                "description": string                      # How the vulnerability was discovered and used
                }
            ],
            "errors": [                                    # Errors or issues encountered during exploitation
                {
                "description": string,                     # What went wrong
                "fix": string                              # How the issue was fixed
                }
            ],
            "technical_depth": integer (1–100),            # How technically detailed the writeup is
            "completeness": integer (1–100),               # How completely the steps were described
            "tool_explanation": integer (1–100),           # Whether the use of tools was clearly explained
            "language_clarity": integer (1–100),           # Grammar, structure, and readability
            "score": integer (1–100)                       # Final overall score
            }
            Please output strictly in JSON format only, no extra commentary or markdown.
            Writeup content:
        """
        # Scoring rubric appended to the prompt for each sub-score.
        judge_rule = [
            "Technical Depth (0-100): \
            90-100: Demonstrates multiple advanced techniques (e.g., chained exploits, WAF evasion, complex WebShell usage) with clear reasoning and deep understanding of security mechanisms. \
            70-89: Uses intermediate to advanced attack methods (e.g., SSRF, logic flaws) with explanation of rationale and effects. \
            50-69: Employs basic techniques (e.g., brute force, directory scanning) but lacks depth in analysis. \
            30-49: Shows only simple or incorrect methods with unclear technical logic. \
            0-29: No valid techniques described or completely incorrect understanding.",
            "Step Completeness (0-100): \
            90-100: Covers full logical flow including reconnaissance, vulnerability discovery, verification, exploitation, privilege escalation, and flag retrieval with clear step-by-step explanation. \
            70-89: Describes most key steps, missing one or two less critical parts but remains coherent. \
            50-69: Several major steps are missing or only briefly mentioned. \
            30-49: Fragmented steps or partial execution, hard to follow. \
            0-29: No clear steps provided or completely disorganized.",
            "Tool Explanation (0-100): \
            90-100: Clearly lists each tool used, explains its purpose, and describes how and when it was applied. \
            70-89: Mentions most tools with general explanation but lacks some precision. \
            50-69: Lists tools without clear usage context or justification. \
            30-49: Mentions tools vaguely or misuses them. \
            0-29: No tools mentioned or tools are unrelated/incorrect.",
            "Language Clarity (0-100): \
            90-100: Professionally written with accurate terminology, proper grammar, and logical structure. \
            70-89: Generally clear and readable with minor language or style issues. \
            50-69: Noticeable language issues that somewhat affect comprehension. \
            30-49: Poor structure with many grammatical errors, difficult to follow. \
            0-29: Unreadable or highly confusing due to serious language problems.",
            "Overall Score (0-100): \
            Should be close to the average of the four sub-scores (within ±5 points). \
            Final score must reflect overall quality without large deviation from the sub-score average."
        ]
        params = {
            "user_id": user_id,
            "question_id": question_id,
        }
        # Fetch the actual writeup text for this user/question.
        try:
            writeup_text = DCTools.get_writup(params=params, headers=headers) 
            if not writeup_text or writeup_text.strip() == "":
                # Empty writeup: fall back to the built-in sample text.
                # NOTE(review): this sample is duplicated in the except
                # branch below — consider hoisting it to one constant.
                writeup_text = """
                我首先使用了 `dirsearch` 对目标网站进行目录扫描，发现了一个隐藏的 `/admin/` 路径。尝试直接访问时返回 403，于是我使用 `Burp Suite` 配合 `X-Forwarded-For` 头伪造 IP 地址绕过限制，成功进入后台。
                后台登录页面存在用户名枚举漏洞，在输入错误用户名时返回不同提示信息。我用 Python 脚本爆破用户名后，用 `Hydra` 工具配合字典爆破密码，最终成功登录。
                进入后台后发现有一个文件上传功能，但仅校验文件扩展名。尝试上传 `.php` 文件被拒绝，但将文件名改为 `shell.php.jpg` 成功绕过上传限制。在访问时，发现上传目录存在于 `/uploads/` 路径下。
                上传后访问生成的 WebShell，但遇到 500 错误。检查上传内容发现文件内容缩进有误，修复后重新上传成功执行。
                最终通过 WebShell 拿到 flag。
                """
        except Exception as e:
            print(f"Warning: Could not fetch writeup text, using test data: {e}")
            writeup_text = """
            我首先使用了 `dirsearch` 对目标网站进行目录扫描，发现了一个隐藏的 `/admin/` 路径。尝试直接访问时返回 403，于是我使用 `Burp Suite` 配合 `X-Forwarded-For` 头伪造 IP 地址绕过限制，成功进入后台。
            后台登录页面存在用户名枚举漏洞，在输入错误用户名时返回不同提示信息。我用 Python 脚本爆破用户名后，用 `Hydra` 工具配合字典爆破密码，最终成功登录。
            进入后台后发现有一个文件上传功能，但仅校验文件扩展名。尝试上传 `.php` 文件被拒绝，但将文件名改为 `shell.php.jpg` 成功绕过上传限制。在访问时，发现上传目录存在于 `/uploads/` 路径下。
            上传后访问生成的 WebShell，但遇到 500 错误。检查上传内容发现文件内容缩进有误，修复后重新上传成功执行。
            最终通过 WebShell 拿到 flag。
            """
        # NOTE(review): the whole messages list is stringified into one
        # user turn below, so the non-standard "role" value is never sent
        # as an actual chat role — confirm this is intentional.
        messages = [
            {"role": "professional network security expert", "content": "you are a professional network security expert."},
            {"role": "user", "content": prompt + "\n\nWriteup:\n" + writeup_text + "\n\nJudge Rules:\n" + "\n".join(judge_rule)}
        ]
        model = genai.GenerativeModel("gemini-2.5-flash")
        chat = model.start_chat()
        response = chat.send_message(str(messages))
        text = response.text
        # Extract JSON from a ```json fenced block if present, otherwise
        # assume the whole response body is the JSON payload.
        match = re.search(r"```json\s*(.*?)\s*```", text, re.DOTALL)
        if match:
            result = match.group(1)
        else:
            result = text.strip()
        report = json.loads(result)
        # Backfill any fields the model omitted so callers always see the
        # full schema: numeric scores default to 0, list fields to [].
        required_fields = ["used_tools", "attack_chain", "vulnerabilities", "errors", 
                          "technical_depth", "completeness", "tool_explanation", 
                          "language_clarity", "score"]
        for field in required_fields:
            if field not in report:
                print(f"Warning: Missing field {field} in writeup report")
                if field in ["technical_depth", "completeness", "tool_explanation", "language_clarity", "score"]:
                    report[field] = 0
                else:
                    report[field] = []
        return report
    except json.JSONDecodeError as e:
        print(f"Error parsing JSON response: {e}")
        return {}
    except Exception as e:
        print(f"Error generating writeup report for user {user_id}, question {question_id}: {e}")
        return {}
def get_question_submission_statistics(question_id: str, headers: dict = None) -> dict:
    """
    Collect submission statistics for a question across all users.

    Args:
        question_id (str): The question ID.
        headers (dict, optional): Request headers.

    Returns:
        dict: Per-question statistics: total users, total submissions,
        accepted-user count, pass rate, average attempts, earliest accepted
        time, and a min/max/avg attempt distribution. Empty dict when the
        question ID is missing or the lookup fails.
    """
    if not question_id:
        return {}
    try:
        user_info = DCTools.get_user_information(headers=headers)
        if not user_info:
            return {}
        total_submissions = 0
        total_users = len(user_info)
        ac_users = 0
        first_ac_time = None  # earliest accepted time seen across users
        attempt_counts = []
        for user in user_info:
            user_id = user.get("user_id")
            if not user_id:
                continue
            # Per-user attempt count for this question.
            attempts = get_question_attempt_count(user_id, question_id, headers)
            attempt_counts.append(attempts)
            total_submissions += attempts
            # Did this user get the question accepted?
            ac_status = get_question_ac_status(user_id, question_id, headers)
            if ac_status and ac_status.get("is_ac", False):
                ac_users += 1
                ac_time = ac_status.get("ac_time")
                # Track only the earliest accepted time. (The previous
                # version also accumulated a submission_times list that was
                # never read — removed as dead code.)
                if ac_time and (first_ac_time is None or ac_time < first_ac_time):
                    first_ac_time = ac_time
        # Compute the average once; it appears twice in the result.
        avg_attempts = sum(attempt_counts) / len(attempt_counts) if attempt_counts else 0
        return {
            "question_id": question_id,
            "total_users": total_users,
            "total_submissions": total_submissions,
            "ac_users": ac_users,
            "pass_rate": ac_users / total_users if total_users > 0 else 0.0,
            "average_attempts": avg_attempts,
            "first_ac_time": first_ac_time,
            "submission_count_distribution": {
                "min": min(attempt_counts) if attempt_counts else 0,
                "max": max(attempt_counts) if attempt_counts else 0,
                "avg": avg_attempts
            }
        }
    except Exception as e:
        print(f"Error in get_question_submission_statistics for question {question_id}: {e}")
        return {}
def get_question_difficulty_ranking(headers: dict = None) -> List[dict]:
    """
    Rank all questions by estimated difficulty.

    Args:
        headers (dict, optional): Request headers.

    Returns:
        List[dict]: Questions sorted from hardest to easiest; empty list
        when no questions exist or the lookup fails.
    """
    try:
        questions = DCTools.get_question_list(headers=headers)
        if not questions:
            return []
        ranking = []
        for entry in questions:
            qid = entry.get("question_id")
            if not qid:
                continue
            # Pass rate and submission statistics feed the difficulty score.
            pass_rate = get_question_pass_rate(qid, headers)
            stats = get_question_submission_statistics(qid, headers)
            avg_attempts = stats.get("average_attempts", 0)
            # Lower pass rate and more attempts → harder. Pass rate
            # contributes up to 70 points, attempts up to 30 (capped).
            score = (1 - pass_rate) * 70 + min(avg_attempts * 5, 30)
            ranking.append({
                "question_id": qid,
                "question_title": entry.get("title", "Unknown"),
                "pass_rate": pass_rate,
                "average_attempts": avg_attempts,
                "difficulty_score": score,
                "difficulty_level": _classify_difficulty_by_score(score)
            })
        # Hardest questions first.
        return sorted(ranking, key=lambda item: item["difficulty_score"], reverse=True)
    except Exception as e:
        print(f"Error in get_question_difficulty_ranking: {e}")
        return []
def _classify_difficulty_by_score(score: float) -> str:
    """根据分数分类难度等级的辅助函数"""
    if score >= 80:
        return "Expert"
    elif score >= 60:
        return "Hard"
    elif score >= 40:
        return "Medium"
    else:
        return "Easy"
