#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
结构化日志系统
将 Pre-commit 检查结果保存为结构化 JSON 格式
"""
import json
import re
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional


class StructuredLogger:
    """Structured logger for pre-commit check results.

    Parses raw ruff/mypy/pytest output into a structured dict, classifies
    each finding by priority, and writes both a JSON report
    (``<output_dir>/last_error.json``) and a plain-text report
    (``<output_dir>/last_error.log``) for backward compatibility.
    """

    def __init__(self, output_dir: str = "reports"):
        """
        Initialize the logger.

        Args:
            output_dir: Report output directory; created if missing
                (including intermediate directories).
        """
        self.output_dir = Path(output_dir)
        # parents=True so nested paths like "build/reports" also work.
        self.output_dir.mkdir(parents=True, exist_ok=True)

    def save_error_log(self, errors: Dict[str, Dict[str, Any]]) -> Path:
        """
        Save a structured error log.

        Args:
            errors: Mapping of tool name to its execution result, e.g.
                {
                    "ruff": {"code": 1, "output": "...", "stderr": "..."},
                    "mypy": {"code": 0, "output": "...", "stderr": "..."},
                    "pytest": {"code": 0, "output": "...", "stderr": "..."}
                }

        Returns:
            Path of the JSON log file that was written.
        """
        log_data: Dict[str, Any] = {
            "timestamp": datetime.now().isoformat(),
            "checks": [],
            "summary": {
                "total_errors": 0,
                "total_warnings": 0,
                "status": "unknown",
                "priority": {
                    "P0_critical": [],
                    "P1_high": [],
                    "P2_medium": []
                }
            }
        }

        # Parse each tool's raw output into a structured check entry.
        for tool, result in errors.items():
            check_entry = self._parse_tool_output(tool, result)
            log_data["checks"].append(check_entry)

            # Accumulate error/warning totals.
            log_data["summary"]["total_errors"] += check_entry["error_count"]
            log_data["summary"]["total_warnings"] += check_entry["warning_count"]

            # Bucket each finding by priority. setdefault guards against a
            # priority value outside the three predefined buckets.
            for error in check_entry.get("errors", []):
                priority = error.get("priority", "P2_medium")
                log_data["summary"]["priority"].setdefault(priority, []).append({
                    "tool": tool,
                    "message": error.get("message", ""),
                    "file": error.get("file", ""),
                    "line": error.get("line", 0)
                })

        # Overall status: any error -> failed; else any warning -> warning.
        if log_data["summary"]["total_errors"] > 0:
            log_data["summary"]["status"] = "failed"
        elif log_data["summary"]["total_warnings"] > 0:
            log_data["summary"]["status"] = "warning"
        else:
            log_data["summary"]["status"] = "passed"

        # Write the JSON report.
        log_file = self.output_dir / "last_error.json"
        with open(log_file, "w", encoding="utf-8") as f:
            json.dump(log_data, f, indent=2, ensure_ascii=False)

        # Also write the plain-text report (backward compatibility).
        self._save_text_log(log_data)

        return log_file

    def _parse_tool_output(self, tool: str, result: Dict[str, Any]) -> Dict[str, Any]:
        """
        Parse one tool's output.

        Args:
            tool: Tool name ("ruff", "mypy" or "pytest"; anything else
                yields an entry with an empty error list).
            result: Tool execution result dict (keys: code/output/stderr,
                optionally duration).

        Returns:
            Structured check result with error/warning counts.
        """
        output = result.get("output", "")
        stderr = result.get("stderr", "")
        # Some tools report findings on stderr, so parse both streams.
        combined = f"{output}\n{stderr}"

        check_entry: Dict[str, Any] = {
            "tool": tool,
            "return_code": result.get("code", 0),
            "duration": result.get("duration", 0),
            "error_count": 0,
            "warning_count": 0,
            "errors": []
        }

        if tool == "ruff":
            check_entry["errors"] = self._parse_ruff_output(combined)
        elif tool == "mypy":
            check_entry["errors"] = self._parse_mypy_output(combined)
        elif tool == "pytest":
            check_entry["errors"] = self._parse_pytest_output(combined)

        # Tally by severity ("note" from mypy is deliberately uncounted).
        for error in check_entry["errors"]:
            if error.get("severity") == "error":
                check_entry["error_count"] += 1
            elif error.get("severity") == "warning":
                check_entry["warning_count"] += 1

        return check_entry

    def _parse_ruff_output(self, output: str) -> List[Dict[str, Any]]:
        """Parse Ruff output lines into structured findings."""
        errors = []
        # Ruff format: path/to/file.py:10:5: E501 line too long
        # [A-Z]+ (not a single letter) so multi-letter rule codes such as
        # UP035, SIM105 or B008 are matched as well.
        pattern = r"(.+?):(\d+):(\d+):\s+([A-Z]+\d+)\s+(.+)"

        for line in output.split('\n'):
            match = re.match(pattern, line.strip())
            if match:
                file_path, line_num, column, code, message = match.groups()
                errors.append({
                    "file": file_path,
                    "line": int(line_num),
                    "column": int(column),
                    "code": code,
                    "message": message.strip(),
                    # E (pycodestyle errors) and F (pyflakes: undefined names,
                    # unused imports) are real errors — consistent with the
                    # P0/P1 priority given to F-codes below.
                    "severity": "error" if code.startswith(("E", "F")) else "warning",
                    "priority": self._classify_priority(code, message)
                })

        return errors

    def _parse_mypy_output(self, output: str) -> List[Dict[str, Any]]:
        """Parse MyPy output lines into structured findings."""
        errors = []
        # MyPy format: path/to/file.py:10: error: message
        pattern = r"(.+?):(\d+):\s+(error|warning|note):\s+(.+)"

        for line in output.split('\n'):
            match = re.match(pattern, line.strip())
            if match:
                file_path, line_num, severity, message = match.groups()
                errors.append({
                    "file": file_path,
                    "line": int(line_num),
                    "column": 0,
                    "code": "TYPE",
                    "message": message.strip(),
                    "severity": severity,
                    "priority": self._classify_priority("TYPE", message)
                })

        return errors

    def _parse_pytest_output(self, output: str) -> List[Dict[str, Any]]:
        """Parse Pytest output lines into structured findings."""
        errors = []
        # Pytest format: test_file.py::test_name FAILED
        # \S+ (not \w+) so parametrized ids (test_x[1]) and class-scoped
        # ids (TestC::test_x) are captured too.
        pattern = r"(.+?\.py)::(\S+)\s+FAILED"

        for line in output.split('\n'):
            match = re.search(pattern, line)
            if match:
                file_path, test_name = match.groups()
                errors.append({
                    "file": file_path,
                    "line": 0,
                    "column": 0,
                    "code": "TEST",
                    "message": f"Test failed: {test_name}",
                    "severity": "error",
                    "priority": "P1_high"
                })

        return errors

    def _classify_priority(self, code: str, message: str) -> str:
        """
        Classify a finding's priority from its code and message.

        Args:
            code: Error code (e.g. "E501", "F821", "TYPE", "TEST").
            message: Error message text.

        Returns:
            Priority string: P0_critical, P1_high or P2_medium.
        """
        # P0: syntax errors, import errors.
        if code in ["E902", "E999", "F821", "F401"]:
            return "P0_critical"
        if "SyntaxError" in message or "ImportError" in message:
            return "P0_critical"

        # P1: type errors, other pyflakes findings.
        if code.startswith("F") or code == "TYPE":
            return "P1_high"

        # P2: style issues.
        return "P2_medium"

    def _save_text_log(self, log_data: Dict[str, Any]) -> None:
        """Save the plain-text log (backward compatibility)."""
        log_file = self.output_dir / "last_error.log"

        with open(log_file, "w", encoding="utf-8") as f:
            f.write("=== Pre-commit 检查报告 ===\n")
            f.write(f"时间: {log_data['timestamp']}\n")
            f.write(f"状态: {log_data['summary']['status']}\n")
            f.write(f"错误总数: {log_data['summary']['total_errors']}\n")
            f.write(f"警告总数: {log_data['summary']['total_warnings']}\n\n")

            for check in log_data["checks"]:
                f.write(f"=== {check['tool']} 检查结果 ===\n")
                f.write(f"Return Code: {check['return_code']}\n")
                f.write(f"错误数: {check['error_count']}\n")
                f.write(f"警告数: {check['warning_count']}\n\n")

                if check["errors"]:
                    # Only show the first 10 findings per tool.
                    for error in check["errors"][:10]:
                        f.write(f"  [{error['severity'].upper()}] ")
                        f.write(f"{error['file']}:{error['line']}: ")
                        f.write(f"{error['message']}\n")

                    if len(check["errors"]) > 10:
                        f.write(f"  ... 还有 {len(check['errors']) - 10} 个错误\n")

                f.write("\n")

    def load_error_log(self, log_file: Optional[str] = None) -> Dict[str, Any]:
        """
        Load an error log.

        Args:
            log_file: Log file path; defaults to the latest JSON log.

        Returns:
            Log data dict.

        Raises:
            FileNotFoundError: If the log file does not exist.
        """
        path = Path(log_file) if log_file is not None else self.output_dir / "last_error.json"

        if not path.exists():
            raise FileNotFoundError(f"Log file not found: {path}")

        with open(path, "r", encoding="utf-8") as f:
            return json.load(f)


if __name__ == "__main__":
    # Smoke-test the structured logger with simulated tool results.
    print("=== Structured Logger Test ===\n")

    structured_logger = StructuredLogger()

    # Simulated check results, one entry per tool.
    sample_results = {
        "ruff": {
            "code": 1,
            "output": "backend/app/main.py:10:5: E902 系统找不到指定的文件\nbackend/app/test.py:20:1: E501 line too long",
            "stderr": "",
            "duration": 1.23
        },
        "mypy": {
            "code": 0,
            "output": "",
            "stderr": "",
            "duration": 2.45
        },
        "pytest": {
            "code": 0,
            "output": "tests/test_main.py::test_example PASSED",
            "stderr": "",
            "duration": 3.67
        }
    }

    # Write the report, then read it back and print a summary.
    saved_path = structured_logger.save_error_log(sample_results)
    print(f"Log saved to: {saved_path}")

    report = structured_logger.load_error_log()
    summary = report["summary"]
    print("\nLog Summary:")
    print(f"  Status: {summary['status']}")
    print(f"  Errors: {summary['total_errors']}")
    print(f"  Warnings: {summary['total_warnings']}")
    for label, bucket in (("P0 Critical", "P0_critical"),
                          ("P1 High", "P1_high"),
                          ("P2 Medium", "P2_medium")):
        print(f"  {label}: {len(summary['priority'][bucket])}")