#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Code Review Assistant Module

This module provides intelligent code review capabilities by analyzing Git repositories
and diff content using Claude AI. It features automatic project detection, context-aware
analysis, and structured review output.

Key Components:
- GitRepositoryAnalyzer: Analyzes repository structure and context
- DiffParser: Parses and processes Git diff content
- CodeReviewAssistant: Main interface for conducting code reviews

Author: Claude CLI Code Review Extension
Version: 1.0.0
"""

import os
import re
import json
import subprocess
import logging
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union, Any
from dataclasses import dataclass, field
from enum import Enum

from simple_pipe import ClaudeCLI, ClaudeConfig, ClaudeError


class ProjectType(Enum):
    """Enumeration of supported project types.

    The string values appear in log messages and review context;
    UNKNOWN is the fallback when detection finds no indicators.
    """
    PYTHON = "python"
    JAVASCRIPT = "javascript"
    TYPESCRIPT = "typescript"
    GO = "go"
    RUST = "rust"
    JAVA = "java"
    CPP = "cpp"
    UNKNOWN = "unknown"


class ReviewFocus(Enum):
    """Enumeration of review focus areas.

    Used in CodeReviewConfig.focus_areas to steer which aspects of the
    diff the review should emphasize.
    """
    CODE_QUALITY = "code_quality"
    SECURITY = "security"
    PERFORMANCE = "performance"
    MAINTAINABILITY = "maintainability"
    TESTING = "testing"
    DOCUMENTATION = "documentation"


@dataclass
class ProjectInfo:
    """Information about a Git repository project, produced by GitRepositoryAnalyzer."""
    project_type: ProjectType  # primary detected language/ecosystem
    root_path: str  # absolute path to the repository root
    dependencies: List[str] = field(default_factory=list)  # package names parsed from dependency manifests
    frameworks: List[str] = field(default_factory=list)  # detected frameworks/build tools (e.g. django, cmake)
    test_frameworks: List[str] = field(default_factory=list)  # detected testing frameworks (e.g. pytest, jest)
    coding_standards: Dict[str, Any] = field(default_factory=dict)  # parsed config data (e.g. tsconfig.json contents)
    package_info: Dict[str, Any] = field(default_factory=dict)  # raw package metadata (package.json, Cargo.toml, go.mod module)
    git_info: Dict[str, str] = field(default_factory=dict)  # branch / remote / last-commit details ("unknown" on failure)
    file_structure: Dict[str, List[str]] = field(default_factory=dict)  # categorized dirs/files: source, tests, config, docs, build


@dataclass
class DiffChange:
    """Represents a single file's change in a diff."""
    file_path: str  # path of the changed file (new path for renames)
    change_type: str  # "added", "deleted", "modified", or "renamed" (see DiffParser._parse_file_diff)
    old_start_line: Optional[int] = None  # hunk header values of the FIRST hunk only
    old_line_count: Optional[int] = None
    new_start_line: Optional[int] = None
    new_line_count: Optional[int] = None
    added_lines: List[str] = field(default_factory=list)  # content of '+' lines, prefix stripped
    removed_lines: List[str] = field(default_factory=list)  # content of '-' lines, prefix stripped
    context_lines: List[str] = field(default_factory=list)  # unchanged context lines, prefix stripped


@dataclass
class CodeReviewConfig:
    """Configuration for code review assistant."""
    # Aspects the review should emphasize; defaults to quality/security/maintainability.
    focus_areas: List[ReviewFocus] = field(default_factory=lambda: [
        ReviewFocus.CODE_QUALITY,
        ReviewFocus.SECURITY,
        ReviewFocus.MAINTAINABILITY
    ])
    output_format: str = "detailed"  # "brief", "detailed", "json"
    include_suggestions: bool = True  # include actionable fix suggestions in output
    include_examples: bool = True  # include illustrative code examples in output
    max_context_files: int = 10  # cap on related files pulled in for extra context
    claude_config: Optional[ClaudeConfig] = None  # overrides the assistant's default Claude settings when set
    analyze_related_files: bool = True  # whether to gather sibling/test files for context
    custom_rules: Dict[str, Any] = field(default_factory=dict)  # project-specific review rules


class GitRepositoryAnalyzer:
    """
    Analyzes Git repositories to extract project context and information.

    This class provides comprehensive analysis of repository structure,
    dependencies, coding standards, and project metadata to enhance
    code review quality and context awareness.
    """

    def __init__(self, logger: Optional[logging.Logger] = None):
        """
        Initialize the Git repository analyzer.

        Args:
            logger: Optional logger for operation tracking
        """
        self.logger = logger or logging.getLogger(__name__)
        # Dispatch table: detected project type -> language-specific detector.
        self._project_detectors = {
            ProjectType.PYTHON: self._detect_python_project,
            ProjectType.JAVASCRIPT: self._detect_javascript_project,
            ProjectType.TYPESCRIPT: self._detect_typescript_project,
            ProjectType.GO: self._detect_go_project,
            ProjectType.RUST: self._detect_rust_project,
            ProjectType.JAVA: self._detect_java_project,
            ProjectType.CPP: self._detect_cpp_project,
        }

    def analyze_repository(self, repo_path: str) -> ProjectInfo:
        """
        Analyze a Git repository and extract comprehensive project information.

        Args:
            repo_path: Path to the Git repository

        Returns:
            ProjectInfo object containing repository analysis

        Raises:
            ValueError: If repo_path is not a valid Git repository
            OSError: If repository access fails
        """
        repo_path = os.path.abspath(repo_path)

        if not self._is_git_repository(repo_path):
            raise ValueError(f"Path {repo_path} is not a Git repository")

        self.logger.info(f"Analyzing repository: {repo_path}")

        # Detect project type
        project_type = self._detect_project_type(repo_path)
        self.logger.info(f"Detected project type: {project_type.value}")

        # Initialize project info
        project_info = ProjectInfo(
            project_type=project_type,
            root_path=repo_path
        )

        # Analyze project-specific details
        if project_type in self._project_detectors:
            self._project_detectors[project_type](repo_path, project_info)

        # Extract Git information
        project_info.git_info = self._extract_git_info(repo_path)

        # Analyze file structure
        project_info.file_structure = self._analyze_file_structure(repo_path)

        self.logger.info(f"Repository analysis completed for {project_type.value} project")
        return project_info

    def _is_git_repository(self, path: str) -> bool:
        """Check if the given path is a Git repository."""
        try:
            result = subprocess.run(
                ["git", "rev-parse", "--git-dir"],
                cwd=path,
                capture_output=True,
                text=True,
                timeout=10
            )
            return result.returncode == 0
        except (subprocess.TimeoutExpired, FileNotFoundError, OSError):
            # git missing, path missing, or git hanging all mean "not a repo" here.
            return False

    def _detect_project_type(self, repo_path: str) -> ProjectType:
        """Detect the primary project type based on files and structure."""
        files: List[Path] = []
        try:
            for root, dirs, filenames in os.walk(repo_path):
                # Prune hidden and build/cache directories in place so os.walk
                # never descends into them.  Pruning children (rather than
                # inspecting every component of the absolute path, as before)
                # also avoids falsely skipping the whole tree when the
                # repository itself lives under a hidden ancestor directory
                # (e.g. /home/user/.workspace/repo).
                dirs[:] = [d for d in dirs
                           if not d.startswith('.')
                           and d not in ('node_modules', 'target', 'build', '__pycache__')]
                files.extend(Path(root) / f for f in filenames)
        except OSError as e:
            self.logger.warning(f"Error walking directory {repo_path}: {e}")
            return ProjectType.UNKNOWN

        # Project type indicators: extensions (leading '.') and marker file names.
        type_indicators = {
            ProjectType.PYTHON: ['.py', 'requirements.txt', 'setup.py', 'pyproject.toml', 'Pipfile'],
            ProjectType.JAVASCRIPT: ['package.json', '.js', '.jsx'],
            ProjectType.TYPESCRIPT: ['.ts', '.tsx', 'tsconfig.json'],
            ProjectType.GO: ['.go', 'go.mod', 'go.sum'],
            ProjectType.RUST: ['.rs', 'Cargo.toml', 'Cargo.lock'],
            ProjectType.JAVA: ['.java', 'pom.xml', 'build.gradle', '.gradle'],
            ProjectType.CPP: ['.cpp', '.cc', '.cxx', '.h', '.hpp', 'CMakeLists.txt', 'Makefile']
        }

        # Score each project type
        type_scores = {}
        for project_type, indicators in type_indicators.items():
            score = 0
            for file_path in files:
                file_name = file_path.name
                file_suffix = file_path.suffix

                for indicator in indicators:
                    if indicator.startswith('.'):
                        if file_suffix == indicator:
                            score += 1
                    else:
                        if file_name == indicator:
                            score += 5  # Config files have higher weight

            if score > 0:
                type_scores[project_type] = score

        # Return the project type with highest score
        if type_scores:
            return max(type_scores.items(), key=lambda x: x[1])[0]

        return ProjectType.UNKNOWN

    def _extract_git_info(self, repo_path: str) -> Dict[str, str]:
        """Extract basic Git repository information.

        Each field falls back to "unknown" on any failure so callers never
        have to guard against missing keys.
        """
        git_info = {}

        commands = {
            'current_branch': ['git', 'branch', '--show-current'],
            'remote_url': ['git', 'config', '--get', 'remote.origin.url'],
            'last_commit': ['git', 'log', '-1', '--format=%H'],
            'last_commit_message': ['git', 'log', '-1', '--format=%s'],
            'last_commit_author': ['git', 'log', '-1', '--format=%an'],
            'last_commit_date': ['git', 'log', '-1', '--format=%ad', '--date=iso']
        }

        for key, cmd in commands.items():
            try:
                result = subprocess.run(
                    cmd,
                    cwd=repo_path,
                    capture_output=True,
                    text=True,
                    timeout=10
                )
                if result.returncode == 0:
                    git_info[key] = result.stdout.strip()
                else:
                    git_info[key] = "unknown"
            except (subprocess.TimeoutExpired, FileNotFoundError, OSError):
                # OSError added for consistency with _is_git_repository
                # (e.g. cwd vanished between calls).
                git_info[key] = "unknown"

        return git_info

    def _analyze_file_structure(self, repo_path: str) -> Dict[str, List[str]]:
        """Analyze repository file structure and organization."""
        structure = {
            'source_dirs': [],
            'test_dirs': [],
            'config_files': [],
            'documentation': [],
            'build_files': []
        }

        try:
            for root, dirs, files in os.walk(repo_path):
                rel_root = os.path.relpath(root, repo_path)

                # Skip hidden and build directories (in-place prune)
                dirs[:] = [d for d in dirs if not d.startswith('.') and
                          d not in ['node_modules', 'target', 'build', '__pycache__', 'dist']]

                # Categorize directories
                if any(test_indicator in rel_root.lower()
                       for test_indicator in ['test', 'tests', 'spec', 'specs']):
                    structure['test_dirs'].append(rel_root)
                elif any(src_indicator in rel_root.lower()
                         for src_indicator in ['src', 'source', 'lib', 'app']):
                    structure['source_dirs'].append(rel_root)

                # Categorize files
                for file in files:
                    file_path = os.path.join(rel_root, file)
                    file_lower = file.lower()

                    if file_lower in ['readme.md', 'readme.txt', 'license', 'changelog.md']:
                        structure['documentation'].append(file_path)
                    elif file_lower in ['makefile', 'dockerfile', 'docker-compose.yml',
                                       'cmakelists.txt', 'build.gradle', 'pom.xml']:
                        structure['build_files'].append(file_path)
                    elif file.endswith(('.json', '.yaml', '.yml', '.toml', '.ini', '.cfg')):
                        structure['config_files'].append(file_path)

        except OSError as e:
            self.logger.warning(f"Error analyzing file structure: {e}")

        return structure

    def _detect_python_project(self, repo_path: str, project_info: ProjectInfo) -> None:
        """Detect Python project specifics (dependencies, frameworks, test tools)."""
        # Check for dependency files
        dep_files = ['requirements.txt', 'setup.py', 'pyproject.toml', 'Pipfile', 'environment.yml']
        for dep_file in dep_files:
            dep_path = os.path.join(repo_path, dep_file)
            if os.path.exists(dep_path):
                project_info.dependencies.extend(self._parse_python_dependencies(dep_path))

        # Check for common frameworks by their marker files
        framework_indicators = {
            'django': ['manage.py', 'settings.py'],
            'flask': ['app.py', 'application.py'],
            'fastapi': ['main.py'],
            'pytest': ['pytest.ini', 'conftest.py'],
            'unittest': [],  # Built-in
        }

        for framework, indicators in framework_indicators.items():
            if not indicators:  # Built-in frameworks are always listed
                project_info.frameworks.append(framework)
            elif any(os.path.exists(os.path.join(repo_path, ind)) for ind in indicators):
                project_info.frameworks.append(framework)

        # Check for testing frameworks mentioned in the dependency list
        test_indicators = ['pytest', 'unittest', 'nose', 'tox']
        for test_fw in test_indicators:
            if any(test_fw in dep for dep in project_info.dependencies):
                project_info.test_frameworks.append(test_fw)

    def _detect_javascript_project(self, repo_path: str, project_info: ProjectInfo) -> None:
        """Detect JavaScript/Node.js project specifics from package.json."""
        package_json_path = os.path.join(repo_path, 'package.json')
        if os.path.exists(package_json_path):
            try:
                with open(package_json_path, 'r', encoding='utf-8') as f:
                    package_data = json.load(f)
                    project_info.package_info = package_data

                    # Extract dependencies
                    for dep_type in ['dependencies', 'devDependencies']:
                        if dep_type in package_data:
                            project_info.dependencies.extend(package_data[dep_type].keys())

                    # Detect frameworks
                    frameworks = ['react', 'vue', 'angular', 'express', 'next', 'nuxt']
                    for framework in frameworks:
                        if framework in project_info.dependencies:
                            project_info.frameworks.append(framework)

                    # Detect test frameworks
                    test_frameworks = ['jest', 'mocha', 'chai', 'cypress', 'playwright']
                    for test_fw in test_frameworks:
                        if test_fw in project_info.dependencies:
                            project_info.test_frameworks.append(test_fw)

            except (json.JSONDecodeError, IOError) as e:
                self.logger.warning(f"Error parsing package.json: {e}")

    def _detect_typescript_project(self, repo_path: str, project_info: ProjectInfo) -> None:
        """Detect TypeScript project specifics."""
        # TypeScript projects are a superset of JavaScript projects
        self._detect_javascript_project(repo_path, project_info)

        # Check for TypeScript config
        ts_config_path = os.path.join(repo_path, 'tsconfig.json')
        if os.path.exists(ts_config_path):
            try:
                with open(ts_config_path, 'r', encoding='utf-8') as f:
                    ts_config = json.load(f)
                    project_info.coding_standards['typescript'] = ts_config
            except (json.JSONDecodeError, IOError) as e:
                self.logger.warning(f"Error parsing tsconfig.json: {e}")

    def _detect_go_project(self, repo_path: str, project_info: ProjectInfo) -> None:
        """Detect Go project specifics from go.mod."""
        go_mod_path = os.path.join(repo_path, 'go.mod')
        if os.path.exists(go_mod_path):
            try:
                with open(go_mod_path, 'r', encoding='utf-8') as f:
                    content = f.read()
                    # Extract module name and dependencies
                    lines = content.strip().split('\n')
                    for line in lines:
                        line = line.strip()
                        if line.startswith('module '):
                            project_info.package_info['module'] = line.split()[1]
                        elif line.startswith('require '):
                            # Simple single-line parsing; require blocks
                            # ("require (\n ... \n)") are not expanded here.
                            parts = line.split()
                            if len(parts) >= 2:
                                project_info.dependencies.append(parts[1])
            except IOError as e:
                self.logger.warning(f"Error parsing go.mod: {e}")

    def _detect_rust_project(self, repo_path: str, project_info: ProjectInfo) -> None:
        """Detect Rust project specifics from Cargo.toml."""
        cargo_toml_path = os.path.join(repo_path, 'Cargo.toml')
        if os.path.exists(cargo_toml_path):
            try:
                import tomllib  # Python 3.11+; ImportError handled below
                with open(cargo_toml_path, 'rb') as f:
                    cargo_data = tomllib.load(f)
                    project_info.package_info = cargo_data

                    # Extract dependencies
                    if 'dependencies' in cargo_data:
                        project_info.dependencies.extend(cargo_data['dependencies'].keys())
                    if 'dev-dependencies' in cargo_data:
                        project_info.dependencies.extend(cargo_data['dev-dependencies'].keys())

            except (ImportError, IOError, ValueError) as e:
                # ValueError covers tomllib.TOMLDecodeError (its subclass) on
                # malformed TOML, which previously crashed the analysis.
                self.logger.warning(f"Error parsing Cargo.toml: {e}")

    def _detect_java_project(self, repo_path: str, project_info: ProjectInfo) -> None:
        """Detect Java project specifics (build tooling only)."""
        # Check for Maven
        pom_path = os.path.join(repo_path, 'pom.xml')
        if os.path.exists(pom_path):
            project_info.frameworks.append('maven')
            # Could parse XML for dependencies - simplified for now

        # Check for Gradle
        gradle_files = ['build.gradle', 'build.gradle.kts']
        for gradle_file in gradle_files:
            if os.path.exists(os.path.join(repo_path, gradle_file)):
                project_info.frameworks.append('gradle')
                break

    def _detect_cpp_project(self, repo_path: str, project_info: ProjectInfo) -> None:
        """Detect C++ project specifics (build tooling only)."""
        # Check for CMake
        if os.path.exists(os.path.join(repo_path, 'CMakeLists.txt')):
            project_info.frameworks.append('cmake')

        # Check for Make
        if os.path.exists(os.path.join(repo_path, 'Makefile')):
            project_info.frameworks.append('make')

    def _parse_python_dependencies(self, file_path: str) -> List[str]:
        """Parse Python dependency files (requirements.txt or pyproject.toml).

        Returns:
            List of bare package names (version specifiers stripped);
            empty on any parse failure.
        """
        dependencies = []
        try:
            if file_path.endswith('requirements.txt'):
                with open(file_path, 'r', encoding='utf-8') as f:
                    for line in f:
                        line = line.strip()
                        # Skip blanks, comments, and pip options such as
                        # "-r other.txt" / "-e ." which are not package names.
                        if line and not line.startswith(('#', '-')):
                            # Extract package name (before version specifiers)
                            dep_name = re.split(r'[>=<!\s~]', line)[0]
                            if dep_name:
                                dependencies.append(dep_name)
            elif file_path.endswith('pyproject.toml'):
                import tomllib  # Python 3.11+; ImportError handled below
                with open(file_path, 'rb') as f:
                    data = tomllib.load(f)
                    if 'project' in data and 'dependencies' in data['project']:
                        for dep in data['project']['dependencies']:
                            dep_name = re.split(r'[>=<!\s~]', dep)[0]
                            if dep_name:
                                dependencies.append(dep_name)
        except (IOError, ImportError, ValueError) as e:
            # ValueError covers tomllib.TOMLDecodeError on malformed TOML.
            self.logger.warning(f"Error parsing dependencies from {file_path}: {e}")

        return dependencies

    def get_related_files(self, repo_path: str, changed_files: List[str],
                         max_files: int = 10) -> List[str]:
        """
        Get files related to the changed files for additional context.

        Args:
            repo_path: Repository path
            changed_files: List of files that were changed
            max_files: Maximum number of related files to return

        Returns:
            List of related file paths (relative to repo_path), excluding
            the changed files themselves
        """
        related_files = set()

        for changed_file in changed_files:
            file_path = os.path.join(repo_path, changed_file)
            if os.path.exists(file_path):
                # Add the corresponding test file, if one exists
                test_file = self._find_test_file(repo_path, changed_file)
                if test_file:
                    related_files.add(test_file)

                # Add source files in the same directory
                dir_path = os.path.dirname(file_path)
                try:
                    for file in os.listdir(dir_path):
                        if file.endswith(('.py', '.js', '.ts', '.go', '.rs', '.java', '.cpp', '.h')):
                            related_files.add(os.path.relpath(
                                os.path.join(dir_path, file), repo_path
                            ))
                except OSError:
                    continue

        # Remove changed files from related files
        related_files = related_files - set(changed_files)

        return list(related_files)[:max_files]

    def _find_test_file(self, repo_path: str, source_file: str) -> Optional[str]:
        """Find the corresponding test file for a source file, or None."""
        base_name = os.path.splitext(os.path.basename(source_file))[0]
        dir_name = os.path.dirname(source_file)

        # Common test-file naming conventions across supported languages
        test_patterns = [
            f"test_{base_name}.py",
            f"{base_name}_test.py",
            f"{base_name}.test.js",
            f"{base_name}.spec.js",
            f"{base_name}_test.go",
        ]

        # Check in same directory
        for pattern in test_patterns:
            test_path = os.path.join(repo_path, dir_name, pattern)
            if os.path.exists(test_path):
                return os.path.relpath(test_path, repo_path)

        # Check in conventional test directories at the repo root
        test_dirs = ['tests', 'test', 'spec']
        for test_dir in test_dirs:
            test_dir_path = os.path.join(repo_path, test_dir)
            if os.path.exists(test_dir_path):
                for pattern in test_patterns:
                    test_path = os.path.join(test_dir_path, pattern)
                    if os.path.exists(test_path):
                        return os.path.relpath(test_path, repo_path)

        return None


class DiffParser:
    """
    Parses Git diff content to extract structured change information.

    This class handles unified diff format parsing, extracting file changes,
    line modifications, and contextual information for code review analysis.
    """

    def __init__(self, logger: Optional[logging.Logger] = None):
        """
        Initialize the diff parser.

        Args:
            logger: Optional logger for operation tracking
        """
        self.logger = logger or logging.getLogger(__name__)

    def parse_diff(self, diff_content: str) -> List[DiffChange]:
        """
        Parse diff content into structured change objects.

        Supports both standard Git diff format and plain text with diff-like content.

        Args:
            diff_content: Raw diff content or plain text containing changes

        Returns:
            List of DiffChange objects representing the changes

        Raises:
            ValueError: If diff content is invalid or unsupported
        """
        if not diff_content.strip():
            raise ValueError("Empty diff content provided")

        self.logger.info("Parsing diff content")

        # First try to parse as standard Git diff
        try:
            changes = self._parse_git_diff(diff_content)
            if changes:
                self.logger.info(f"Parsed {len(changes)} file changes from Git diff")
                return changes
        except Exception as e:
            self.logger.debug(f"Git diff parsing failed: {e}")

        # If Git diff parsing fails, fall back to heuristic plain-text parsing
        self.logger.info("Attempting to parse as plain text with diff content")
        changes = self._parse_plain_text_diff(diff_content)

        self.logger.info(f"Parsed {len(changes)} changes from plain text")
        return changes

    def _parse_git_diff(self, diff_content: str) -> List[DiffChange]:
        """Parse standard Git diff format into one DiffChange per file header."""
        changes = []
        lines = diff_content.split('\n')
        i = 0

        while i < len(lines):
            line = lines[i]

            # Look for file header
            if line.startswith('diff --git'):
                change, next_i = self._parse_file_diff(lines, i)
                if change:
                    changes.append(change)
                i = next_i
            else:
                i += 1

        return changes

    def _parse_plain_text_diff(self, text_content: str) -> List[DiffChange]:
        """
        Parse plain text that might contain diff-like content.

        This method looks for common patterns that indicate code changes:
        - Lines starting with + or -
        - File path mentions
        - Code blocks
        """
        lines = text_content.split('\n')
        changes = []

        # Look for potential file paths in the text
        # (uses the module-level `re`; previously re-imported redundantly here)
        file_patterns = [
            r'(?:^|\s)([a-zA-Z_][a-zA-Z0-9_/.-]*\.[a-zA-Z]+)(?:\s|$)',  # filename.ext
            r'(?:File:|Path:|文件:)\s*([^\s]+)',  # File: path
            r'```\w*\s*([^\s]+\.[a-zA-Z]+)',  # ```python filename.py
        ]

        potential_files = set()
        for line in lines:
            for pattern in file_patterns:
                matches = re.findall(pattern, line, re.IGNORECASE)
                potential_files.update(matches)

        # If no specific files found, create a generic change
        if not potential_files:
            potential_files = {'modified_code.txt'}

        for file_path in potential_files:
            # Extract added/removed lines from the text
            added_lines = []
            removed_lines = []
            context_lines = []

            for line in lines:
                line_stripped = line.strip()
                if line_stripped.startswith('+') and not line_stripped.startswith('+++'):
                    added_lines.append(line_stripped[1:].strip())
                elif line_stripped.startswith('-') and not line_stripped.startswith('---'):
                    removed_lines.append(line_stripped[1:].strip())
                elif line_stripped and not line_stripped.startswith(('diff', '@@', 'index')):
                    # Potential context line
                    context_lines.append(line_stripped)

            # If no +/- lines found, treat the entire content as added content
            if not added_lines and not removed_lines:
                # Split text into meaningful chunks
                meaningful_lines = [line.strip() for line in lines
                                  if line.strip() and not line.strip().startswith('#')]
                if meaningful_lines:
                    added_lines = meaningful_lines[:20]  # Limit to first 20 lines

            # Classify change type from which side of the diff has content
            change_type = "modified"
            if added_lines and not removed_lines:
                change_type = "added"
            elif removed_lines and not added_lines:
                change_type = "deleted"

            change = DiffChange(
                file_path=file_path,
                change_type=change_type,
                old_start_line=1,
                new_start_line=1,
                old_line_count=len(removed_lines) if removed_lines else 1,
                new_line_count=len(added_lines) if added_lines else 1,
                added_lines=added_lines,
                removed_lines=removed_lines,
                context_lines=context_lines[:10]  # Limit context
            )

            changes.append(change)

        return changes

    def _parse_file_diff(self, lines: List[str], start_idx: int) -> Tuple[Optional[DiffChange], int]:
        """
        Parse a single file's diff section.

        Args:
            lines: All diff lines
            start_idx: Starting index for this file's diff

        Returns:
            Tuple of (DiffChange object or None, next index to process)
        """
        i = start_idx
        change = None

        # Parse file header
        if i < len(lines) and lines[i].startswith('diff --git'):
            # Extract file paths from diff --git a/path b/path
            git_line = lines[i]
            match = re.search(r'diff --git a/([^\s]+) b/([^\s]+)', git_line)
            if match:
                old_path, new_path = match.groups()
                file_path = new_path  # Use new path as primary

                change = DiffChange(
                    file_path=file_path,
                    change_type="modified"  # Default, will be refined
                )
            i += 1

        # Skip index and other metadata lines, but stop at the next file
        # header: a hunk-less entry (binary change, pure rename, mode change)
        # would otherwise swallow every following file's diff.
        while (i < len(lines)
               and not lines[i].startswith('@@')
               and not lines[i].startswith('diff --git')):
            line = lines[i]

            # Detect file operation type
            if change:
                if line.startswith('new file mode'):
                    change.change_type = "added"
                elif line.startswith('deleted file mode'):
                    change.change_type = "deleted"
                elif line.startswith('rename from'):
                    change.change_type = "renamed"

            i += 1

        # Parse hunks (consumed even when the header failed to parse, so the
        # caller resumes at the correct position)
        while i < len(lines) and lines[i].startswith('@@'):
            i = self._parse_hunk(lines, i, change)

        return change, i

    def _parse_hunk(self, lines: List[str], start_idx: int,
                    change: Optional[DiffChange]) -> int:
        """
        Parse a single hunk (section of changes) in a file diff.

        Args:
            lines: All diff lines
            start_idx: Starting index of the hunk
            change: DiffChange object to populate, or None if the file header
                could not be parsed (the hunk is then skipped but still consumed)

        Returns:
            Next index to process
        """
        i = start_idx

        # Parse hunk header: @@ -old_start,old_count +new_start,new_count @@
        hunk_header = lines[i]
        hunk_match = re.search(r'@@\s*-(\d+)(?:,(\d+))?\s*\+(\d+)(?:,(\d+))?\s*@@', hunk_header)

        if hunk_match and change is not None:
            old_start = int(hunk_match.group(1))
            old_count = int(hunk_match.group(2) or 1)  # count defaults to 1 when omitted
            new_start = int(hunk_match.group(3))
            new_count = int(hunk_match.group(4) or 1)

            # Store hunk information (for the first hunk only)
            if change.old_start_line is None:
                change.old_start_line = old_start
                change.old_line_count = old_count
                change.new_start_line = new_start
                change.new_line_count = new_count

        i += 1

        # Parse hunk content
        while i < len(lines):
            line = lines[i]

            # Check if we've reached the next hunk or file
            if line.startswith('@@') or line.startswith('diff --git'):
                break

            # Process line content (guarded: change is None when the file
            # header regex failed; previously this raised AttributeError)
            if change is not None:
                if line.startswith('+'):
                    change.added_lines.append(line[1:])  # Remove + prefix
                elif line.startswith('-'):
                    change.removed_lines.append(line[1:])  # Remove - prefix
                elif line.startswith(' '):
                    change.context_lines.append(line[1:])  # Remove space prefix
                elif line.startswith('\\'):
                    # Handle "No newline at end of file" messages
                    pass

            i += 1

        return i

    def get_changed_files(self, diff_content: str) -> List[str]:
        """
        Extract list of changed file paths from diff content.

        Args:
            diff_content: Raw unified diff content

        Returns:
            List of file paths that were changed
        """
        changes = self.parse_diff(diff_content)
        return [change.file_path for change in changes]

    def get_diff_stats(self, diff_content: str) -> Dict[str, int]:
        """
        Get statistics about the diff content.

        Args:
            diff_content: Raw unified diff content

        Returns:
            Dictionary containing diff statistics
        """
        changes = self.parse_diff(diff_content)

        stats = {
            'files_changed': len(changes),
            'lines_added': sum(len(change.added_lines) for change in changes),
            'lines_removed': sum(len(change.removed_lines) for change in changes),
            'files_added': len([c for c in changes if c.change_type == "added"]),
            'files_deleted': len([c for c in changes if c.change_type == "deleted"]),
            'files_modified': len([c for c in changes if c.change_type == "modified"]),
        }

        stats['lines_changed'] = stats['lines_added'] + stats['lines_removed']
        return stats

    def filter_changes_by_type(self, changes: List[DiffChange],
                              change_types: List[str]) -> List[DiffChange]:
        """
        Filter changes by change type.

        Args:
            changes: List of DiffChange objects
            change_types: List of change types to include

        Returns:
            Filtered list of changes
        """
        return [change for change in changes if change.change_type in change_types]

    def get_context_around_changes(self, change: DiffChange,
                                  context_lines: int = 3) -> Dict[str, List[str]]:
        """
        Get context lines around changes for better analysis.

        Args:
            change: DiffChange object
            context_lines: Number of context lines to include

        Returns:
            Dictionary with before/after context
        """
        # This is a simplified implementation
        # In a full implementation, you might need to read the actual file
        # or use more sophisticated context extraction

        return {
            'before_context': change.context_lines[:context_lines],
            'after_context': change.context_lines[-context_lines:] if len(change.context_lines) > context_lines else change.context_lines,
            'added_lines': change.added_lines,
            'removed_lines': change.removed_lines
        }


@dataclass
class ReviewResult:
    """Result of a code review analysis.

    Bundles the structured output of a review run: a human-readable
    summary, per-focus-area category breakdowns, extracted suggestions,
    and metadata about the reviewed diff and project.
    """
    summary: str
    categories: Dict[str, Dict[str, Any]]
    suggestions: List[Dict[str, str]]
    files_reviewed: List[str]
    diff_stats: Dict[str, int]
    project_context: Dict[str, Any]
    review_time: str
    focus_areas: List[str]
    # Raw Claude response text. Annotated Optional[str] (was `str` with a
    # None default, which contradicted the annotation).
    raw_response: Optional[str] = None


@dataclass
class ImprovementSuggestion:
    """Represents a simulated improvement suggestion with code comparison."""
    # Path of the file the suggestion applies to.
    file_path: str
    # Diff change type: "added", "deleted", "modified" or "renamed".
    change_type: str
    # One-line human-readable headline for the suggestion.
    summary: str
    # "Before" snippet (or a placeholder such as "<not available>").
    original_code: str
    # "After" snippet (or a placeholder such as "<unchanged>").
    modified_code: str
    # Prose explanation of why the change deserves attention.
    rationale: str
    # Heuristic risk level: "low", "medium" or "high".
    severity: str = "medium"
    # Anchor line in the new file (old file for deletions), when known.
    line_number: Optional[int] = None



class CodeReviewAssistant:
    """
    Main class for conducting intelligent code reviews using Claude AI.

    This class orchestrates repository analysis, diff parsing, and AI-powered
    code review to provide comprehensive feedback on code changes.
    """

    def __init__(self, config: Optional[CodeReviewConfig] = None,
                 logger: Optional[logging.Logger] = None):
        """
        Initialize the code review assistant.

        Wires up the Claude CLI client plus the repository and diff
        analyzers, falling back to defaults when no config or logger
        is supplied.

        Args:
            config: Configuration for review behavior
            logger: Optional logger for operation tracking
        """
        self.config = config or CodeReviewConfig()
        self.logger = logger or logging.getLogger(__name__)

        # Prefer an explicitly configured Claude client; otherwise build one
        # tuned for long-running review analysis.
        if self.config.claude_config:
            claude_config = self.config.claude_config
        else:
            claude_config = ClaudeConfig(
                timeout=300,  # Longer timeout for complex analysis
                max_history_entries=5,  # Limited history for code review
                log_level="INFO",
            )
        self.claude_cli = ClaudeCLI(config=claude_config, logger=self.logger)

        # Helpers for repository context analysis and diff parsing.
        self.repo_analyzer = GitRepositoryAnalyzer(logger=self.logger)
        self.diff_parser = DiffParser(logger=self.logger)

    def review_diff(self, diff_content: str, repo_path: str,
                   focus_areas: Optional[List[ReviewFocus]] = None) -> ReviewResult:
        """
        Conduct a comprehensive code review of the provided diff.

        Pipeline: validate inputs, parse the diff, analyze the repository
        for context, build a review prompt, run it through Claude, and
        structure the response into a ReviewResult.

        Args:
            diff_content: Git diff content to review
            repo_path: Path to the Git repository
            focus_areas: Optional list of specific areas to focus on

        Returns:
            ReviewResult containing comprehensive analysis

        Raises:
            ValueError: If inputs are invalid
            ClaudeError: If AI analysis fails
        """
        self.logger.info(f"Starting code review for repository: {repo_path}")

        # Input validation: reject an empty diff or a missing repo path early.
        if not diff_content.strip():
            raise ValueError("Empty diff content provided")
        if not os.path.exists(repo_path):
            raise ValueError(f"Repository path does not exist: {repo_path}")

        # Caller-supplied focus areas win over the configured defaults.
        selected_focus = focus_areas or self.config.focus_areas

        # Parse the diff once for both the change list and the statistics.
        file_changes = self.diff_parser.parse_diff(diff_content)
        stats = self.diff_parser.get_diff_stats(diff_content)
        self.logger.info(f"Parsed {len(file_changes)} file changes")

        # Gather repository-level context for the prompt.
        project_info = self.repo_analyzer.analyze_repository(repo_path)

        # Optionally pull in files related to the changed ones.
        changed_paths = [item.file_path for item in file_changes]
        related = (
            self.repo_analyzer.get_related_files(
                repo_path, changed_paths, self.config.max_context_files
            )
            if self.config.analyze_related_files
            else []
        )

        prompt = self._build_review_prompt(
            file_changes, project_info, stats, selected_focus, related, repo_path
        )

        # Run the AI analysis; surface ClaudeError to the caller after logging.
        try:
            self.logger.info("Conducting AI-powered code review analysis")
            response = self.claude_cli.run(prompt, verbose=False)
        except ClaudeError as exc:
            self.logger.error(f"Claude analysis failed: {exc}")
            raise

        structured = self._parse_review_response(
            response, file_changes, project_info, stats, selected_focus
        )

        self.logger.info("Code review completed successfully")
        return structured

    def _build_simulated_suggestion(self, change: DiffChange) -> Optional[ImprovementSuggestion]:
        """Generate a heuristic improvement suggestion from a diff change.

        Uses the removed lines (or leading context) as the "before" snippet,
        the added lines as the "after" snippet, and scans the added lines
        for suspicious markers to pick a severity and rationale.
        """
        # "Before" snapshot: prefer removed lines, fall back to context.
        before_lines = change.removed_lines or change.context_lines[:10]
        before_snippet = "\n".join(before_lines).strip()
        if not before_snippet:
            before_snippet = (
                "<no previous implementation>"
                if change.change_type == "added"
                else "<not available>"
            )

        after_lines = change.added_lines
        after_snippet = "\n".join(after_lines).strip()
        if not after_snippet:
            after_snippet = (
                "<code removed in this change>"
                if change.change_type == "deleted"
                else "<unchanged>"
            )

        headline_prefixes = {
            "added": "New code added",
            "deleted": "Code removed",
            "renamed": "File renamed",
            "modified": "Code updated",
        }
        headline_prefix = headline_prefixes.get(change.change_type, "Code updated")
        headline = f"{headline_prefix} in {change.file_path}"

        # Anchor deletions to the old file; everything else to the new one.
        if change.change_type == "deleted":
            anchor_line = change.old_start_line
        else:
            anchor_line = change.new_start_line

        risky_markers = {
            "eval(": ("Using eval can introduce security issues.", "high"),
            "exec(": ("Using exec can execute arbitrary code.", "high"),
            "TODO": ("Left TODO markers should be resolved before merging.", "low"),
            "print(": ("Debug print statements should be removed or replaced with logging.", "low"),
            "pdb.set_trace": ("Remove debugging breakpoints before committing.", "high"),
            "shell=True": ("shell=True in subprocess calls can be risky; validate inputs.", "high"),
        }

        notes: List[str] = []
        saw_high = False
        saw_low = False
        for added_line in after_lines:
            for marker, (note, level) in risky_markers.items():
                if marker in added_line:
                    notes.append(note)
                    if level == "high":
                        saw_high = True
                    elif level == "low":
                        saw_low = True

        # High trumps low; with no markers at all the severity stays medium.
        risk_level = "high" if saw_high else ("low" if saw_low else "medium")

        if change.change_type == "deleted" and not after_lines:
            notes.append("Confirm that removing this functionality does not break existing workflows.")

        if not notes:
            notes.append("Review the updated logic and ensure edge cases are covered by tests.")

        # dict.fromkeys de-duplicates while preserving first-seen order.
        explanation = " ".join(dict.fromkeys(notes))

        return ImprovementSuggestion(
            file_path=change.file_path,
            change_type=change.change_type,
            summary=headline,
            original_code=before_snippet,
            modified_code=after_snippet,
            rationale=explanation,
            severity=risk_level,
            line_number=anchor_line,
        )

    def _compose_simulated_summary(self, diff_stats: Dict[str, int],
                                   suggestions: List[ImprovementSuggestion]) -> str:
        """Build a one-sentence summary for simulated review runs.

        Args:
            diff_stats: Diff statistics (files changed, lines added/removed).
            suggestions: Suggestions produced by the simulated review.

        Returns:
            A comma-joined sentence ending with a period.
        """
        total = len(suggestions)
        severe = len([item for item in suggestions if item.severity == "high"])
        # Fall back to the suggestion count when the stats dict lacks a key.
        touched = diff_stats.get("files_changed", total)
        plus = diff_stats.get("lines_added", 0)
        minus = diff_stats.get("lines_removed", 0)

        fragments = [
            f"Evaluated {touched} file(s)",
            f"{plus} additions",
            f"{minus} deletions",
            f"generated {total} suggestion(s)",
        ]
        if severe:
            fragments.append(f"including {severe} high severity item(s)")

        return ", ".join(fragments) + "."

    def _build_review_prompt(self, changes: List[DiffChange], project_info: ProjectInfo,
                            diff_stats: Dict[str, int], focus_areas: List[ReviewFocus],
                            related_files: List[str], repo_path: str) -> str:
        """
        Build a comprehensive prompt for Claude AI code review.

        The prompt is assembled as Chinese markdown (the intended review
        output language) from several sections: project background, change
        statistics, review focus areas, related-file context, the diff
        content itself, and strict response-formatting instructions.

        Args:
            changes: Parsed diff changes
            project_info: Repository analysis results
            diff_stats: Diff statistics
            focus_areas: Areas to focus the review on
            related_files: Related files for context
            repo_path: Repository path

        Returns:
            Formatted prompt string for Claude
        """
        prompt_parts = []

        # Header and context
        prompt_parts.append("# 代码审查请求")
        prompt_parts.append("请对以下代码变更进行全面的代码审查，并提供具体的修改建议。")
        prompt_parts.append("")

        # Project context
        prompt_parts.append("## 项目背景")
        prompt_parts.append(f"- **项目类型**: {project_info.project_type.value}")
        prompt_parts.append(f"- **使用框架**: {', '.join(project_info.frameworks) or '未检测到'}")
        prompt_parts.append(f"- **依赖包数量**: {len(project_info.dependencies)} 个")
        prompt_parts.append(f"- **测试框架**: {', '.join(project_info.test_frameworks) or '未检测到'}")

        # Only mention the branch when the analyzer detected one.
        if project_info.git_info.get('current_branch'):
            prompt_parts.append(f"- **当前分支**: {project_info.git_info['current_branch']}")

        prompt_parts.append("")

        # Change statistics
        prompt_parts.append("## 变更摘要")
        prompt_parts.append(f"- **修改文件数**: {diff_stats['files_changed']}")
        prompt_parts.append(f"- **新增行数**: {diff_stats['lines_added']}")
        prompt_parts.append(f"- **删除行数**: {diff_stats['lines_removed']}")
        prompt_parts.append(f"- **新增文件数**: {diff_stats['files_added']}")
        prompt_parts.append(f"- **删除文件数**: {diff_stats['files_deleted']}")
        prompt_parts.append("")

        # Focus areas: Chinese description for each ReviewFocus member.
        focus_descriptions = {
            ReviewFocus.CODE_QUALITY: "代码风格、可读性和最佳实践",
            ReviewFocus.SECURITY: "安全漏洞和风险",
            ReviewFocus.PERFORMANCE: "性能影响和优化建议",
            ReviewFocus.MAINTAINABILITY: "长期可维护性和技术债务",
            ReviewFocus.TESTING: "测试覆盖率和测试最佳实践",
            ReviewFocus.DOCUMENTATION: "文档质量和完整性"
        }

        prompt_parts.append("## 审查重点")
        for focus in focus_areas:
            desc = focus_descriptions.get(focus, focus.value)
            # Chinese display name for the focus area; falls back to the
            # raw enum value for unmapped members.
            focus_name = {
                'code_quality': '代码质量',
                'security': '安全性',
                'performance': '性能',
                'maintainability': '可维护性',
                'testing': '测试',
                'documentation': '文档'
            }.get(focus.value, focus.value)
            prompt_parts.append(f"- **{focus_name}**: {desc}")
        prompt_parts.append("")

        # Related files context
        if related_files:
            prompt_parts.append("## 相关文件上下文")
            for rel_file in related_files[:5]:  # Limit to avoid prompt bloat
                file_path = os.path.join(repo_path, rel_file)
                if os.path.exists(file_path):
                    try:
                        with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                            content = f.read()[:1000]  # Limit content size
                        prompt_parts.append(f"### {rel_file}")
                        prompt_parts.append(f"```{self._get_file_language(rel_file)}")
                        prompt_parts.append(content)
                        if len(content) >= 1000:
                            prompt_parts.append("... (已截断)")
                        prompt_parts.append("```")
                        prompt_parts.append("")
                    except (IOError, UnicodeDecodeError):
                        # Unreadable related file: skip it silently.
                        continue

        # Detailed changes
        prompt_parts.append("## 需要审查的代码变更")
        for i, change in enumerate(changes, 1):
            # Chinese label for the change type.
            change_type_cn = {
                'added': '新增',
                'deleted': '删除',
                'modified': '修改',
                'renamed': '重命名'
            }.get(change.change_type, change.change_type)

            prompt_parts.append(f"### {i}. {change.file_path} ({change_type_cn})")

            # Deleted files get a header only; no line-level detail below.
            if change.change_type != "deleted":
                # Show added lines with line numbers
                if change.added_lines:
                    prompt_parts.append("**新增代码:**")
                    prompt_parts.append(f"```{self._get_file_language(change.file_path)}")
                    start_line = change.new_start_line or 1
                    for idx, line in enumerate(change.added_lines[:50]):  # Limit to avoid prompt bloat
                        line_num = start_line + idx
                        prompt_parts.append(f"{line_num:4d}: {line}")
                    if len(change.added_lines) > 50:
                        prompt_parts.append(f"... 还有 {len(change.added_lines) - 50} 行")
                    prompt_parts.append("```")
                    prompt_parts.append("")

                # Show removed lines with line numbers
                if change.removed_lines:
                    prompt_parts.append("**删除代码:**")
                    prompt_parts.append(f"```{self._get_file_language(change.file_path)}")
                    start_line = change.old_start_line or 1
                    for idx, line in enumerate(change.removed_lines[:50]):
                        line_num = start_line + idx
                        prompt_parts.append(f"{line_num:4d}: {line}")
                    if len(change.removed_lines) > 50:
                        prompt_parts.append(f"... 还有 {len(change.removed_lines) - 50} 行")
                    prompt_parts.append("```")
                    prompt_parts.append("")

                # Show context if available
                if change.context_lines:
                    prompt_parts.append("**上下文代码:**")
                    prompt_parts.append(f"```{self._get_file_language(change.file_path)}")
                    for line in change.context_lines[:10]:
                        prompt_parts.append(line)
                    prompt_parts.append("```")
                    prompt_parts.append("")

        # Review instructions: required structure of the AI's answer.
        prompt_parts.append("## 审查要求")
        prompt_parts.append("请按照以下结构提供全面的代码审查：")
        prompt_parts.append("如果没找到需要审查的文件，返回异常即可")
        prompt_parts.append("1. **总体评估**: 简要总结")
        prompt_parts.append("2. **分类分析**: 针对每个重点领域的详细分析")
        prompt_parts.append("3. **具体问题**: 列出任何bug、安全问题或违规行为")
        prompt_parts.append("4. **修改建议**: 针对具体代码块的修改建议，请严格按照以下格式提供:")
        prompt_parts.append("   - 指出具体的行号和问题")
        prompt_parts.append("   - **原始代码**: 显示需要修改的代码块（用代码块标注）")
        prompt_parts.append("   - **修改后代码**: 显示修改后的代码块（用代码块标注）")
        prompt_parts.append("   - **修改原因**: 解释为什么要这样修改")
        prompt_parts.append("   - **修改好处**: 说明修改带来的好处")
        prompt_parts.append("   - **潜在风险**: 说明修改可能带来的风险或副作用")
        prompt_parts.append("5. **优秀方面**: 突出好的做法和编写良好的代码")
        prompt_parts.append("")
        # Strict suggestion template; _parse_review_response depends on the
        # "原始代码"/"修改后代码" markers requested here.
        prompt_parts.append("## 代码修改格式要求（必须遵守）")
        prompt_parts.append("对于每个代码修改建议，必须使用以下格式：")
        prompt_parts.append("```")
        prompt_parts.append("### 修改建议标题")
        prompt_parts.append("**问题**: 具体描述问题")
        prompt_parts.append("**行号**: 第X行")
        prompt_parts.append("**原始代码**:")
        prompt_parts.append("```python")
        prompt_parts.append("# 原始代码内容")
        prompt_parts.append("```")
        prompt_parts.append("**修改后代码**:")
        prompt_parts.append("```python")
        prompt_parts.append("# 修改后的代码内容")
        prompt_parts.append("```")
        prompt_parts.append("**修改原因**: 详细说明修改理由")
        prompt_parts.append("**修改好处**: 说明修改带来的好处")
        prompt_parts.append("**潜在风险**: 说明可能的副作用")
        prompt_parts.append("```")
        prompt_parts.append("")
        prompt_parts.append("### 重要说明：")
        prompt_parts.append("1. 每个修改建议必须包含完整的代码对比")
        prompt_parts.append("2. 必须使用 '原始代码:' 和 '修改后代码:' 的标记")
        prompt_parts.append("3. 代码块必须使用 ```python 或适当的语言标识")
        prompt_parts.append("4. 对于三元运算符优化，必须显示优化前后的完整对比")
        prompt_parts.append("5. 确保代码对比清晰易懂，突出改进点")
        prompt_parts.append("")
        prompt_parts.append("## 审查重点")
        prompt_parts.append("请重点关注以下方面并提供具体的改进建议：")
        prompt_parts.append("")
        prompt_parts.append("- **代码质量**: 命名规范、代码结构、可读性")
        prompt_parts.append("- **安全风险**: 输入验证、权限控制、数据保护")
        prompt_parts.append("- **性能优化**: 算法效率、资源使用、并发处理")
        prompt_parts.append("- **可维护性**: 模块化设计、错误处理、日志记录")
        prompt_parts.append("- **测试覆盖**: 单元测试、边界条件、异常情况")
        prompt_parts.append("- **文档完整性**: 注释清晰度、API文档、使用说明")
        prompt_parts.append("")
        prompt_parts.append("请使用清晰的markdown格式回复，并在适当的地方引用具体的行号。")
        prompt_parts.append("重要：对于每个修改建议，必须同时提供原始代码和修改后代码的完整对比")
        prompt_parts.append("这是代码审查的核心要求，没有代码对比的建议将被视为不完整的审查结果")

        return "\n".join(prompt_parts)

    def _get_file_language(self, file_path: str) -> str:
        """Get programming language identifier for syntax highlighting."""
        ext_map = {
            '.py': 'python',
            '.js': 'javascript',
            '.ts': 'typescript',
            '.jsx': 'jsx',
            '.tsx': 'tsx',
            '.go': 'go',
            '.rs': 'rust',
            '.java': 'java',
            '.cpp': 'cpp',
            '.c': 'c',
            '.h': 'c',
            '.hpp': 'cpp',
            '.rb': 'ruby',
            '.php': 'php',
            '.swift': 'swift',
            '.kt': 'kotlin',
            '.scala': 'scala',
            '.sh': 'bash',
            '.yaml': 'yaml',
            '.yml': 'yaml',
            '.json': 'json',
            '.xml': 'xml',
            '.html': 'html',
            '.css': 'css',
            '.sql': 'sql',
        }

        ext = Path(file_path).suffix.lower()
        return ext_map.get(ext, 'text')

    def _parse_review_response(self, review_response: str, changes: List[DiffChange],
                              project_info: ProjectInfo, diff_stats: Dict[str, int],
                              focus_areas: List[ReviewFocus]) -> ReviewResult:
        """
        Parse Claude's review response into structured result.

        Walks the response line by line, extracting numbered/bulleted
        suggestions plus their code blocks, reasoning, benefits and risks
        (both Chinese and English markers are recognized), then groups
        content by review category.

        Args:
            review_response: Raw response from Claude
            changes: Original diff changes
            project_info: Project context
            diff_stats: Diff statistics
            focus_areas: Review focus areas

        Returns:
            Structured ReviewResult object
        """
        from datetime import datetime

        # Extract suggestions with better Chinese support
        suggestions = []
        lines = review_response.split('\n')

        # Look for suggestion sections
        suggestion_markers = ['修改建议', '建议', 'suggestions', '推荐', '改进', '优化']
        code_examples = []  # NOTE(review): never populated or read below — appears unused
        current_suggestion = None

        for i, line in enumerate(lines):
            line = line.strip()

            # Check if this line starts a suggestion section
            # NOTE(review): this also skips *item* lines containing a marker
            # word (e.g. a bullet starting with "建议..."), since the check
            # runs before the bullet/number match below — confirm intended.
            if any(marker in line.lower() for marker in suggestion_markers):
                continue

            # Extract numbered or bulleted suggestions
            if re.match(r'^\d+\.|^[-*•]|^[a-zA-Z]\)', line):
                # A new item begins; flush the previous one first.
                if current_suggestion:
                    suggestions.append(current_suggestion)

                current_suggestion = {
                    'description': re.sub(r'^\d+\.|^[-*•]|^[a-zA-Z]\)', '', line).strip(),
                    'type': self._determine_suggestion_type(line),
                    'priority': self._determine_suggestion_priority(line),
                    'line_number': None,
                    'file_path': None,
                    'original_code': None,
                    'modified_code': None,
                    'code_example': None,
                    'reasoning': None,
                    'benefits': None,
                    'risks': None
                }

                # Look ahead for more detailed content
                # (the outer loop does not skip these lines; they are only
                # aggregated here for pattern extraction).
                detailed_content = []
                j = i + 1
                while j < len(lines) and not re.match(r'^\d+\.|^[-*•]|^[a-zA-Z]\)', lines[j]):
                    detailed_content.append(lines[j].rstrip('\r'))
                    j += 1

                # Parse detailed content for code examples, reasoning, and risks
                content_text = '\n'.join(detailed_content).strip()

                # Extract original code (look for "原始代码" or "original code" blocks)
                original_code_patterns = [
                    r'原始代码[:：]?\s*```\w*\s*\n(.*?)\n```',
                    r'original code[:：]?\s*```\w*\s*\n(.*?)\n```',
                    r'before[:：]?\s*```\w*\s*\n(.*?)\n```',
                    r'修改前[:：]?\s*```\w*\s*\n(.*?)\n```',
                    r'旧代码[:：]?\s*```\w*\s*\n(.*?)\n```',
                    r'当前代码[:：]?\s*```\w*\s*\n(.*?)\n```',
                    r'现有代码[:：]?\s*```\w*\s*\n(.*?)\n```'
                ]
                for pattern in original_code_patterns:
                    original_match = re.search(pattern, content_text, re.DOTALL | re.IGNORECASE)
                    if original_match:
                        current_suggestion['original_code'] = original_match.group(1).strip()
                        break

                # Extract modified code (look for "修改后代码" or "modified code" blocks)
                modified_code_patterns = [
                    r'修改后代码[:：]?\s*```\w*\s*\n(.*?)\n```',
                    r'modified code[:：]?\s*```\w*\s*\n(.*?)\n```',
                    r'after[:：]?\s*```\w*\s*\n(.*?)\n```',
                    r'新代码[:：]?\s*```\w*\s*\n(.*?)\n```',
                    r'建议代码[:：]?\s*```\w*\s*\n(.*?)\n```',
                    r'推荐代码[:：]?\s*```\w*\s*\n(.*?)\n```',
                    r'优化后[:：]?\s*```\w*\s*\n(.*?)\n```'
                ]
                for pattern in modified_code_patterns:
                    modified_match = re.search(pattern, content_text, re.DOTALL | re.IGNORECASE)
                    if modified_match:
                        current_suggestion['modified_code'] = modified_match.group(1).strip()
                        break

                # Extract code from diff-like format (fallback mechanism)
                if not current_suggestion['original_code'] and not current_suggestion['modified_code']:
                    # Look for diff-like patterns with - and + lines
                    diff_pattern = r'[-]\s*(.*?)\n[+]\s*(.*?)$'
                    diff_match = re.search(diff_pattern, content_text, re.DOTALL | re.MULTILINE)
                    if diff_match:
                        current_suggestion['original_code'] = diff_match.group(1).strip()
                        current_suggestion['modified_code'] = diff_match.group(2).strip()

                # Extract general code example (fallback)
                if not current_suggestion['original_code'] and not current_suggestion['modified_code']:
                    code_match = re.search(r'```(?:\w+)?\s*\n(.*?)\n```', content_text, re.DOTALL)
                    if code_match:
                        current_suggestion['code_example'] = code_match.group(1).strip()

                # Extract reasoning
                reasoning_patterns = [
                    r'(?:修改原因|原因|理由|因为|due to|because|reason)[:：]?\s*(.*?)(?:\.\s|$)',
                    r'(?:why|修改理由)[:：]?\s*(.*?)(?:\.\s|$)'
                ]
                for pattern in reasoning_patterns:
                    reasoning_match = re.search(pattern, content_text, re.IGNORECASE)
                    if reasoning_match:
                        current_suggestion['reasoning'] = reasoning_match.group(1).strip()
                        break

                # Extract benefits
                benefits_patterns = [
                    r'(?:修改好处|好处|benefit|advantage)[:：]?\s*(.*?)(?:\.\s|$)',
                    r'(?:优点|优势)[:：]?\s*(.*?)(?:\.\s|$)'
                ]
                for pattern in benefits_patterns:
                    benefits_match = re.search(pattern, content_text, re.IGNORECASE)
                    if benefits_match:
                        current_suggestion['benefits'] = benefits_match.group(1).strip()
                        break

                # Extract risks
                risk_patterns = [
                    r'(?:潜在风险|风险|副作用|risk|side effect)[:：]?\s*(.*?)(?:\.\s|$)',
                    r'(?:缺点|disadvantage)[:：]?\s*(.*?)(?:\.\s|$)'
                ]
                for pattern in risk_patterns:
                    risk_match = re.search(pattern, content_text, re.IGNORECASE)
                    if risk_match:
                        current_suggestion['risks'] = risk_match.group(1).strip()
                        break

            # Extract line number references
            # (non-bullet lines enrich the most recent suggestion, if any)
            elif current_suggestion:
                # Extract line number in various formats
                line_ref_patterns = [
                    r'(?:第|行号|line)\s*(\d+)',
                    r'(\d+)\s*行',
                    r'line\s*(\d+)'
                ]
                for pattern in line_ref_patterns:
                    line_ref_match = re.search(pattern, line, re.IGNORECASE)
                    if line_ref_match:
                        current_suggestion['line_number'] = int(line_ref_match.group(1))
                        break

                # Extract file path references
                file_ref_patterns = [
                    r'(?:文件|file)[：:]?\s*([^\s]+)',
                    r'([^\s]+\.\w+)\s*中',
                    r'in\s+([^\s]+\.\w+)'
                ]
                for pattern in file_ref_patterns:
                    file_ref_match = re.search(pattern, line, re.IGNORECASE)
                    if file_ref_match:
                        current_suggestion['file_path'] = file_ref_match.group(1)
                        break

        # Add the last suggestion
        if current_suggestion:
            suggestions.append(current_suggestion)

        # Enhanced category parsing
        categories = {}
        category_patterns = {
            'code_quality': ['代码质量', 'code quality', '代码风格'],
            'security': ['安全', 'security', '安全性'],
            'performance': ['性能', 'performance'],
            'maintainability': ['可维护性', 'maintainability', '维护'],
            'testing': ['测试', 'testing', 'test'],
            'documentation': ['文档', 'documentation', 'docs']
        }

        # Alternation of every keyword, used to delimit category sections.
        all_category_keywords = '|'.join(sum(category_patterns.values(), []))

        for focus in focus_areas:
            category_text = ""
            category_issues = []
            category_suggestions = []

            # Try to find category-specific content
            patterns = category_patterns.get(focus.value, [focus.value])
            for pattern in patterns:
                # Grab text from this keyword up to the next category keyword
                # (or end of response).
                pattern_match = re.search(
                    rf'{pattern}.*?(?=(?:{all_category_keywords})|$)',
                    review_response,
                    re.IGNORECASE | re.DOTALL
                )
                if pattern_match:
                    category_text = pattern_match.group(0)[:300]  # Limit length
                    break

            categories[focus.value] = {
                'issues': self._extract_issues_for_category(review_response, focus.value),
                'suggestions': [s for s in suggestions if self._is_suggestion_for_category(s, focus.value)],
                'analysis': category_text or f"针对{focus.value}的分析"
            }

        return ReviewResult(
            summary=self._extract_summary(review_response),
            categories=categories,
            suggestions=suggestions,
            files_reviewed=[change.file_path for change in changes],
            diff_stats=diff_stats,
            project_context={
                'project_type': project_info.project_type.value,
                'frameworks': project_info.frameworks,
                'dependencies_count': len(project_info.dependencies)
            },
            review_time=datetime.now().isoformat(),
            focus_areas=[focus.value for focus in focus_areas],
            raw_response=review_response  # preserve the raw response
        )

    def _determine_suggestion_type(self, line: str) -> str:
        """Determine the type of suggestion based on content."""
        line_lower = line.lower()

        type_keywords = {
            'bug': ['bug', '错误', '修复', 'fix', 'error', 'exception', 'fail'],
            'security': ['安全', 'security', 'vulnerability', '漏洞', '攻击', '注入', 'xss', 'csrf'],
            'performance': ['性能', 'performance', '优化', 'optimize', '效率', 'efficiency', '慢'],
            'maintainability': ['维护', 'maintain', 'refactor', '重构', '结构', 'structure'],
            'code_quality': ['质量', 'quality', '风格', 'style', '格式', 'format', '命名', 'name'],
            'testing': ['测试', 'test', 'unit', 'mock', '覆盖', 'coverage'],
            'documentation': ['文档', 'document', 'comment', '注释', '说明']
        }

        for suggestion_type, keywords in type_keywords.items():
            if any(keyword in line_lower for keyword in keywords):
                return suggestion_type

        return 'improvement'

    def _determine_suggestion_priority(self, line: str) -> str:
        """Determine the priority of suggestion based on content."""
        line_lower = line.lower()

        high_priority_keywords = ['严重', 'critical', '紧急', 'urgent', '必须', 'must', '安全', 'security', 'crash']
        medium_priority_keywords = ['建议', '建议', 'recommend', '应该', 'should', '优化', 'optimize']
        low_priority_keywords = ['可选', 'optional', '可以', 'could', '可能', 'possibly']

        if any(keyword in line_lower for keyword in high_priority_keywords):
            return 'high'
        elif any(keyword in line_lower for keyword in medium_priority_keywords):
            return 'medium'
        elif any(keyword in line_lower for keyword in low_priority_keywords):
            return 'low'

        return 'medium'

    def _extract_summary(self, review_response: str) -> str:
        """Extract summary from review response."""
        lines = review_response.split('\n')
        summary_lines = []

        # Look for summary section
        in_summary = False
        for line in lines:
            line = line.strip()
            if any(marker in line.lower() for marker in ['总体评估', 'overall', '摘要', 'summary']):
                in_summary = True
                continue
            elif in_summary and line.startswith('#'):
                break
            elif in_summary and line:
                summary_lines.append(line)
                if len(summary_lines) >= 5:  # Limit summary length
                    break

        summary = '\n'.join(summary_lines) if summary_lines else review_response[:300]
        return summary + ("..." if len(summary) > 300 else "")

    def _extract_issues_for_category(self, review_response: str, category: str) -> List[str]:
        """Extract issues specific to a category."""
        issues = []
        category_keywords = {
            'code_quality': ['代码质量', '风格', '可读性', 'style', 'readability'],
            'security': ['安全', '漏洞', 'security', 'vulnerability'],
            'performance': ['性能', '效率', 'performance', 'efficiency'],
            'maintainability': ['维护', '重构', 'maintainability', 'refactor'],
            'testing': ['测试', '覆盖', 'test', 'coverage'],
            'documentation': ['文档', '注释', 'documentation', 'comment']
        }

        keywords = category_keywords.get(category, [category])
        lines = review_response.split('\n')

        for line in lines:
            line = line.strip()
            if any(keyword in line.lower() for keyword in keywords):
                if any(issue_word in line.lower() for issue_word in ['问题', '错误', 'issue', 'problem', 'bug']):
                    issues.append(line)

        return issues[:5]  # Limit to 5 issues per category

    def _is_suggestion_for_category(self, suggestion: Dict[str, str], category: str) -> bool:
        """Check if a suggestion belongs to a specific category."""
        description = suggestion.get('description', '').lower()

        category_keywords = {
            'code_quality': ['代码', '风格', '格式', 'code', 'style', 'format'],
            'security': ['安全', '验证', 'security', 'validate', 'sanitize'],
            'performance': ['性能', '优化', 'performance', 'optimize', 'efficient'],
            'maintainability': ['维护', '结构', 'maintain', 'structure', 'organize'],
            'testing': ['测试', '断言', 'test', 'assert', 'mock'],
            'documentation': ['文档', '注释', 'document', 'comment', 'explain']
        }

        keywords = category_keywords.get(category, [])
        return any(keyword in description for keyword in keywords)

    def format_review_result(self, result: ReviewResult,
                           output_format: str = "detailed") -> str:
        """
        Format review result for display.

        Args:
            result: ReviewResult to format
            output_format: Output format ("brief", "detailed", "json", "raw")

        Returns:
            Formatted review result string
        """
        if output_format == "json":
            # `json` is imported at module level; the previous local
            # `import json` was redundant shadowing and has been removed.
            # default=str stringifies non-serializable fields (enums, dates).
            return json.dumps(result.__dict__, indent=2, default=str)

        if output_format == "brief":
            return self._format_brief_result(result)

        if output_format == "raw":
            # Return Claude AI's raw response, preserving its full original
            # formatting; fall back to the detailed rendering when no raw
            # response is available on the result object.
            raw = getattr(result, 'raw_response', None)
            if raw:
                return raw
            return self._format_detailed_result(result)

        # Any other value (including the default "detailed") gets the
        # detailed report.
        return self._format_detailed_result(result)

    def _format_brief_result(self, result: ReviewResult) -> str:
        """Render a short Markdown summary of a review result.

        Shows the reviewed-file and changed-line counts plus the first three
        suggestions, each with a priority badge and, when present, a line
        reference; a trailing note reports how many suggestions were omitted.
        """
        priority_labels = {'high': '🔴 高优先级', 'medium': '🟡 中优先级', 'low': '🟢 低优先级'}

        parts = [
            f"# 代码审查摘要",
            f"**审查文件数**: {len(result.files_reviewed)}",
            f"**变更行数**: {result.diff_stats['lines_changed']}",
            "",
            f"**主要建议** ({len(result.suggestions)} 条):",
        ]

        for index, suggestion in enumerate(result.suggestions[:3], start=1):
            description = suggestion['description']
            label = priority_labels.get(suggestion.get('priority', 'medium'), '🟡 中优先级')
            location = ""
            if suggestion.get('line_number'):
                location = f" (第{suggestion['line_number']}行)"
            parts.append(f"{index}. {label} {description}{location}")

        remaining = len(result.suggestions) - 3
        if remaining > 0:
            parts.append(f"... 还有 {remaining} 条建议")

        return "\n".join(parts)

    def _format_detailed_result(self, result: ReviewResult) -> str:
        """Render a full Markdown review report from a ReviewResult.

        Sections, in order: header with review timestamp, summary text, diff
        statistics, project context, one subsection per focus area (with any
        category-specific issues), the numbered suggestion list (each entry
        with optional reasoning, risks, before/after code or a code example,
        and benefits), and finally the list of reviewed files.

        Args:
            result: ReviewResult holding parsed review data. Assumes
                `diff_stats` has 'lines_added'/'lines_removed'/'files_changed'
                keys and `project_context` has 'project_type'/'frameworks'/
                'dependencies_count' keys — TODO confirm against the producer
                of ReviewResult.

        Returns:
            The complete report as a single Markdown string (report text is
            Chinese).
        """
        # Fixed header plus summary, statistics, and project-context sections.
        lines = [
            f"# 代码审查报告",
            f"**生成时间**: {result.review_time}",
            "",
            "## 摘要",
            result.summary,
            "",
            "## 统计信息",
            f"- 审查文件数: {len(result.files_reviewed)}",
            f"- 新增行数: {result.diff_stats['lines_added']}",
            f"- 删除行数: {result.diff_stats['lines_removed']}",
            f"- 修改文件数: {result.diff_stats['files_changed']}",
            "",
            "## 项目背景",
            f"- 项目类型: {result.project_context['project_type']}",
            f"- 使用框架: {', '.join(result.project_context['frameworks']) or '无'}",
            f"- 依赖包数: {result.project_context['dependencies_count']} 个",
            "",
            "## 审查分类",
        ]

        # Chinese display names for the focus-area keys.
        category_names = {
            'code_quality': '代码质量',
            'security': '安全性',
            'performance': '性能',
            'maintainability': '可维护性',
            'testing': '测试',
            'documentation': '文档'
        }

        # One subsection per requested focus area that has analysis data;
        # areas missing from result.categories are silently skipped.
        for area in result.focus_areas:
            if area in result.categories:
                cat = result.categories[area]
                area_name = category_names.get(area, area)
                lines.extend([
                    f"### {area_name}",
                    cat['analysis'],
                    ""
                ])

                # Show category-specific issues
                if cat.get('issues'):
                    lines.extend([
                        f"**{area_name}问题:**",
                        ""
                    ])
                    for issue in cat['issues']:
                        lines.append(f"- {issue}")
                    lines.append("")

        # Numbered suggestion entries, each with a type label, priority
        # badge, and any optional detail fields the suggestion carries.
        if result.suggestions:
            lines.extend([
                "## 修改建议",
                ""
            ])

            for i, suggestion in enumerate(result.suggestions, 1):
                # Map the suggestion's 'type' key to a Chinese label;
                # unrecognized types fall back to "improvement" (改进).
                suggestion_type = {
                    'improvement': '改进',
                    'bug': '错误修复',
                    'security': '安全',
                    'performance': '性能优化',
                    'maintainability': '可维护性',
                    'code_quality': '代码质量',
                    'testing': '测试',
                    'documentation': '文档'
                }.get(suggestion.get('type', 'improvement'), '改进')

                priority = suggestion.get('priority', 'medium')
                priority_text = {'high': '🔴 高优先级', 'medium': '🟡 中优先级', 'low': '🟢 低优先级'}.get(priority, '🟡 中优先级')

                desc = suggestion['description']
                file_ref = f" (文件: {suggestion['file_path']})" if suggestion.get('file_path') else ""
                line_ref = f" (第{suggestion['line_number']}行)" if suggestion.get('line_number') else ""

                lines.append(f"### {i}. {priority_text} {suggestion_type}")
                lines.append(f"**问题描述**: {desc}{file_ref}{line_ref}")

                # Add reasoning if available
                if suggestion.get('reasoning'):
                    lines.extend([
                        "",
                        "**修改原因**:",
                        suggestion['reasoning']
                    ])

                # Add risks if available
                if suggestion.get('risks'):
                    lines.extend([
                        "",
                        "**潜在风险**:",
                        suggestion['risks']
                    ])

                # Add code comparison if available
                # (requires BOTH the original and the modified snippet).
                if suggestion.get('original_code') and suggestion.get('modified_code'):
                    lines.extend([
                        "",
                        "**原始代码**:",
                        "```",
                        suggestion['original_code'],
                        "```",
                        "",
                        "**修改后代码**:",
                        "```",
                        suggestion['modified_code'],
                        "```"
                    ])
                # Fallback to general code example
                elif suggestion.get('code_example'):
                    lines.extend([
                        "",
                        "**推荐代码**:",
                        "```",
                        suggestion['code_example'],
                        "```"
                    ])

                # Add benefits if available
                if suggestion.get('benefits'):
                    lines.extend([
                        "",
                        "**修改好处**:",
                        suggestion['benefits']
                    ])

                lines.append("")

        # Trailing list of every file covered by the review.
        lines.extend([
            "",
            "## 审查文件列表",
            ""
        ])

        for file_path in result.files_reviewed:
            lines.append(f"- {file_path}")

        return "\n".join(lines)
