#!/usr/bin/env python3
"""
Batch IR Processor - Agent 3 Transformation Pipeline

Processes Roslyn JSON outputs from Agent 2, builds IR, applies normalization
and transformation passes, and generates detailed conversion metrics.

Usage:
    python process_ir_batch.py
"""

import json
import logging
import sys
from collections import defaultdict
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any

# Add the orchestrator to the Python path
SCRIPT_DIR = Path(__file__).parent
ORCHESTRATOR_DIR = SCRIPT_DIR.parent.parent / "cpp-orchestrator" / "src"
sys.path.insert(0, str(ORCHESTRATOR_DIR))

from orchestrator.ir.builder import IRBuilder
from orchestrator.ir.nodes import ClassDeclaration
from orchestrator.ir.normalizer import IRNormalizer


@dataclass
class GapInstance:
    """Represents a specific unsupported construct found in Roslyn output.

    One instance is recorded per occurrence; instances are aggregated by
    construct_type when building gap-analysis reports.
    """

    construct_type: str  # "async/await", "LINQ", "delegate", etc.
    severity: str  # "BLOCKER", "WORKAROUND", "MISSING", "PARTIAL"
    location: str  # e.g., "Class.Method"
    description: str  # Human-readable explanation of why this is a gap
    example_code: str = ""  # Short C# snippet illustrating the construct
    count: int = 1  # Occurrence weight used when tallying common gaps


@dataclass
class FileMetrics:
    """Metrics for a single file's trip through the IR transformation pipeline."""

    file_name: str  # Stem of the source Roslyn JSON file
    repo_name: str  # Repository (output directory name) the file belongs to
    ir_build_success: bool = False  # Set once IRBuilder produced at least one class
    classes_found: int = 0  # Classes listed in the Roslyn JSON
    classes_converted: int = 0  # Classes successfully built into IR
    fields_created: int = 0
    properties_found: int = 0
    properties_normalized: int = 0  # Properties expanded into getter/setter pairs
    methods_found: int = 0
    constructors_found: int = 0
    normalization_success: bool = False  # Set when a class normalizes cleanly
    transformations_applied: list[str] = field(default_factory=list)  # Rule names applied
    gaps: list[GapInstance] = field(default_factory=list)  # Unsupported constructs found
    errors: list[str] = field(default_factory=list)  # Failures recorded for this file
    warnings: list[str] = field(default_factory=list)  # Non-fatal notices


@dataclass
class RepositoryMetrics:
    """Aggregate metrics for a repository, rolled up from its FileMetrics."""

    repo_name: str
    files_processed: int = 0
    ir_build_success_rate: float = 0.0  # % of files whose IR build succeeded
    normalization_success_rate: float = 0.0  # % of files that normalized
    transformation_coverage: float = 0.0  # % of members carried through transforms
    fidelity_score: float = 0.0  # 0-100 estimated semantic equivalence
    blocker_count: int = 0  # Gaps with severity "BLOCKER"
    workaround_count: int = 0  # Gaps with severity "WORKAROUND"
    gap_count: int = 0  # Total gaps of any severity
    file_metrics: list[FileMetrics] = field(default_factory=list)  # Per-file details
    common_gaps: dict[str, int] = field(default_factory=dict)  # construct_type -> count


class IRProcessor:
    """Main processor for IR transformation pipeline"""

    def __init__(self, input_dir: Path, output_dir: Path):
        """Create a processor reading Roslyn JSON from *input_dir* and writing
        all logs, reports, and IR artifacts under *output_dir*."""
        self.input_dir = input_dir
        self.output_dir = output_dir

        # The output directory must exist before logging is configured,
        # because the log file handler opens a file inside it.
        self.output_dir.mkdir(parents=True, exist_ok=True)
        self.setup_logging()

        # Accumulated results across every processed repository.
        self.repo_metrics: dict[str, RepositoryMetrics] = {}
        self.all_gaps: list[GapInstance] = []

    def setup_logging(self):
        """Configure combined file + console logging and bind ``self.logger``.

        NOTE(review): ``logging.basicConfig`` only takes effect if the root
        logger has no handlers yet — presumably this processor is the first
        thing configuring logging in the process.
        """
        logging.basicConfig(
            level=logging.INFO,
            format="[%(levelname)s] %(message)s",
            handlers=[
                logging.FileHandler(self.output_dir / "transformation.log"),
                logging.StreamHandler(),
            ],
        )
        self.logger = logging.getLogger(__name__)

    def process_all_repositories(self, repositories: list[str] | None = None):
        """Process repositories in priority order.

        Args:
            repositories: Optional explicit list of repository directory names
                to process (in order). Defaults to the built-in priority list,
                keeping existing callers unchanged.

        Repositories whose input directory is missing are logged and skipped.
        """
        if repositories is None:
            # Default priority order for the Agent 3 pipeline.
            repositories = ["DelegateTransform", "DotNetCSharpLinqByExample", "RobloxAPI", "C5"]

        for repo_name in repositories:
            repo_dir = self.input_dir / repo_name
            if not repo_dir.exists():
                self.logger.warning(f"Repository directory not found: {repo_name}")
                continue

            banner = "=" * 80
            self.logger.info(f"\n{banner}")
            self.logger.info(f"Processing Repository: {repo_name}")
            self.logger.info(banner)

            self.process_repository(repo_name, repo_dir)

    def process_repository(self, repo_name: str, repo_dir: Path):
        """Run the IR pipeline over every Roslyn JSON file in one repository.

        Results are accumulated into ``self.repo_metrics[repo_name]`` and a
        per-repository gap-analysis report is written alongside the outputs.
        """
        repo_output_dir = self.output_dir / repo_name
        repo_output_dir.mkdir(parents=True, exist_ok=True)

        # Roslyn emits one JSON per source file plus *-metrics.json sidecars;
        # the sidecars carry no class data and are skipped here.
        json_files = sorted(
            candidate
            for candidate in repo_dir.glob("*.json")
            if not candidate.name.endswith("-metrics.json")
        )

        if not json_files:
            self.logger.warning(f"No JSON files found in {repo_name}")
            return

        repo_metrics = RepositoryMetrics(repo_name=repo_name)

        for json_file in json_files:
            self.logger.info(f"\nProcessing: {json_file.name}")
            repo_metrics.file_metrics.append(self.process_file(json_file, repo_output_dir))
            repo_metrics.files_processed += 1

        # Roll the per-file results up, record them, and report the gaps.
        self.calculate_repository_metrics(repo_metrics)
        self.repo_metrics[repo_name] = repo_metrics
        self.generate_gap_analysis_report(repo_metrics, repo_output_dir)

    def process_file(self, json_file: Path, output_dir: Path) -> FileMetrics:
        """Process a single Roslyn JSON file through the IR pipeline.

        Args:
            json_file: Roslyn-produced JSON description of one C# source file.
            output_dir: Repository-specific output directory; its name doubles
                as the repo name recorded on the metrics.

        Returns:
            FileMetrics describing build/normalization outcomes. Never raises:
            any failure is logged and recorded in ``errors`` so the batch
            continues with the next file.
        """
        file_metrics = FileMetrics(
            file_name=json_file.stem, repo_name=output_dir.name
        )

        try:
            # BUG FIX: read as UTF-8 explicitly. Roslyn JSON is UTF-8, and the
            # previous locale-default open() mis-decoded non-ASCII identifiers
            # and string literals on platforms like Windows (cp1252).
            with open(json_file, "r", encoding="utf-8") as f:
                roslyn_data = json.load(f)

            # Analyze for unsupported constructs BEFORE IR building, so gaps
            # are reported even when the build itself fails.
            self.analyze_roslyn_json_for_gaps(roslyn_data, file_metrics)

            file_metrics.classes_found = len(roslyn_data.get("classes", []))
            ir_classes = self.build_ir(roslyn_data, file_metrics)

            if not ir_classes:
                self.logger.error(f"Failed to build IR for {json_file.name}")
                return file_metrics

            file_metrics.ir_build_success = True
            file_metrics.classes_converted = len(ir_classes)

            # Normalize/transform each class and persist its artifacts.
            for ir_class in ir_classes:
                self.process_class_ir(ir_class, file_metrics, output_dir)

        except Exception as e:
            # Deliberately broad: one bad file must not abort the batch run.
            self.logger.error(f"Error processing {json_file.name}: {e}")
            file_metrics.errors.append(str(e))

        return file_metrics

    def build_ir(self, roslyn_data: dict[str, Any], metrics: FileMetrics) -> list[ClassDeclaration]:
        """Construct IR classes from Roslyn JSON, recording builder errors.

        Returns the (possibly empty) list of built ClassDeclaration objects;
        errors reported by the builder are appended to ``metrics.errors``.
        """
        log_lines = [f"[INFO] Building IR from {metrics.file_name}.json"]

        builder = IRBuilder()
        ir_classes = builder.build_from_json(roslyn_data)

        # Surface builder errors both in the transform log and in the metrics.
        for error in builder.errors:
            log_lines.append(f"[ERROR] {error}")
            metrics.errors.append(error)

        # One success line per class, with its total member count.
        for ir_class in ir_classes:
            member_count = sum(
                (
                    len(ir_class.fields),
                    len(ir_class.properties),
                    len(ir_class.methods),
                    len(ir_class.constructors),
                )
            )
            log_lines.append(
                f"[SUCCESS] Created ClassDeclaration: {ir_class.name} ({member_count} members)"
            )

        self._append_to_transform_log(metrics.file_name, log_lines)
        return ir_classes

    def process_class_ir(
        self, ir_class: ClassDeclaration, metrics: FileMetrics, output_dir: Path
    ):
        """Normalize and transform one class, then persist its IR and log.

        Side effects: updates *metrics* counters, writes ``<Class>-ir.json``
        and ``<Class>-transform.log`` into *output_dir*.
        """
        log_lines = []

        # Tally the class's original member counts before any normalization.
        metrics.fields_created += len(ir_class.fields)
        metrics.properties_found += len(ir_class.properties)
        metrics.methods_found += len(ir_class.methods)
        metrics.constructors_found += len(ir_class.constructors)

        # Apply normalization
        log_lines.append(f"\n[INFO] Normalizing class: {ir_class.name}")
        normalized_class, norm_log = self.normalize_class(ir_class)

        if normalized_class:
            metrics.normalization_success = True
            log_lines.extend(norm_log)

            # Each property expands into a getter AND a setter transform entry,
            # so two log hits correspond to one normalized property.
            properties_normalized = sum(
                1
                for t in norm_log
                if "Property->Getter" in t or "Property->Setter" in t
            )
            metrics.properties_normalized = properties_normalized // 2  # Getter+Setter = 1 property

            # Apply transformations (currently minimal)
            log_lines.append("[INFO] Applying transformation rules")
            # Note: Full transformation rules not implemented yet
            metrics.transformations_applied.append("PropertyNormalization")

            # Save IR as JSON
            ir_json_path = output_dir / f"{ir_class.name}-ir.json"
            self.save_ir_json(normalized_class, ir_json_path)
            log_lines.append(f"[SUCCESS] Saved IR to {ir_json_path.name}")

        else:
            log_lines.append(f"[ERROR] Normalization failed for {ir_class.name}")
            metrics.errors.append(f"Normalization failed for {ir_class.name}")

        # BUG FIX: write the log as UTF-8 explicitly — the locale-default
        # codec could raise UnicodeEncodeError on non-ASCII class/member names.
        log_file = output_dir / f"{ir_class.name}-transform.log"
        with open(log_file, "w", encoding="utf-8") as f:
            f.write("\n".join(log_lines))

    def normalize_class(
        self, ir_class: ClassDeclaration
    ) -> tuple[ClassDeclaration | None, list[str]]:
        """Normalize *ir_class* with IRNormalizer.

        Returns:
            ``(normalized_class, log_lines)`` on success, or ``(None, log_lines)``
            with an error entry when normalization raises — failures are
            reported to the caller rather than propagated.
        """
        log_lines = []
        try:
            normalizer = IRNormalizer()
            normalized = normalizer.normalize(ir_class)
            # One log entry per transformation the normalizer reports.
            log_lines.extend(
                f"[TRANSFORM] {transform}"
                for transform in normalizer.transformations_applied
            )
            return normalized, log_lines
        except Exception as e:
            log_lines.append(f"[ERROR] Normalization failed: {e}")
            return None, log_lines

    def analyze_roslyn_json_for_gaps(
        self, roslyn_data: dict[str, Any], metrics: FileMetrics
    ):
        """Scan Roslyn JSON for constructs the IR pipeline cannot fully convert.

        Every finding is recorded twice: on *metrics* (per-file) and on
        ``self.all_gaps`` (global aggregate used for cross-repo reports).
        """

        def record(gap: GapInstance) -> None:
            # Track each gap both per-file and globally.
            metrics.gaps.append(gap)
            self.all_gaps.append(gap)

        classes = roslyn_data.get("classes", [])

        for class_data in classes:
            class_name = class_data.get("name", "Unknown")

            # Test-framework attributes need a C++ test framework equivalent.
            for attr in class_data.get("attributes", []):
                if attr in ("TestClass", "TestMethod"):
                    record(
                        GapInstance(
                            construct_type="Test Attributes",
                            severity="WORKAROUND",
                            location=class_name,
                            description=f"Unit test attribute [{attr}] requires test framework",
                            example_code=f"[{attr}]",
                        )
                    )

            # Check members for unsupported constructs.
            for member in class_data.get("members", []):
                member_name = member.get("name", "")
                location = f"{class_name}.{member_name}"

                # async/await has no direct equivalent in this pipeline.
                if member.get("isAsync", False):
                    record(
                        GapInstance(
                            construct_type="async/await",
                            severity="BLOCKER",
                            location=location,
                            description="Async method requires complete rewrite to synchronous or coroutine pattern",
                            example_code=f"async {member.get('returnType', '')} {member_name}(...)",
                        )
                    )

                # Generic methods map to C++ templates only partially.
                type_params = member.get("typeParameters", [])
                if type_params:
                    record(
                        GapInstance(
                            construct_type="Generic Type Parameters",
                            severity="PARTIAL",
                            location=location,
                            description=f"Generic method with type parameters: {type_params}",
                            example_code=f"<{', '.join(type_params)}>",
                        )
                    )

                constraints = member.get("constraints", [])
                if constraints:
                    record(
                        GapInstance(
                            construct_type="Generic Constraints",
                            severity="PARTIAL",
                            location=location,
                            description="Generic constraints may not fully map to C++ concepts",
                            example_code=f"where {', '.join(constraints)}",
                        )
                    )

        # BUG FIX: "delegates" is a file-level list in the Roslyn JSON. The old
        # code iterated it inside the class loop, recording every delegate once
        # PER CLASS (inflating gap counts and fidelity penalties) and never
        # recording it at all when the file declared delegates but no classes.
        for delegate_data in roslyn_data.get("delegates", []):
            delegate_name = delegate_data.get("name", "Unknown")
            record(
                GapInstance(
                    construct_type="Delegate Type",
                    severity="WORKAROUND",
                    location=f"{metrics.file_name} (delegate {delegate_name})",
                    description="Custom delegate type requires manual C++ function pointer or std::function definition",
                    example_code=f"public delegate {delegate_data.get('returnType', '')} {delegate_name}(...)",
                )
            )

    def save_ir_json(self, ir_class: ClassDeclaration, output_path: Path):
        """Serialize one IR class to a pretty-printed JSON file at *output_path*."""
        payload = json.dumps(ir_class.model_dump(mode="json"), indent=2)
        output_path.write_text(payload)

    def _append_to_transform_log(self, file_name: str, log_lines: list[str]):
        """Emit *log_lines* through the shared logger, whose FileHandler feeds
        transformation.log.

        NOTE(review): *file_name* is currently unused — kept for interface
        stability with existing callers.
        """
        for entry in log_lines:
            self.logger.info(entry)

    def calculate_repository_metrics(self, repo_metrics: RepositoryMetrics):
        """Derive aggregate rates, gap counts, and scores from per-file metrics.

        No-op when the repository has no file metrics.
        """
        if not repo_metrics.file_metrics:
            return

        file_count = repo_metrics.files_processed

        def as_rate(hits: int) -> float:
            # Percentage of processed files, guarding the zero-file case.
            return (hits / file_count) * 100 if file_count > 0 else 0

        repo_metrics.ir_build_success_rate = as_rate(
            sum(1 for fm in repo_metrics.file_metrics if fm.ir_build_success)
        )
        repo_metrics.normalization_success_rate = as_rate(
            sum(1 for fm in repo_metrics.file_metrics if fm.normalization_success)
        )

        # Gap tallies by severity across every file in the repository.
        all_gaps = [gap for fm in repo_metrics.file_metrics for gap in fm.gaps]
        repo_metrics.blocker_count = sum(1 for gap in all_gaps if gap.severity == "BLOCKER")
        repo_metrics.workaround_count = sum(1 for gap in all_gaps if gap.severity == "WORKAROUND")
        repo_metrics.gap_count = len(all_gaps)

        gap_counts = defaultdict(int)
        for gap in all_gaps:
            gap_counts[gap.construct_type] += gap.count
        repo_metrics.common_gaps = dict(gap_counts)

        # Coverage: members carried through the pipeline vs. members seen.
        # Only the property term differs between the two sums (found vs.
        # normalized) — fields, methods, constructors pass through as-is.
        total_members = sum(
            fm.fields_created + fm.properties_found + fm.methods_found + fm.constructors_found
            for fm in repo_metrics.file_metrics
        )
        transformed_members = sum(
            fm.fields_created + fm.properties_normalized + fm.methods_found + fm.constructors_found
            for fm in repo_metrics.file_metrics
        )
        repo_metrics.transformation_coverage = (
            (transformed_members / total_members) * 100 if total_members > 0 else 0
        )

        # Fidelity: 100 minus a per-file penalty weighted by severity
        # (blocker=20, workaround=5, anything else=2), floored at 0.
        other_gaps = (
            repo_metrics.gap_count - repo_metrics.blocker_count - repo_metrics.workaround_count
        )
        penalty = (
            repo_metrics.blocker_count * 20
            + repo_metrics.workaround_count * 5
            + other_gaps * 2
        )
        penalty_per_file = penalty / file_count if file_count > 0 else 0
        repo_metrics.fidelity_score = max(0, 100 - penalty_per_file)

    def generate_gap_analysis_report(self, repo_metrics: RepositoryMetrics, output_dir: Path):
        """Write ``<repo>-gaps.md`` grouping the repository's gaps by severity.

        Section order: BLOCKER, WORKAROUND, PARTIAL, MISSING. Within each
        section, gaps are grouped by construct type and illustrated with the
        first recorded occurrence.
        """
        report_path = output_dir / f"{repo_metrics.repo_name}-gaps.md"

        # Group gaps by severity
        gaps_by_severity = defaultdict(list)
        for fm in repo_metrics.file_metrics:
            for gap in fm.gaps:
                gaps_by_severity[gap.severity].append(gap)

        # BUG FIX: explicit UTF-8 — gap examples quote source snippets that may
        # contain non-ASCII characters, which the locale-default codec could
        # fail to encode.
        with open(report_path, "w", encoding="utf-8") as f:
            f.write(f"# {repo_metrics.repo_name} Gap Analysis\n\n")
            f.write(f"**Files Processed:** {repo_metrics.files_processed}\n\n")
            f.write(f"**Total Gaps:** {repo_metrics.gap_count}\n\n")
            f.write(f"**Blockers:** {repo_metrics.blocker_count}\n\n")
            f.write(f"**Workarounds:** {repo_metrics.workaround_count}\n\n")

            # Blockers: complete rewrite required.
            if "BLOCKER" in gaps_by_severity:
                f.write("\n## BLOCKERS (Complete Rewrite Required)\n\n")
                blocker_counts = defaultdict(list)
                for gap in gaps_by_severity["BLOCKER"]:
                    blocker_counts[gap.construct_type].append(gap)

                for idx, (construct, gaps) in enumerate(blocker_counts.items(), 1):
                    f.write(f"### {idx}. {construct} ({len(gaps)} occurrence{'s' if len(gaps) > 1 else ''})\n\n")
                    example = gaps[0]
                    f.write(f"- **Example:** `{example.example_code}`\n")
                    f.write(f"- **Location:** {example.location}\n")
                    f.write(f"- **Impact:** {example.description}\n")
                    f.write(f"- **Workaround:** {self._get_workaround_suggestion(construct)}\n\n")

            # Workarounds: manual but mechanical transformation.
            if "WORKAROUND" in gaps_by_severity:
                f.write("\n## WORKAROUNDS (Manual Transformation)\n\n")
                workaround_counts = defaultdict(list)
                for gap in gaps_by_severity["WORKAROUND"]:
                    workaround_counts[gap.construct_type].append(gap)

                for idx, (construct, gaps) in enumerate(workaround_counts.items(), 1):
                    f.write(f"### {idx}. {construct} ({len(gaps)} occurrence{'s' if len(gaps) > 1 else ''})\n\n")
                    example = gaps[0]
                    f.write(f"- **Example:** `{example.example_code}`\n")
                    f.write(f"- **Impact:** {example.description}\n")
                    f.write(f"- **Workaround:** {self._get_workaround_suggestion(construct)}\n\n")

            # Partial support: converted but needs review.
            if "PARTIAL" in gaps_by_severity:
                f.write("\n## PARTIAL SUPPORT (May Require Manual Review)\n\n")
                partial_counts = defaultdict(list)
                for gap in gaps_by_severity["PARTIAL"]:
                    partial_counts[gap.construct_type].append(gap)

                for idx, (construct, gaps) in enumerate(partial_counts.items(), 1):
                    f.write(f"### {idx}. {construct} ({len(gaps)} occurrence{'s' if len(gaps) > 1 else ''})\n\n")
                    example = gaps[0]
                    f.write(f"- **Example:** `{example.example_code}`\n")
                    f.write(f"- **Note:** {example.description}\n\n")

            # Missing constructs: one bullet per gap, not grouped.
            if "MISSING" in gaps_by_severity:
                f.write("\n## MISSING TRANSFORMATIONS\n\n")
                for gap in gaps_by_severity["MISSING"]:
                    f.write(f"- **{gap.construct_type}** at {gap.location}: {gap.description}\n")

        self.logger.info(f"Generated gap analysis: {report_path}")

    def _get_workaround_suggestion(self, construct: str) -> str:
        """Return the recommended C++ workaround for a gap construct type.

        Falls back to a generic message for construct types without a curated
        suggestion.
        """
        return {
            "async/await": "Convert to synchronous API or implement custom coroutine system",
            "Delegate Type": "Define equivalent using std::function<> or function pointer typedef",
            "Test Attributes": "Use C++ test framework (Google Test, Catch2) with equivalent macros",
            "LINQ queries": "Rewrite using STL algorithms or range-v3 library",
            "yield return": "Convert to explicit iterator class or return vector",
            "events": "Implement using observer pattern with std::function callbacks",
        }.get(construct, "Manual implementation required")

    def generate_aggregate_metrics(self):
        """Write ``transformation-metrics.json`` summarizing all repositories.

        Safe to call even when no repositories were processed: averages
        default to 0 instead of raising.
        """
        metrics_path = self.output_dir / "transformation-metrics.json"

        aggregate = {
            "repositories": {},
            "aggregateStatistics": {},
        }

        # Per-repository metrics
        for repo_name, repo_metrics in self.repo_metrics.items():
            aggregate["repositories"][repo_name] = {
                "filesProcessed": repo_metrics.files_processed,
                "irBuildSuccessRate": round(repo_metrics.ir_build_success_rate, 1),
                "normalizationSuccessRate": round(repo_metrics.normalization_success_rate, 1),
                "transformationCoverage": round(repo_metrics.transformation_coverage, 1),
                "fidelityScore": round(repo_metrics.fidelity_score, 1),
                "blockerCount": repo_metrics.blocker_count,
                "workaroundCount": repo_metrics.workaround_count,
                "gapCount": repo_metrics.gap_count,
                "commonGaps": repo_metrics.common_gaps,
            }

        # BUG FIX: guard the averages — the old code divided by
        # len(self.repo_metrics) unconditionally and raised ZeroDivisionError
        # when no repository directories were found.
        repo_count = len(self.repo_metrics)
        total_files = sum(rm.files_processed for rm in self.repo_metrics.values())
        avg_ir_build = (
            sum(rm.ir_build_success_rate for rm in self.repo_metrics.values()) / repo_count
            if repo_count
            else 0.0
        )
        avg_fidelity = (
            sum(rm.fidelity_score for rm in self.repo_metrics.values()) / repo_count
            if repo_count
            else 0.0
        )
        total_blockers = sum(rm.blocker_count for rm in self.repo_metrics.values())
        total_workarounds = sum(rm.workaround_count for rm in self.repo_metrics.values())

        # Ten most common gap construct types across every repository.
        all_gap_counts = defaultdict(int)
        for gap in self.all_gaps:
            all_gap_counts[gap.construct_type] += gap.count

        most_common = sorted(all_gap_counts.items(), key=lambda x: x[1], reverse=True)[:10]

        aggregate["aggregateStatistics"] = {
            "totalFiles": total_files,
            "avgIRBuildSuccessRate": round(avg_ir_build, 1),
            "avgFidelityScore": round(avg_fidelity, 1),
            "totalBlockers": total_blockers,
            "totalWorkarounds": total_workarounds,
            "mostCommonGaps": [{"construct": k, "count": v} for k, v in most_common],
        }

        with open(metrics_path, "w", encoding="utf-8") as f:
            json.dump(aggregate, f, indent=2)

        self.logger.info(f"\nGenerated aggregate metrics: {metrics_path}")

    def generate_comprehensive_report(self):
        """Write ``IR_TRANSFORMATION_REPORT.md``: executive summary, per-repo
        breakdown, gap summary, and recommendations for the next pipeline stage.

        Safe to call with zero processed repositories (averages default to 0).
        """
        report_path = self.output_dir / "IR_TRANSFORMATION_REPORT.md"

        # Explicit UTF-8 so repo/construct names cannot trip the locale codec.
        with open(report_path, "w", encoding="utf-8") as f:
            f.write("# IR Transformation Report - Agent 3\n\n")
            f.write("## Executive Summary\n\n")

            # BUG FIX: guard the average — the old code divided by
            # len(self.repo_metrics) unconditionally and raised
            # ZeroDivisionError when no repositories were processed.
            repo_count = len(self.repo_metrics)
            total_files = sum(rm.files_processed for rm in self.repo_metrics.values())
            avg_fidelity = (
                sum(rm.fidelity_score for rm in self.repo_metrics.values()) / repo_count
                if repo_count
                else 0.0
            )
            total_blockers = sum(rm.blocker_count for rm in self.repo_metrics.values())

            f.write(f"- **Total Files Processed:** {total_files}\n")
            f.write(f"- **Average Fidelity Score:** {avg_fidelity:.1f}%\n")
            f.write(f"- **Total Blocking Issues:** {total_blockers}\n\n")

            f.write("## Conversion Methodology\n\n")
            f.write("The IR transformation pipeline consists of three phases:\n\n")
            f.write("1. **IR Building:** Parse Roslyn JSON and construct intermediate representation\n")
            f.write("2. **Normalization:** Expand C#-specific constructs (properties, modifiers)\n")
            f.write("3. **Transformation:** Apply conversion rules for statements and expressions\n\n")

            # Repository-by-repository breakdown
            f.write("## Repository Analysis\n\n")

            for repo_name, repo_metrics in self.repo_metrics.items():
                f.write(f"### {repo_name}\n\n")
                f.write(f"- **Files:** {repo_metrics.files_processed}\n")
                f.write(f"- **IR Build Success:** {repo_metrics.ir_build_success_rate:.1f}%\n")
                f.write(f"- **Normalization Success:** {repo_metrics.normalization_success_rate:.1f}%\n")
                f.write(f"- **Transformation Coverage:** {repo_metrics.transformation_coverage:.1f}%\n")
                f.write(f"- **Fidelity Score:** {repo_metrics.fidelity_score:.1f}%\n")
                f.write(f"- **Blockers:** {repo_metrics.blocker_count}\n")
                f.write(f"- **Workarounds:** {repo_metrics.workaround_count}\n\n")

                if repo_metrics.common_gaps:
                    f.write("**Most Common Gaps:**\n")
                    # Top five construct types by occurrence count.
                    for gap_type, count in sorted(
                        repo_metrics.common_gaps.items(), key=lambda x: x[1], reverse=True
                    )[:5]:
                        f.write(f"- {gap_type}: {count}\n")
                    f.write("\n")

            # Gap analysis summary
            f.write("## Gap Analysis Summary\n\n")

            gap_by_severity = defaultdict(list)
            for gap in self.all_gaps:
                gap_by_severity[gap.severity].append(gap)

            f.write(f"### Critical Gaps Blocking Conversion\n\n")
            if "BLOCKER" in gap_by_severity:
                blocker_counts = defaultdict(int)
                for gap in gap_by_severity["BLOCKER"]:
                    blocker_counts[gap.construct_type] += 1

                for construct, count in sorted(
                    blocker_counts.items(), key=lambda x: x[1], reverse=True
                ):
                    f.write(f"- **{construct}:** {count} occurrences\n")
            else:
                f.write("*No blocking issues found*\n")

            f.write("\n")

            # Recommendations, bucketed by how blocked each repository is.
            f.write("## Recommendations for Agent 4 (Code Generation)\n\n")

            compilable_repos = [
                name
                for name, metrics in self.repo_metrics.items()
                if metrics.blocker_count == 0
            ]
            partial_repos = [
                name
                for name, metrics in self.repo_metrics.items()
                if 0 < metrics.blocker_count < 5
            ]
            blocked_repos = [
                name
                for name, metrics in self.repo_metrics.items()
                if metrics.blocker_count >= 5
            ]

            f.write("### Fully Convertible (No Blockers)\n\n")
            if compilable_repos:
                for repo in compilable_repos:
                    f.write(f"- **{repo}:** Can generate compilable C++ with workarounds\n")
            else:
                f.write("*None*\n")

            f.write("\n### Partially Convertible (Some Blockers)\n\n")
            if partial_repos:
                for repo in partial_repos:
                    metrics = self.repo_metrics[repo]
                    f.write(
                        f"- **{repo}:** {metrics.blocker_count} blocking issues - generate stubs for blocked methods\n"
                    )
            else:
                f.write("*None*\n")

            f.write("\n### Heavily Blocked (Manual Rewrite Required)\n\n")
            if blocked_repos:
                for repo in blocked_repos:
                    metrics = self.repo_metrics[repo]
                    f.write(
                        f"- **{repo}:** {metrics.blocker_count} blocking issues - requires significant manual work\n"
                    )
            else:
                f.write("*None*\n")

            f.write("\n### Code Generation Strategy\n\n")
            f.write("1. **Stub Generation:** Generate method stubs for blocked constructs with TODO comments\n")
            f.write("2. **Workaround Implementation:** Provide C++ equivalents for delegates, attributes\n")
            f.write("3. **Partial Support:** Generate partial implementations with inline comments for manual review\n")
            f.write("4. **Compilability:** Focus on generating syntactically correct C++ that compiles with warnings\n\n")

            # Final assessment, thresholded on the average fidelity score.
            f.write("## Compilability Assessment\n\n")

            if avg_fidelity >= 80:
                f.write(
                    "**HIGH:** Generated C++ code should compile with minimal manual intervention.\n"
                )
            elif avg_fidelity >= 60:
                f.write(
                    "**MEDIUM:** Generated C++ code will compile but require manual implementation of stubs.\n"
                )
            else:
                f.write(
                    "**LOW:** Generated C++ code will require significant manual work to compile.\n"
                )

        self.logger.info(f"\nGenerated comprehensive report: {report_path}")


def main():
    """Entry point: run the full pipeline and print where the outputs landed."""
    # Inputs come from Agent 2's Roslyn stage; outputs land next to this script.
    script_dir = Path(__file__).parent
    input_dir = script_dir.parent / "02-roslyn-output"
    output_dir = script_dir

    processor = IRProcessor(input_dir, output_dir)
    processor.process_all_repositories()
    processor.generate_aggregate_metrics()
    processor.generate_comprehensive_report()

    banner = "=" * 80
    print("\n" + banner)
    print("IR Transformation Complete!")
    print(banner)
    print(f"\nResults saved to: {output_dir}")
    print("\nKey outputs:")
    print(f"  - Aggregate metrics: {output_dir / 'transformation-metrics.json'}")
    print(f"  - Comprehensive report: {output_dir / 'IR_TRANSFORMATION_REPORT.md'}")
    print(f"  - Per-repository gap analysis: {output_dir}/<RepoName>/<RepoName>-gaps.md")
    print(f"  - Transform logs: {output_dir}/<RepoName>/<ClassName>-transform.log")
    print(f"  - IR JSON: {output_dir}/<RepoName>/<ClassName>-ir.json")


if __name__ == "__main__":
    main()
