#!/usr/bin/env python3
"""
Migration Tools for GSM Extension System

Provides tools to migrate existing Python-based extensions to the new
configuration-driven format, enabling easier maintenance and faster development.
"""

import ast
import inspect
import json
import logging
import re
import shutil
import textwrap
from dataclasses import dataclass, field
from pathlib import Path
from typing import Dict, List, Optional, Set, Any, Union

import yaml

from .base_extension import BaseExtension


@dataclass
class MigrationResult:
    """Result of a migration operation."""
    # True only when the migration completed without a fatal error.
    success: bool
    # Extension name; starts as the source directory name and is replaced
    # by the analyzed name on success.
    extension_name: str
    # Directory of the original Python-based extension.
    original_path: Path
    # Paths written (or, in dry-run mode, paths that would have been written).
    generated_files: List[Path] = field(default_factory=list)
    # Non-fatal issues worth surfacing to the user.
    warnings: List[str] = field(default_factory=list)
    # Fatal problems; a non-empty list implies success is False.
    errors: List[str] = field(default_factory=list)


@dataclass
class ExtensionAnalysis:
    """Analysis of a Python extension for migration.

    Populated from the parsed extension.py and then enriched with
    extension.json metadata and builder.py / runner.py heuristics.
    """
    # Extension name (from super().__init__ args, extension.json, or the
    # class name with 'extension' stripped).
    name: str
    # Version string; defaults to "1.0.0" when nothing is detected.
    version: str
    # Human-readable description from the description() method, metadata,
    # or a class-level assignment.
    description: str
    # Framework identifiers the extension declares (e.g. 'gcr', 'petsc').
    supported_frameworks: List[str]
    # Dependency entries; dicts or bare names (normalized later).
    dependencies: List[Dict[str, Any]]
    # Shell commands inferred from builder.py.
    build_commands: List[str]
    # Shell commands inferred from runner.py.
    test_commands: List[str]
    # Passed through to the generated config's build.environment section.
    environment_variables: Dict[str, str]
    # Copied into the generated config's "schema" key when non-empty.
    configuration_schema: Dict[str, Any]
    # Names of get_*/create_* methods with no config-driven equivalent.
    custom_methods: List[str]
    # Heuristic complexity (one point per custom method); >5 triggers a
    # manual-review note during migration.
    complexity_score: int


class PythonExtensionAnalyzer:
    """Analyzes Python extensions to extract configuration information."""

    def __init__(self):
        self.logger = logging.getLogger(__name__)

    def analyze_extension(self, extension_path: Path) -> Optional[ExtensionAnalysis]:
        """Analyze a Python extension and extract migration information.

        Reads ``extension.py`` (required) plus the optional
        ``extension.json`` metadata, ``builder.py`` and ``runner.py`` files
        found in *extension_path*.

        Returns:
            The populated ExtensionAnalysis, or None when the extension is
            missing or cannot be analyzed.
        """
        try:
            # Find the main extension.py file
            extension_file = extension_path / "extension.py"
            if not extension_file.exists():
                self.logger.warning(f"No extension.py found in {extension_path}")
                return None

            # Parse the Python file
            content = extension_file.read_text(encoding="utf-8")
            tree = ast.parse(content)
            analyzer = ExtensionASTAnalyzer()
            # BUGFIX: NodeVisitor.visit() returns None for a Module node;
            # the collected results live on the visitor's `analysis`
            # attribute, not in the return value of visit().
            analyzer.visit(tree)
            analysis = analyzer.analysis

            # Explicit metadata in extension.json takes precedence over
            # values derived from the AST.
            metadata_file = extension_path / "extension.json"
            if metadata_file.exists():
                with open(metadata_file, 'r', encoding="utf-8") as f:
                    metadata = json.load(f)
                analysis.name = metadata.get('name', analysis.name)
                analysis.version = metadata.get('version', analysis.version)
                analysis.description = metadata.get('description', analysis.description)

            # Analyze builder if present
            builder_file = extension_path / "builder.py"
            if builder_file.exists():
                analysis.build_commands.extend(self._analyze_builder(builder_file))

            # Analyze runner if present
            runner_file = extension_path / "runner.py"
            if runner_file.exists():
                analysis.test_commands.extend(self._analyze_runner(runner_file))

            return analysis

        except Exception as e:
            self.logger.error(f"Failed to analyze extension {extension_path}: {e}")
            return None

    def _analyze_builder(self, builder_file: Path) -> List[str]:
        """Extract build commands from builder.py via keyword heuristics."""
        commands = []
        try:
            content = builder_file.read_text(encoding="utf-8").lower()

            # Look for common build patterns
            if 'cmake' in content:
                commands.append("cmake -B build -S .")
                commands.append("cmake --build build")
            # Word-boundary match so 'cmake' does not also register 'make'.
            if re.search(r'\bmake\b', content):
                commands.append("make")
            if 'ninja' in content:
                commands.append("ninja")

        except Exception as e:
            self.logger.warning(f"Could not analyze builder {builder_file}: {e}")

        return commands

    def _analyze_runner(self, runner_file: Path) -> List[str]:
        """Extract test commands from runner.py via keyword heuristics."""
        commands = []
        try:
            content = runner_file.read_text(encoding="utf-8").lower()

            # Look for common test patterns
            if 'pytest' in content:
                commands.append("pytest")
            if 'ctest' in content:
                commands.append("ctest")

        except Exception as e:
            self.logger.warning(f"Could not analyze runner {runner_file}: {e}")

        return commands


class ExtensionASTAnalyzer(ast.NodeVisitor):
    """AST visitor that extracts migration metadata from extension classes.

    Walks a parsed module looking for classes inheriting from
    ``BaseExtension`` and records name, version, description, frameworks
    and a complexity score. Callers must run ``visit(tree)`` and then read
    ``self.analysis`` (``visit`` itself returns None for module nodes).
    """

    def __init__(self):
        # Neutral baseline; fields are overwritten as concrete values are
        # discovered in the AST.
        self.analysis = ExtensionAnalysis(
            name="unknown",
            version="1.0.0",
            description="",
            supported_frameworks=[],
            dependencies=[],
            build_commands=[],
            test_commands=[],
            environment_variables={},
            configuration_schema={},
            custom_methods=[],
            complexity_score=0
        )

    @staticmethod
    def _const_str(node: ast.AST) -> Optional[str]:
        """Return the value of a string-constant node, else None.

        Uses ast.Constant: ast.Str was deprecated in Python 3.8 and
        removed in 3.14.
        """
        if isinstance(node, ast.Constant) and isinstance(node.value, str):
            return node.value
        return None

    def visit_ClassDef(self, node: ast.ClassDef) -> Any:
        """Visit class definitions looking for extension classes."""
        # Match both `BaseExtension` and attribute form `pkg.BaseExtension`.
        for base in node.bases:
            if ((isinstance(base, ast.Name) and base.id == 'BaseExtension') or
                    (isinstance(base, ast.Attribute) and base.attr == 'BaseExtension')):
                self._analyze_extension_class(node)
                break
        self.generic_visit(node)

    def _analyze_extension_class(self, node: ast.ClassDef):
        """Analyze an extension class definition."""
        # Derive a default name from the class name, e.g. FooExtension -> foo.
        self.analysis.name = node.name.lower().replace('extension', '')

        for item in node.body:
            if isinstance(item, ast.FunctionDef):
                self._analyze_method(item)
            elif isinstance(item, ast.Assign):
                self._analyze_assignment(item)

    def _analyze_method(self, node: ast.FunctionDef):
        """Route a method definition to the appropriate extractor."""
        method_name = node.name

        if method_name == '__init__':
            self._analyze_init_method(node)
        elif method_name == 'description':
            self._extract_string_return(node, 'description')
        elif method_name == 'supported_frameworks':
            self._extract_list_return(node, 'supported_frameworks')
        elif method_name.startswith(('get_', 'create_')):
            # Factory-style methods have no config-driven equivalent and
            # raise the complexity score for the migration report.
            self.analysis.custom_methods.append(method_name)
            self.analysis.complexity_score += 1

    def _analyze_init_method(self, node: ast.FunctionDef):
        """Extract name/version from a super().__init__(name, version) call."""
        for stmt in node.body:
            if isinstance(stmt, ast.Expr) and isinstance(stmt.value, ast.Call):
                call = stmt.value
                if (isinstance(call.func, ast.Attribute) and
                        call.func.attr == '__init__' and
                        len(call.args) >= 2):
                    name = self._const_str(call.args[0])
                    version = self._const_str(call.args[1])
                    if name is not None:
                        self.analysis.name = name
                    if version is not None:
                        self.analysis.version = version

    def _extract_string_return(self, node: ast.FunctionDef, field: str):
        """Copy the first literal-string return value into *field*."""
        for stmt in node.body:
            if isinstance(stmt, ast.Return) and stmt.value is not None:
                value = self._const_str(stmt.value)
                if value is not None:
                    setattr(self.analysis, field, value)
                    break

    def _extract_list_return(self, node: ast.FunctionDef, field: str):
        """Copy the first literal list of strings returned into *field*."""
        for stmt in node.body:
            if isinstance(stmt, ast.Return) and isinstance(stmt.value, ast.List):
                values = []
                for elt in stmt.value.elts:
                    value = self._const_str(elt)
                    if value is not None:
                        values.append(value)
                setattr(self.analysis, field, values)
                break

    def _analyze_assignment(self, node: ast.Assign):
        """Record class-level string assignments to known metadata fields."""
        if len(node.targets) == 1 and isinstance(node.targets[0], ast.Name):
            name = node.targets[0].id
            value = self._const_str(node.value)
            if value is not None and name.lower() in ['description', 'name', 'version']:
                setattr(self.analysis, name.lower(), value)


class ConfigurationGenerator:
    """Generates configuration files from extension analysis."""

    def __init__(self):
        self.logger = logging.getLogger(__name__)

    def generate_extension_config(self, analysis: ExtensionAnalysis) -> Dict[str, Any]:
        """Build a config-driven extension definition from *analysis*.

        Returns a dict ready to be serialized as extension.yaml. Adds a
        "_migration_notes" section when the extension's complexity score
        suggests manual review is needed.
        """
        config = {
            "name": analysis.name,
            "version": analysis.version,
            "type": "config-driven",
            "description": analysis.description,
            "supported_frameworks": analysis.supported_frameworks,
            "priority": 50,  # Default priority

            "dependencies": {
                "system": self._convert_dependencies(analysis.dependencies),
                "build_tools": self._infer_build_tools(analysis.build_commands),
                "libraries": []
            },

            "build": {
                # Prefer cmake whenever any build command mentions it.
                "type": "cmake" if any("cmake" in cmd for cmd in analysis.build_commands) else "make",
                "commands": analysis.build_commands,
                "parallel": True,
                "environment": analysis.environment_variables
            },

            "test": {
                "commands": analysis.test_commands,
                "timeout": 300,  # seconds
                "parallel": False
            },

            "detection": {
                "patterns": self._generate_detection_patterns(analysis),
                "confidence_threshold": 0.7
            }
        }

        # Add schema if available
        if analysis.configuration_schema:
            config["schema"] = analysis.configuration_schema

        # Add warnings for complex extensions
        if analysis.complexity_score > 5:
            config["_migration_notes"] = [
                "This extension has high complexity and may require manual review",
                f"Custom methods found: {', '.join(analysis.custom_methods)}"
            ]

        return config

    def _convert_dependencies(self, dependencies: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Normalize dependency entries; bare strings become system deps."""
        converted = []
        for dep in dependencies:
            if isinstance(dep, dict):
                converted.append(dep)
            elif isinstance(dep, str):
                converted.append({"type": "system", "name": dep})
        return converted

    def _infer_build_tools(self, commands: List[str]) -> List[str]:
        """Infer required build tools from commands.

        The result is sorted so generated configuration files are
        deterministic across runs (a bare set() iteration is not).
        """
        tools = set()
        for cmd in commands:
            lowered = cmd.lower()
            if 'cmake' in lowered:
                tools.add('cmake')
            # Word boundary so 'cmake' alone does not also imply 'make'.
            if re.search(r'\bmake\b', lowered):
                tools.add('make')
            if 'ninja' in lowered:
                tools.add('ninja')
            if 'gcc' in lowered or 'g++' in lowered:
                tools.add('gcc')
        return sorted(tools)

    def _generate_detection_patterns(self, analysis: ExtensionAnalysis) -> List[Dict[str, Any]]:
        """Generate detection patterns based on supported frameworks."""
        patterns = []

        for framework in analysis.supported_frameworks:
            key = framework.lower()
            if key == 'gcr':
                patterns.append({
                    "type": "xpath",
                    "pattern": "//Parameter[@name='solver_type' and @value='gcr']",
                    "confidence": 0.9
                })
            elif key == 'petsc':
                patterns.append({
                    "type": "yaml_path",
                    "pattern": "solver.type",
                    "expected": "petsc",
                    "confidence": 0.8
                })
            elif key == 'hybrid':
                patterns.append({
                    "type": "content",
                    "pattern": r"(petsc|kokkos).*hybrid",
                    "confidence": 0.7
                })

        return patterns


class ExtensionMigrator:
    """Main migration tool for converting Python extensions to config-driven format."""

    def __init__(self, dry_run: bool = False):
        # When dry_run is True nothing is written to disk; the result still
        # lists the files that *would* be generated.
        self.dry_run = dry_run
        self.logger = logging.getLogger(__name__)
        self.analyzer = PythonExtensionAnalyzer()
        self.generator = ConfigurationGenerator()

    def migrate_extension(self, extension_path: Path, output_path: Optional[Path] = None) -> MigrationResult:
        """Migrate a single extension from Python to config-driven format.

        Args:
            extension_path: Directory containing the legacy extension.py.
            output_path: Destination directory; defaults to a sibling
                "<name>_migrated" directory next to the original.

        Returns:
            MigrationResult describing generated files, warnings and errors.
        """
        result = MigrationResult(
            success=False,
            extension_name=extension_path.name,
            original_path=extension_path
        )

        try:
            # Analyze the existing extension
            analysis = self.analyzer.analyze_extension(extension_path)
            if not analysis:
                result.errors.append("Failed to analyze extension")
                return result

            # Generate configuration
            config = self.generator.generate_extension_config(analysis)

            # Determine output path
            if not output_path:
                output_path = extension_path.parent / f"{analysis.name}_migrated"

            # BUGFIX: create missing parent directories too, so nested
            # output paths (e.g. from migrate_all_extensions) don't fail.
            if not self.dry_run:
                output_path.mkdir(parents=True, exist_ok=True)

            # Write extension configuration
            config_file = output_path / "extension.yaml"
            if not self.dry_run:
                with open(config_file, 'w') as f:
                    yaml.dump(config, f, default_flow_style=False, sort_keys=False)
            result.generated_files.append(config_file)

            # Copy non-Python files that might be needed
            for item in extension_path.iterdir():
                if item.suffix in ['.json', '.yaml', '.yml', '.md', '.txt']:
                    dest = output_path / item.name
                    if not self.dry_run:
                        shutil.copy2(item, dest)
                    result.generated_files.append(dest)

            # Generate migration report
            self._generate_migration_report(analysis, config, output_path, result)

            result.success = True
            result.extension_name = analysis.name

        except Exception as e:
            result.errors.append(f"Migration failed: {e}")
            self.logger.error(f"Migration failed for {extension_path}: {e}")

        return result

    def migrate_all_extensions(self, plugins_dir: Path, output_dir: Path) -> List[MigrationResult]:
        """Migrate every legacy extension directory under *plugins_dir*.

        A directory qualifies when it contains an extension.py file.
        """
        results = []

        for extension_dir in plugins_dir.iterdir():
            if extension_dir.is_dir() and (extension_dir / "extension.py").exists():
                output_path = output_dir / f"{extension_dir.name}_migrated"
                results.append(self.migrate_extension(extension_dir, output_path))

        return results

    def _generate_migration_report(self, analysis: ExtensionAnalysis, config: Dict[str, Any],
                                 output_path: Path, result: MigrationResult):
        """Write migration_report.yaml summarizing the migration outcome."""
        report = {
            "migration_summary": {
                "original_extension": analysis.name,
                "complexity_score": analysis.complexity_score,
                "custom_methods": analysis.custom_methods,
                "generated_config": config
            },
            # Same complexity threshold as the "_migration_notes" section
            # emitted in the generated configuration.
            "manual_review_needed": analysis.complexity_score > 5,
            "warnings": result.warnings,
            "next_steps": [
                "Review generated extension.yaml configuration",
                "Test the migrated extension with existing configurations",
                "Update any custom build or test logic if needed"
            ]
        }

        if analysis.custom_methods:
            report["custom_methods_note"] = (
                "The original extension had custom methods that may need "
                "manual implementation in the new config-driven system"
            )

        report_file = output_path / "migration_report.yaml"
        if not self.dry_run:
            with open(report_file, 'w') as f:
                yaml.dump(report, f, default_flow_style=False)
        result.generated_files.append(report_file)


def main():
    """Entry point for the command-line migration tool."""
    import argparse

    cli = argparse.ArgumentParser(description="Migrate GSM extensions to config-driven format")
    cli.add_argument("extension_path", type=Path, help="Path to extension to migrate")
    cli.add_argument("-o", "--output", type=Path, help="Output directory")
    cli.add_argument("--dry-run", action="store_true", help="Show what would be done without making changes")
    cli.add_argument("-v", "--verbose", action="store_true", help="Verbose output")
    opts = cli.parse_args()

    # Opt-in logging; default stays quiet.
    if opts.verbose:
        logging.basicConfig(level=logging.INFO)

    outcome = ExtensionMigrator(dry_run=opts.dry_run).migrate_extension(
        opts.extension_path, opts.output
    )

    # Report the result to stdout.
    if outcome.success:
        print(f"✅ Successfully migrated {outcome.extension_name}")
        print(f"Generated files: {len(outcome.generated_files)}")
        for file in outcome.generated_files:
            print(f"  - {file}")
    else:
        print(f"❌ Migration failed for {outcome.extension_name}")
        for error in outcome.errors:
            print(f"  Error: {error}")

    if outcome.warnings:
        print("Warnings:")
        for warning in outcome.warnings:
            print(f"  ⚠️  {warning}")


# Script entry point: run the CLI only when executed directly.
if __name__ == "__main__":
    main()