"""
Fortran MCP server implementation.

Provides Fortran-specific code analysis tools for function call tree generation
and code understanding through parsing of modern and legacy Fortran constructs.
"""

import logging
from typing import Dict, List, Any, Optional, Union
from pathlib import Path

try:
    from ...core.base_mcp import BaseMCP
    from ...core.error_handler import CodeMCPError, AnalysisError, log_info
except ImportError:
    import sys
    from pathlib import Path
    sys.path.insert(0, str(Path(__file__).parent.parent.parent))
    from core.base_mcp import BaseMCP
    from core.error_handler import CodeMCPError, AnalysisError, log_info
from .parser import FortranParser
from .analyzer import CallGraphAnalyzer
from .modules import ModuleAnalyzer
from .common_blocks import LegacyAnalyzer
from .interfaces import InterfaceAnalyzer
from .types import TypeAnalyzer
from .resolver import DependencyResolver
from .dynamic_calls import DynamicCallDetector
from .module_dependency_tracker import ModuleDependencyTracker


class FortranMCP(BaseMCP):
    """Fortran-specific MCP server for code analysis.

    Aggregates the individual Fortran analyzers (parser, call-graph, modules,
    legacy constructs, interfaces, derived types, dynamic calls and
    project-wide dependency tracking) and exposes them as MCP tools.
    """

    def __init__(self, config=None):
        """Initialize the Fortran MCP server and its analyzers.

        Args:
            config: Optional configuration object forwarded to BaseMCP.
        """
        super().__init__(name="fortran_mcp", language="fortran", config=config)

        # Create the logger first so every later initialization step can log.
        self.logger = logging.getLogger(__name__)

        # Per-file analyzers.
        self.parser = FortranParser()
        self.call_analyzer = CallGraphAnalyzer()
        self.module_analyzer = ModuleAnalyzer()
        self.legacy_analyzer = LegacyAnalyzer()
        self.interface_analyzer = InterfaceAnalyzer()
        self.type_analyzer = TypeAnalyzer()
        self.dependency_resolver = DependencyResolver()
        self.dynamic_detector = DynamicCallDetector()

        # Project-wide dependency tracking; `project_analysis` caches the most
        # recent result so repeated build-order queries need not re-scan.
        self.dependency_tracker = ModuleDependencyTracker()
        self.project_analysis: Optional[Dict[str, Any]] = None

        # Register Fortran-specific tools.
        self._register_tools()

    async def _register_default_tools(self):
        """Register default tools (no-op; handled by _register_tools())."""
        pass

    async def _initialize_language_components(self):
        """Initialize language components (no-op; done eagerly in __init__)."""
        pass

    def _is_supported_file(self, file_path: Path) -> bool:
        """Return True if `file_path` has a supported Fortran extension.

        NOTE(review): calls a public `get_supported_extensions`, presumably a
        BaseMCP wrapper around `_get_supported_extensions` below — confirm.
        """
        return file_path.suffix in self.get_supported_extensions()

    def _get_supported_extensions(self) -> List[str]:
        """Return the file extensions handled by the Fortran parser."""
        return self.parser.get_supported_extensions()

    def _register_tools(self):
        """Register Fortran-specific MCP tools in ``self.tools``."""
        tools = [
            {
                'name': 'analyze_file',
                'description': 'Analyze a Fortran source file comprehensively',
                'handler': self._analyze_file_handler,
                'parameters': {
                    'file_path': {'type': 'string', 'required': True, 'description': 'Path to Fortran file'},
                    'include_legacy': {'type': 'boolean', 'default': True, 'description': 'Include legacy construct analysis'},
                    'include_dependencies': {'type': 'boolean', 'default': True, 'description': 'Include dependency analysis'}
                }
            },
            {
                'name': 'analyze_function',
                'description': 'Analyze a specific Fortran function or subroutine',
                'handler': self._analyze_function_handler,
                'parameters': {
                    'file_path': {'type': 'string', 'required': True},
                    'function_name': {'type': 'string', 'required': True, 'description': 'Name of function/subroutine to analyze'}
                }
            },
            {
                'name': 'get_call_tree',
                'description': 'Generate call tree from a Fortran entry point',
                'handler': self._get_call_tree_handler,
                'parameters': {
                    'file_path': {'type': 'string', 'required': True},
                    'entry_point': {'type': 'string', 'required': True, 'description': 'Entry point procedure name'},
                    'max_depth': {'type': 'integer', 'default': 10, 'description': 'Maximum call depth'}
                }
            },
            {
                'name': 'analyze_modules',
                'description': 'Analyze Fortran modules and USE dependencies',
                'handler': self._analyze_modules_handler,
                'parameters': {
                    'file_path': {'type': 'string', 'required': True}
                }
            },
            {
                'name': 'analyze_legacy',
                'description': 'Analyze legacy Fortran constructs (COMMON, GOTO, etc.)',
                'handler': self._analyze_legacy_handler,
                'parameters': {
                    'file_path': {'type': 'string', 'required': True}
                }
            },
            {
                'name': 'detect_dynamic_calls',
                'description': 'Detect dynamic procedure calls and procedure pointers',
                'handler': self._detect_dynamic_calls_handler,
                'parameters': {
                    'file_path': {'type': 'string', 'required': True}
                }
            },
            {
                'name': 'resolve_dependencies',
                'description': 'Resolve module dependencies across multiple files',
                'handler': self._resolve_dependencies_handler,
                'parameters': {
                    'file_paths': {'type': 'array', 'required': True, 'description': 'List of Fortran file paths'}
                }
            },
            {
                'name': 'analyze_project_dependencies',
                'description': 'Comprehensive project-wide module dependency analysis',
                'handler': self._analyze_project_dependencies_handler,
                'parameters': {
                    'project_path': {'type': 'string', 'default': '.', 'description': 'Path to the Fortran project root'}
                }
            },
            {
                'name': 'get_compilation_order',
                'description': 'Get optimal compilation order for Fortran modules',
                'handler': self._get_compilation_order_handler,
                'parameters': {
                    'project_path': {'type': 'string', 'default': '.', 'description': 'Path to the Fortran project root'}
                }
            },
            {
                'name': 'get_module_build_info',
                'description': 'Get detailed build information for a specific module',
                'handler': self._get_module_build_info_handler,
                'parameters': {
                    'module_name': {'type': 'string', 'required': True, 'description': 'Name of the module'},
                    'project_path': {'type': 'string', 'default': '.', 'description': 'Path to the Fortran project root'}
                }
            },
            {
                'name': 'detect_circular_dependencies',
                'description': 'Detect circular dependencies in Fortran modules',
                'handler': self._detect_circular_dependencies_handler,
                'parameters': {
                    'project_path': {'type': 'string', 'default': '.', 'description': 'Path to the Fortran project root'}
                }
            }
        ]

        for tool in tools:
            self.tools[tool['name']] = tool

    async def parse_file(self, file_path: str) -> Any:
        """Parse a Fortran file and return the parser's structured data.

        Raises:
            AnalysisError: If the file cannot be parsed.
        """
        try:
            return self.parser.parse_file(file_path)
        except Exception as e:
            self.logger.error(f"Error parsing {file_path}: {e}")
            # Chain the original exception so the root cause stays visible.
            raise AnalysisError(f"Failed to parse {file_path}: {e}") from e

    async def build_call_tree(self, entry_point: str, max_depth: Optional[int] = None) -> Any:
        """Build a call tree from an entry point.

        Currently returns a single-node placeholder tree; full multi-file
        resolution is not implemented here.

        Raises:
            AnalysisError: If tree construction fails.
        """
        # Mirror the module-level import fallback so this also works when the
        # package is imported flat (see the try/except at the top of the file).
        try:
            from ...core.base_mcp import CallTree, CallTreeNode
        except ImportError:
            from core.base_mcp import CallTree, CallTreeNode

        try:
            # This would typically require analysis of multiple files.
            # For now, return a simple single-node structure.
            root_node = CallTreeNode(
                name=entry_point,
                file_path="unknown",
                line_number=1,
                language="fortran"
            )
            return CallTree(root_node)

        except Exception as e:
            self.logger.error(f"Error building call tree: {e}")
            raise AnalysisError(f"Failed to build call tree: {e}") from e

    async def _analyze_file_handler(self, file_path: str, include_legacy: bool = True,
                                   include_dependencies: bool = True) -> Dict[str, Any]:
        """Handler for comprehensive single-file analysis.

        Args:
            file_path: Path to the Fortran source file.
            include_legacy: Also analyze legacy constructs (COMMON, GOTO, ...).
            include_dependencies: Also run the cross-file dependency resolver.

        Returns:
            Dict with basic parse data, call graph, modules, interfaces, types,
            and optionally legacy/dependency/dynamic-call sections.
        """
        try:
            log_info(f"Analyzing Fortran file: {file_path}")

            parsed_data = await self.parse_file(file_path)

            result = {
                'file_path': file_path,
                'basic_analysis': parsed_data,
                'call_graph': self.call_analyzer.analyze_file(parsed_data),
                'modules': self.module_analyzer.analyze_modules(parsed_data),
                'interfaces': self.interface_analyzer.analyze_interfaces(parsed_data),
                'types': self.type_analyzer.analyze_types(parsed_data)
            }

            if include_legacy:
                result['legacy_analysis'] = self.legacy_analyzer.analyze_legacy_constructs(parsed_data)

            if include_dependencies:
                self.dependency_resolver.add_file_analysis(file_path, parsed_data)
                result['dependencies'] = self.dependency_resolver.resolve_dependencies()

            result['dynamic_calls'] = self.dynamic_detector.detect_dynamic_calls(parsed_data)

            return result

        except AnalysisError:
            # Already a well-formed analysis error (e.g. a parse failure) —
            # don't wrap it a second time.
            raise
        except Exception as e:
            self.logger.error(f"Error in analyze_file_handler: {e}")
            raise AnalysisError(f"File analysis failed: {e}") from e

    async def _analyze_function_handler(self, file_path: str, function_name: str) -> Dict[str, Any]:
        """Handler for analyzing one function or subroutine.

        Raises:
            AnalysisError: If the procedure is not found or analysis fails.
        """
        try:
            parsed_data = await self.parse_file(file_path)

            # Search functions first, then subroutines, for the target name.
            target_function = None

            for func in parsed_data.get('functions', []):
                if func['name'] == function_name:
                    target_function = func
                    break

            if not target_function:
                for sub in parsed_data.get('subroutines', []):
                    if sub['name'] == function_name:
                        target_function = sub
                        break

            if not target_function:
                raise AnalysisError(f"Function '{function_name}' not found in {file_path}")

            # Analyze the file's call graph to find callers of this procedure.
            call_graph = self.call_analyzer.analyze_file(parsed_data)

            return {
                'function_info': target_function,
                'calls_made': target_function.get('calls', []),
                'called_by': call_graph['reverse_graph'].get(function_name, []),
                'call_tree': self.call_analyzer.build_call_tree(function_name, max_depth=5)
            }

        except AnalysisError:
            # Keep the specific message (e.g. "not found") instead of
            # re-wrapping it into a vaguer one.
            raise
        except Exception as e:
            self.logger.error(f"Error in analyze_function_handler: {e}")
            raise AnalysisError(f"Function analysis failed: {e}") from e

    async def _get_call_tree_handler(self, file_path: str, entry_point: str, max_depth: int = 10) -> Dict[str, Any]:
        """Handler for call-tree generation from an entry point."""
        try:
            parsed_data = await self.parse_file(file_path)
            # NOTE(review): the return value is unused; presumably analyze_file
            # populates analyzer state consumed by build_call_tree — confirm.
            self.call_analyzer.analyze_file(parsed_data)

            call_tree = self.call_analyzer.build_call_tree(entry_point, max_depth)

            return {
                'entry_point': entry_point,
                'max_depth': max_depth,
                'call_tree': call_tree
            }

        except AnalysisError:
            raise
        except Exception as e:
            self.logger.error(f"Error in get_call_tree_handler: {e}")
            raise AnalysisError(f"Call tree generation failed: {e}") from e

    async def _analyze_modules_handler(self, file_path: str) -> Dict[str, Any]:
        """Handler for module and USE-statement analysis."""
        try:
            parsed_data = await self.parse_file(file_path)
            return self.module_analyzer.analyze_modules(parsed_data)

        except AnalysisError:
            raise
        except Exception as e:
            self.logger.error(f"Error in analyze_modules_handler: {e}")
            raise AnalysisError(f"Module analysis failed: {e}") from e

    async def _analyze_legacy_handler(self, file_path: str) -> Dict[str, Any]:
        """Handler for legacy construct analysis (COMMON, GOTO, etc.)."""
        try:
            parsed_data = await self.parse_file(file_path)
            return self.legacy_analyzer.analyze_legacy_constructs(parsed_data)

        except AnalysisError:
            raise
        except Exception as e:
            self.logger.error(f"Error in analyze_legacy_handler: {e}")
            raise AnalysisError(f"Legacy analysis failed: {e}") from e

    async def _detect_dynamic_calls_handler(self, file_path: str) -> Dict[str, Any]:
        """Handler for dynamic call / procedure-pointer detection."""
        try:
            parsed_data = await self.parse_file(file_path)
            return self.dynamic_detector.detect_dynamic_calls(parsed_data)

        except AnalysisError:
            raise
        except Exception as e:
            self.logger.error(f"Error in detect_dynamic_calls_handler: {e}")
            raise AnalysisError(f"Dynamic call detection failed: {e}") from e

    async def _resolve_dependencies_handler(self, file_paths: List[str]) -> Dict[str, Any]:
        """Handler for multi-file dependency resolution.

        Resets the resolver, feeds it each Fortran file from `file_paths`
        (non-Fortran paths are skipped), and returns the resolved result.
        """
        try:
            # Start from a clean resolver so earlier analyses don't leak in.
            self.dependency_resolver = DependencyResolver()

            for file_path in file_paths:
                if self.parser.is_fortran_file(file_path):
                    parsed_data = await self.parse_file(file_path)
                    self.dependency_resolver.add_file_analysis(file_path, parsed_data)

            return self.dependency_resolver.resolve_dependencies()

        except AnalysisError:
            raise
        except Exception as e:
            self.logger.error(f"Error in resolve_dependencies_handler: {e}")
            raise AnalysisError(f"Dependency resolution failed: {e}") from e

    async def _analyze_project_dependencies_handler(self, project_path: str = '.') -> Dict[str, Any]:
        """Handler for comprehensive project dependency analysis.

        Side effect: caches the result in ``self.project_analysis`` for reuse
        by the compilation-order / build-info / cycle-detection handlers.
        """
        try:
            log_info(f"Analyzing Fortran project dependencies in: {project_path}")

            # Fresh tracker rooted at the requested project path.
            self.dependency_tracker = ModuleDependencyTracker(project_path)
            self.project_analysis = self.dependency_tracker.analyze_project_dependencies()

            return self.project_analysis

        except Exception as e:
            self.logger.error(f"Error in analyze_project_dependencies_handler: {e}")
            raise AnalysisError(f"Project dependency analysis failed: {e}") from e

    async def _get_compilation_order_handler(self, project_path: str = '.') -> Dict[str, Any]:
        """Handler for computing the module compilation order.

        Returns the flat order, a per-module detailed listing, parallel build
        groups, unbuildable modules and build-system recommendations.
        """
        try:
            # Re-analyze if we have no cached analysis or the path changed.
            if not self.project_analysis or self.dependency_tracker.project_root != Path(project_path):
                await self._analyze_project_dependencies_handler(project_path)

            compilation_order = self.project_analysis['compilation_order']
            modules = self.project_analysis['modules']

            # Build detailed per-module compilation information.
            compilation_info = []
            for i, module_name in enumerate(compilation_order):
                if module_name in modules:
                    module_info = modules[module_name]
                    compilation_info.append({
                        'order': i + 1,
                        'module': module_name,
                        'file_path': module_info['file_path'],
                        'dependencies': module_info['dependencies'],
                        'can_parallel_build': self._can_build_in_parallel(module_name, compilation_order[:i])
                    })

            parallel_groups = self._find_parallel_build_groups(compilation_order, modules)

            return {
                'compilation_order': compilation_order,
                'detailed_order': compilation_info,
                'parallel_build_groups': parallel_groups,
                'total_modules': len(compilation_order),
                'unbuildable_modules': [m for m in modules.keys() if m not in compilation_order],
                'build_recommendations': self._generate_build_recommendations(modules, compilation_order)
            }

        except AnalysisError:
            raise
        except Exception as e:
            self.logger.error(f"Error in get_compilation_order_handler: {e}")
            raise AnalysisError(f"Compilation order analysis failed: {e}") from e

    async def _get_module_build_info_handler(self, module_name: str, project_path: str = '.') -> Dict[str, Any]:
        """Handler returning build information for one module."""
        try:
            # Re-analyze if we have no cached analysis or the path changed.
            if not self.project_analysis or self.dependency_tracker.project_root != Path(project_path):
                await self._analyze_project_dependencies_handler(project_path)

            return self.dependency_tracker.get_module_build_info(module_name)

        except AnalysisError:
            raise
        except Exception as e:
            self.logger.error(f"Error in get_module_build_info_handler: {e}")
            raise AnalysisError(f"Module build info failed: {e}") from e

    async def _detect_circular_dependencies_handler(self, project_path: str = '.') -> Dict[str, Any]:
        """Handler for circular-dependency detection with fix suggestions."""
        try:
            # Re-analyze if we have no cached analysis or the path changed.
            if not self.project_analysis or self.dependency_tracker.project_root != Path(project_path):
                await self._analyze_project_dependencies_handler(project_path)

            cycles = self.project_analysis['circular_dependencies']

            # Build a detailed report for every detected cycle. Each cycle is
            # a path whose last element repeats the first.
            cycle_analysis = []
            for cycle in cycles:
                cycle_analysis.append({
                    'cycle': cycle,
                    'length': len(cycle) - 1,  # Last element is repeat of first
                    'modules_involved': cycle[:-1],  # Remove duplicate
                    'suggested_fixes': self._suggest_cycle_fixes(cycle)
                })

            return {
                'has_cycles': len(cycles) > 0,
                'cycle_count': len(cycles),
                'cycles': cycle_analysis,
                'affected_modules': list(set(module for cycle in cycles for module in cycle[:-1])),
                'resolution_priority': self._prioritize_cycle_resolution(cycles)
            }

        except AnalysisError:
            raise
        except Exception as e:
            self.logger.error(f"Error in detect_circular_dependencies_handler: {e}")
            raise AnalysisError(f"Circular dependency detection failed: {e}") from e

    def _can_build_in_parallel(self, module_name: str, previous_modules: List[str]) -> bool:
        """Return True if `module_name` can build alongside any earlier module.

        Two modules can build in parallel when neither depends on the other
        and their dependency sets are disjoint.
        """
        if not self.project_analysis:
            return False

        modules = self.project_analysis['modules']
        if module_name not in modules:
            return False

        module_deps = set(modules[module_name]['dependencies'])

        for prev_module in previous_modules:
            if prev_module not in modules:
                continue

            prev_deps = set(modules[prev_module]['dependencies'])

            if (module_name not in prev_deps and
                prev_module not in module_deps and
                not module_deps.intersection(prev_deps)):
                return True

        return False

    def _find_parallel_build_groups(self, compilation_order: List[str], modules: Dict[str, Any]) -> List[List[str]]:
        """Group `compilation_order` into waves that can be built in parallel.

        Each wave contains modules whose dependencies are fully satisfied by
        earlier waves and that do not depend on each other.
        """
        parallel_groups = []
        remaining_modules = compilation_order.copy()

        while remaining_modules:
            current_group = []
            modules_to_remove = []

            for module in remaining_modules:
                # Unknown modules are dropped silently from the grouping.
                if module not in modules:
                    modules_to_remove.append(module)
                    continue

                module_deps = set(modules[module]['dependencies'])

                # All dependencies must already have been scheduled.
                deps_satisfied = not any(dep in remaining_modules for dep in module_deps)

                if deps_satisfied:
                    # Reject if the module depends on (or is depended on by)
                    # anything already placed in the current wave.
                    can_add_to_group = True
                    for group_module in current_group:
                        if group_module in module_deps or module in modules.get(group_module, {}).get('dependencies', []):
                            can_add_to_group = False
                            break

                    if can_add_to_group:
                        current_group.append(module)
                        modules_to_remove.append(module)

            for module in modules_to_remove:
                remaining_modules.remove(module)

            if current_group:
                parallel_groups.append(current_group)
            elif remaining_modules:
                # No progress is possible (likely a dependency cycle); force
                # one module through to avoid an infinite loop.
                parallel_groups.append([remaining_modules.pop(0)])

        return parallel_groups

    def _generate_build_recommendations(self, modules: Dict[str, Any], compilation_order: List[str]) -> List[str]:
        """Generate human-readable build-system recommendations."""
        recommendations = []

        # Modules absent from the compilation order cannot be built.
        unbuildable = [m for m in modules.keys() if m not in compilation_order]
        if unbuildable:
            recommendations.append(f"Fix dependency issues for {len(unbuildable)} unbuildable modules")

        # Flag modules with very wide dependency fan-in.
        max_deps = max((len(info['dependencies']) for info in modules.values()), default=0)
        if max_deps > 15:
            recommendations.append("Consider refactoring modules with many dependencies to reduce complexity")

        # Many dependency-free modules may be consolidation candidates.
        no_deps = [m for m, info in modules.items() if len(info['dependencies']) == 0]
        if len(no_deps) > 5:
            recommendations.append(f"Consider consolidating {len(no_deps)} independent modules")

        return recommendations

    def _suggest_cycle_fixes(self, cycle: List[str]) -> List[str]:
        """Suggest refactorings to break one circular dependency.

        `cycle` is a path whose last element repeats the first, so a simple
        A->B->A cycle has length 3.
        """
        suggestions = []

        if len(cycle) == 3:  # Simple A->B->A cycle
            suggestions.append("Move shared functionality to a new common module")
            suggestions.append("Use procedure pointers or abstract interfaces to break the cycle")
        elif len(cycle) > 5:
            suggestions.append("Break down large modules into smaller, more focused modules")
            suggestions.append("Identify and extract common dependencies")

        # Generic advice that applies to any cycle length.
        suggestions.append("Use dependency inversion: create interfaces in lower-level modules")
        suggestions.append("Consider if some dependencies can be made optional or delayed")

        return suggestions

    def _prioritize_cycle_resolution(self, cycles: List[List[str]]) -> List[Dict[str, Any]]:
        """Rank cycles by resolution priority (highest score first).

        Scoring: more involved modules raise priority; simple two-module
        cycles get a bonus because they are the easiest to fix.
        """
        if not cycles:
            return []

        priorities = []
        for i, cycle in enumerate(cycles):
            unique_modules = len(set(cycle[:-1]))  # Remove duplicate last element

            priority_score = unique_modules * 2  # More modules = higher priority
            if len(cycle) == 3:  # Simple cycles are easier to fix
                priority_score += 5

            priorities.append({
                'cycle_index': i,
                'cycle': cycle,
                'priority_score': priority_score,
                'reason': f"Involves {unique_modules} modules, {'simple' if len(cycle) == 3 else 'complex'} cycle"
            })

        priorities.sort(key=lambda x: x['priority_score'], reverse=True)
        return priorities

    async def _extract_function_info(self, ast_data: Any, function_name: str) -> Optional[Dict[str, Any]]:
        """Find a function or subroutine entry by name in parsed data.

        Returns None when `ast_data` is not a dict or the name is absent.
        (Kept ``async`` for interface compatibility even though it awaits
        nothing.)
        """
        if not isinstance(ast_data, dict):
            return None

        # Functions take precedence over subroutines with the same name.
        for func in ast_data.get('functions', []):
            if func['name'] == function_name:
                return func

        for sub in ast_data.get('subroutines', []):
            if sub['name'] == function_name:
                return sub

        return None

    def get_analysis_summary(self, file_path: str) -> Dict[str, Any]:
        """Summarize the analysis capabilities available for `file_path`."""
        if not self.parser.is_fortran_file(file_path):
            return {'supported': False, 'reason': 'Not a Fortran file'}

        return {
            'supported': True,
            'capabilities': [
                'Parse modern Fortran (90+) and legacy Fortran (77)',
                'Call graph analysis for procedures',
                'Module and USE statement analysis',
                'Interface block analysis',
                'Derived type and type hierarchy analysis',
                'Legacy construct analysis (COMMON, GOTO)',
                'Dynamic call detection (procedure pointers)',
                'Dependency resolution across files',
                'Compilation order determination'
            ],
            'file_extensions': self.get_supported_extensions(),
            'analysis_tools': list(self.tools.keys())
        }