"""
Advanced Fortran module dependency tracking.

Provides comprehensive dependency analysis for Fortran module systems including:
- Multi-file module dependency resolution
- Build order optimization
- Interface dependency analysis
- Submodule hierarchy tracking
- Cross-compilation unit analysis
"""

import re
import glob
from typing import Dict, List, Any, Optional, Set, Tuple
from pathlib import Path
from collections import defaultdict, deque
import logging

from ...core.error_handler import AnalysisError, log_info, log_debug
from ...core.project_scanner import ProjectScanner
from ...core.function_classifier import FunctionClassifier


class ModuleDependencyNode:
    """A single node in the Fortran module dependency graph.

    Tracks what a module depends on, what depends on it, the entities it
    exposes, and the metadata used for build ordering.
    """

    def __init__(self, name: str, file_path: str = "", module_type: str = "module"):
        # Identity: module name and the source file it was found in.
        self.name = name
        self.file_path = file_path
        # One of: 'module', 'submodule', 'program', 'interface'.
        self.module_type = module_type
        # Graph edges: modules this one uses, and modules that use this one.
        self.dependencies = set()
        self.dependents = set()
        # Entities this module exposes vs. entities it needs from elsewhere.
        self.provides = set()
        self.requires = set()
        # Interface-name -> dependency info.
        self.interfaces = {}
        # Submodule bookkeeping: children if a module, parent if a submodule.
        self.submodules = set()
        self.parent_module = None
        # Position assigned by topological sort; None until ordering runs.
        self.compilation_order = None
        self.analyzed = False

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this node into plain built-in containers."""
        snapshot = {
            'name': self.name,
            'file_path': self.file_path,
            'module_type': self.module_type,
        }
        # Sets become lists and the interface map is shallow-copied so
        # callers cannot mutate the node's state through the result.
        snapshot['dependencies'] = list(self.dependencies)
        snapshot['dependents'] = list(self.dependents)
        snapshot['provides'] = list(self.provides)
        snapshot['requires'] = list(self.requires)
        snapshot['interfaces'] = dict(self.interfaces)
        snapshot['submodules'] = list(self.submodules)
        snapshot['parent_module'] = self.parent_module
        snapshot['compilation_order'] = self.compilation_order
        snapshot['analyzed'] = self.analyzed
        return snapshot


class FortranProjectScanner:
    """Scans Fortran projects to discover source files, modules and build systems."""

    def __init__(self, project_root: str = '.'):
        """Initialize project scanner rooted at *project_root*."""
        self.project_root = Path(project_root)
        self.project_scanner = ProjectScanner()
        self.function_classifier = FunctionClassifier()

    def scan_project(self, include_patterns: List[str] = None) -> Dict[str, Any]:
        """Scan the project tree for Fortran files, modules and programs.

        Args:
            include_patterns: glob patterns (relative to the project root)
                selecting Fortran sources; defaults to the common free- and
                fixed-form extensions in both upper and lower case.

        Returns:
            Dict with the discovered files, modules/submodules keyed by name,
            programs, build-system files, and summary statistics.
        """
        if include_patterns is None:
            include_patterns = [
                '**/*.f90', '**/*.f95', '**/*.f03', '**/*.f08', '**/*.f',
                '**/*.F90', '**/*.F95', '**/*.F03', '**/*.F08', '**/*.F',
                '**/*.for', '**/*.FOR'
            ]

        fortran_files = []
        for pattern in include_patterns:
            files = glob.glob(str(self.project_root / pattern), recursive=True)
            fortran_files.extend(Path(f) for f in files)

        # Remove duplicates (case-insensitive filesystems can match the same
        # file under several patterns) and skip build/VCS directories.
        fortran_files = list(set(fortran_files))
        fortran_files = [f for f in fortran_files if self._is_valid_fortran_file(f)]

        modules_found = {}
        programs_found = {}
        build_files = self._find_build_files()

        for file_path in fortran_files:
            file_info = self._analyze_fortran_file(file_path)

            for module_name in file_info.get('modules', []):
                modules_found[module_name] = {
                    'file_path': str(file_path),
                    'type': 'module',
                    'details': file_info
                }

            # BUG FIX: submodule entries are dicts ({'name', 'parent_module',
            # 'parent_submodule'}); the original used the whole dict as the
            # mapping key, which raised TypeError (unhashable type: 'dict')
            # for any project containing a submodule. Key on the name.
            for submodule_info in file_info.get('submodules', []):
                submodule_name = (submodule_info['name']
                                  if isinstance(submodule_info, dict)
                                  else submodule_info)
                modules_found[submodule_name] = {
                    'file_path': str(file_path),
                    'type': 'submodule',
                    'details': file_info
                }

            for program_name in file_info.get('programs', []):
                programs_found[program_name] = {
                    'file_path': str(file_path),
                    'type': 'program',
                    'details': file_info
                }

        return {
            'project_root': str(self.project_root),
            'fortran_files': [str(f) for f in fortran_files],
            'modules': modules_found,
            'programs': programs_found,
            'build_files': build_files,
            'statistics': {
                'total_files': len(fortran_files),
                'total_modules': len(modules_found),
                'total_programs': len(programs_found),
                'build_systems_detected': len(build_files)
            }
        }

    def _is_valid_fortran_file(self, file_path: Path) -> bool:
        """Return True if *file_path* is an existing source file outside
        build/VCS directories."""
        if not file_path.exists() or not file_path.is_file():
            return False

        # Skip common non-source directories anywhere in the path.
        skip_dirs = {'.git', '__pycache__', '.pytest_cache', 'build', 'dist', 'CMakeFiles'}
        if any(part in skip_dirs for part in file_path.parts):
            return False

        return True

    def _find_build_files(self) -> List[Dict[str, Any]]:
        """Find build system files (CMake, Make, Autotools, SCons, FPM).

        Returns one record per file found, preserving the original
        discovery order: CMake, Make, Autotools, SCons, FPM.
        """
        # (glob patterns, type tag, human-readable build system name)
        build_specs = [
            (['**/CMakeLists.txt'], 'cmake', 'CMake'),
            (['**/Makefile', '**/makefile', '**/*.mk'], 'makefile', 'Make'),
            (['**/configure.ac', '**/configure.in'], 'autotools', 'Autotools'),
            (['**/SConstruct', '**/SConscript'], 'scons', 'SCons'),
            (['**/fpm.toml'], 'fpm', 'FPM'),
        ]

        build_files = []
        for patterns, build_type, system_name in build_specs:
            for pattern in patterns:
                for found in self.project_root.glob(pattern):
                    build_files.append({
                        'type': build_type,
                        'file': str(found),
                        'build_system': system_name
                    })

        return build_files

    def _analyze_fortran_file(self, file_path: Path) -> Dict[str, Any]:
        """Analyze a Fortran file for modules, programs, and dependencies.

        Returns an empty dict when the file cannot be read. Parsing is
        line-oriented over comment-stripped, lower-cased source, so all
        recorded names are lower case.
        """
        try:
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                content = f.read()
        except Exception as e:
            log_debug(f"Could not read file {file_path}: {e}")
            return {}

        analysis = {
            'file_path': str(file_path),
            'modules': [],
            'submodules': [],
            'programs': [],
            'uses': [],
            'includes': [],
            'interfaces': [],
            'procedures': [],
            'types': []
        }

        # Strip comments first so '!' inside code lines cannot confuse the
        # regexes below; line numbers are preserved.
        cleaned_content = self._remove_fortran_comments(content)
        lines = cleaned_content.split('\n')

        for line_num, line in enumerate(lines, 1):
            line_stripped = line.strip().lower()

            # Module definitions ('module procedure' lines are excluded).
            module_match = re.match(r'^\s*module\s+(\w+)', line_stripped)
            if module_match and 'procedure' not in line_stripped:
                analysis['modules'].append(module_match.group(1))

            # Submodule definitions: submodule (parent[:ancestor]) name
            submodule_match = re.match(r'^\s*submodule\s*\(\s*(\w+)(?:\s*:\s*(\w+))?\s*\)\s*(\w+)', line_stripped)
            if submodule_match:
                parent_module = submodule_match.group(1)
                parent_submodule = submodule_match.group(2)
                submodule_name = submodule_match.group(3)
                analysis['submodules'].append({
                    'name': submodule_name,
                    'parent_module': parent_module,
                    'parent_submodule': parent_submodule
                })

            # Program definitions
            program_match = re.match(r'^\s*program\s+(\w+)', line_stripped)
            if program_match:
                analysis['programs'].append(program_match.group(1))

            # USE statements. BUG FIX: the original regex had no support for
            # the standard '::' separator, so 'use :: mod' and the canonical
            # 'use, intrinsic :: iso_c_binding' forms were silently skipped.
            use_match = re.match(
                r'^\s*use(?:\s*,\s*(intrinsic|non_intrinsic))?(?:\s*::\s*|\s+)(\w+)'
                r'(?:\s*,\s*only\s*:\s*(.+))?',
                line_stripped)
            if use_match:
                intrinsic_type = use_match.group(1)
                module_name = use_match.group(2)
                only_list = use_match.group(3)

                use_info = {
                    'module': module_name,
                    'line': line_num,
                    'intrinsic': intrinsic_type == 'intrinsic',
                    'non_intrinsic': intrinsic_type == 'non_intrinsic',
                    'only': only_list is not None
                }

                if only_list:
                    # Parse the ONLY list into individual entity names.
                    entities = [e.strip() for e in only_list.split(',')]
                    use_info['only_list'] = entities

                analysis['uses'].append(use_info)

            # INCLUDE statements (quoted file name)
            include_match = re.match(r'^\s*include\s+[\'"]([^\'"]+)[\'"]', line_stripped)
            if include_match:
                analysis['includes'].append({
                    'file': include_match.group(1),
                    'line': line_num
                })

            # Interface blocks. BUG FIX: a word boundary after 'interface'
            # stops identifiers such as 'interface_count = 1' from being
            # recorded as anonymous interface blocks.
            interface_match = re.match(r'^\s*(?:abstract\s+)?interface\b(?:\s+(\w+))?', line_stripped)
            if interface_match:
                interface_name = interface_match.group(1) or 'anonymous'
                analysis['interfaces'].append({
                    'name': interface_name,
                    'line': line_num,
                    'abstract': 'abstract' in line_stripped
                })

            # Procedure definitions (functions and subroutines).
            # NOTE(limitation): functions declared with a leading result type
            # (e.g. 'integer function f()') are not matched by this pattern.
            proc_match = re.match(r'^\s*(?:pure\s+|elemental\s+|impure\s+)?(?:recursive\s+)?(function|subroutine)\s+(\w+)', line_stripped)
            if proc_match:
                proc_type = proc_match.group(1)
                proc_name = proc_match.group(2)
                analysis['procedures'].append({
                    'name': proc_name,
                    'type': proc_type,
                    'line': line_num,
                    'pure': 'pure' in line_stripped,
                    'elemental': 'elemental' in line_stripped,
                    'recursive': 'recursive' in line_stripped
                })

            # Derived-type definitions (only the 'type :: name' form,
            # optionally with abstract/extends attributes).
            type_match = re.match(r'^\s*type(?:\s*,\s*(?:abstract|extends\((\w+)\)))?\s*::\s*(\w+)', line_stripped)
            if type_match:
                extends_type = type_match.group(1)
                type_name = type_match.group(2)
                analysis['types'].append({
                    'name': type_name,
                    'line': line_num,
                    'extends': extends_type,
                    'abstract': 'abstract' in line_stripped
                })

        return analysis

    def _remove_fortran_comments(self, content: str) -> str:
        """Remove '!' comments while respecting quoted strings.

        A '!' inside a single- or double-quoted string is kept; a doubled
        quote character inside a string is the Fortran escape for a literal
        quote and does not end the string.
        """
        lines = content.split('\n')
        cleaned_lines = []

        for line in lines:
            in_string = False
            quote_char = None   # which quote opened the current string
            cleaned_line = ""

            i = 0
            while i < len(line):
                char = line[i]

                if not in_string:
                    if char in ['"', "'"]:
                        in_string = True
                        quote_char = char
                        cleaned_line += char
                    elif char == '!':
                        # Comment starts: discard the rest of the line.
                        break
                    else:
                        cleaned_line += char
                else:
                    cleaned_line += char
                    if char == quote_char:
                        if i + 1 < len(line) and line[i + 1] == quote_char:
                            # Doubled quote: escaped literal, stay in string.
                            i += 1
                            cleaned_line += line[i]
                        else:
                            # Closing quote: string ends here.
                            in_string = False
                            quote_char = None

                i += 1

            cleaned_lines.append(cleaned_line)

        return '\n'.join(cleaned_lines)


class ModuleDependencyTracker:
    """Advanced Fortran module dependency tracking and analysis.

    Builds a module-level dependency graph from a project scan, derives a
    safe compilation order via topological sort, and reports structural
    issues (circular dependencies, missing modules, interface conflicts).
    """

    def __init__(self, project_root: str = '.'):
        """Initialize dependency tracker rooted at *project_root*."""
        self.project_root = Path(project_root)
        self.scanner = FortranProjectScanner(project_root)
        self.modules = {}   # module_name -> ModuleDependencyNode
        self.programs = {}  # program_name -> program info dict
        self.dependency_graph = defaultdict(set)          # module -> modules it uses
        self.reverse_dependency_graph = defaultdict(set)  # module -> modules using it
        self.compilation_order = []
        self.logger = logging.getLogger(__name__)

        # Compiler/runtime-provided modules that never need to be built.
        self.intrinsic_modules = {
            'iso_c_binding', 'iso_fortran_env', 'ieee_exceptions',
            'ieee_arithmetic', 'ieee_features', 'omp_lib', 'omp_lib_kinds',
            'mpi', 'mpi_f08'
        }

    def analyze_project_dependencies(self) -> Dict[str, Any]:
        """Perform comprehensive dependency analysis of the Fortran project.

        Returns:
            Dict with the raw project scan, per-module dependency info, the
            compilation order, detected issues, recommendations and stats.

        Raises:
            AnalysisError: if any stage of the analysis fails.
        """
        try:
            log_info(f"Starting Fortran project dependency analysis in {self.project_root}")

            # Scan project for modules and files
            scan_result = self.scanner.scan_project()

            # Build dependency nodes
            self._build_dependency_nodes(scan_result)

            # Analyze dependencies between modules
            self._analyze_module_dependencies(scan_result)

            # Build dependency graph
            self._build_dependency_graph()

            # Determine compilation order
            self._determine_compilation_order()

            # Detect issues
            cycles = self._detect_cycles()
            missing_modules = self._find_missing_modules()
            interface_conflicts = self._detect_interface_conflicts()

            # Generate recommendations
            recommendations = self._generate_recommendations(cycles, missing_modules, interface_conflicts)

            result = {
                'project_scan': scan_result,
                'modules': {name: node.to_dict() for name, node in self.modules.items()},
                'programs': self.programs,
                'dependency_graph': {mod: list(deps) for mod, deps in self.dependency_graph.items()},
                'compilation_order': self.compilation_order,
                'circular_dependencies': cycles,
                'missing_modules': missing_modules,
                'interface_conflicts': interface_conflicts,
                'recommendations': recommendations,
                'statistics': self._calculate_statistics()
            }

            log_info(f"Dependency analysis complete: {len(self.modules)} modules, {len(cycles)} cycles detected")

            return result

        except Exception as e:
            self.logger.error(f"Dependency analysis failed: {e}")
            raise AnalysisError(f"Module dependency analysis failed: {str(e)}")

    def _build_dependency_nodes(self, scan_result: Dict[str, Any]):
        """Create a ModuleDependencyNode per scanned module and record the
        entities (procedures, types, interfaces) each one provides."""
        for module_name, module_info in scan_result['modules'].items():
            node = ModuleDependencyNode(
                name=module_name,
                file_path=module_info['file_path'],
                module_type=module_info['type']
            )

            details = module_info.get('details', {})

            # Entities exposed by this module, tagged by kind.
            for proc in details.get('procedures', []):
                node.provides.add(f"procedure:{proc['name']}")

            for type_def in details.get('types', []):
                node.provides.add(f"type:{type_def['name']}")

            for interface in details.get('interfaces', []):
                node.provides.add(f"interface:{interface['name']}")

            self.modules[module_name] = node

        # Programs are tracked separately; they are entry points, not modules.
        for program_name, program_info in scan_result['programs'].items():
            self.programs[program_name] = {
                'name': program_name,
                'file_path': program_info['file_path'],
                'dependencies': [],
                'entry_point': True
            }

    def _analyze_module_dependencies(self, scan_result: Dict[str, Any]):
        """Record USE-statement and submodule relationships between modules.

        NOTE(limitation): USE statements are attributed per *file* — every
        module (and program) defined in a file is credited with all of that
        file's USE statements.
        """
        for file_path in scan_result['fortran_files']:
            # Locate the per-file analysis via any module defined in it,
            # falling back to programs for program-only files.
            file_analysis = None
            for module_info in scan_result['modules'].values():
                if module_info['file_path'] == file_path:
                    file_analysis = module_info.get('details', {})
                    break

            if not file_analysis:
                for program_info in scan_result['programs'].values():
                    if program_info['file_path'] == file_path:
                        file_analysis = program_info.get('details', {})
                        break

            if not file_analysis:
                continue

            # Process USE statements
            for use_stmt in file_analysis.get('uses', []):
                used_module = use_stmt['module']

                # Intrinsic modules are provided by the compiler; no edge.
                if used_module.lower() in self.intrinsic_modules:
                    continue

                file_modules = file_analysis.get('modules', [])
                file_programs = file_analysis.get('programs', [])

                for module_name in file_modules:
                    if module_name in self.modules:
                        self.modules[module_name].dependencies.add(used_module)
                        self.modules[module_name].requires.add(f"module:{used_module}")

                for program_name in file_programs:
                    if program_name in self.programs:
                        self.programs[program_name]['dependencies'].append(used_module)

            # Submodules must be compiled after their parent module.
            for submodule_info in file_analysis.get('submodules', []):
                if isinstance(submodule_info, dict):
                    submodule_name = submodule_info['name']
                    parent_module = submodule_info['parent_module']

                    if submodule_name in self.modules and parent_module in self.modules:
                        self.modules[submodule_name].dependencies.add(parent_module)
                        self.modules[submodule_name].parent_module = parent_module
                        self.modules[parent_module].submodules.add(submodule_name)

    def _build_dependency_graph(self):
        """Rebuild forward and reverse dependency graphs from node edges."""
        self.dependency_graph.clear()
        self.reverse_dependency_graph.clear()

        for module_name, node in self.modules.items():
            for dependency in node.dependencies:
                self.dependency_graph[module_name].add(dependency)
                self.reverse_dependency_graph[dependency].add(module_name)

                # Mirror the edge on the target node when it is internal.
                if dependency in self.modules:
                    self.modules[dependency].dependents.add(module_name)

    def _determine_compilation_order(self):
        """Determine compilation order using Kahn's topological sort.

        A module's in-degree is the number of *internal* modules it depends
        on, so modules with no unmet dependencies are compiled first.
        Modules trapped in cycles never reach in-degree 0 and are left out
        of the order (reported via statistics/recommendations).
        """
        self.compilation_order = []

        # BUG FIX: the original incremented in_degree[dependency] (counting
        # dependents instead of dependencies) and then relaxed edges via the
        # reverse graph, so for A-depends-on-B it ordered A first and never
        # enqueued B at all. Count each module's own internal dependencies.
        in_degree = {module_name: 0 for module_name in self.modules}

        for module_name, dependencies in self.dependency_graph.items():
            for dependency in dependencies:
                if module_name in in_degree and dependency in self.modules:
                    in_degree[module_name] += 1

        # Seed with modules that depend on nothing internal.
        queue = deque(module for module, degree in in_degree.items() if degree == 0)
        order = 0

        while queue:
            current_module = queue.popleft()
            self.compilation_order.append(current_module)

            # Record this module's position in the build order.
            self.modules[current_module].compilation_order = order
            order += 1

            # Compiling this module satisfies one dependency of each dependent.
            for dependent in self.reverse_dependency_graph.get(current_module, []):
                if dependent in self.modules:
                    in_degree[dependent] -= 1
                    if in_degree[dependent] == 0:
                        queue.append(dependent)

    def _detect_cycles(self) -> List[List[str]]:
        """Detect circular dependencies among internal modules.

        Returns a list of cycles; each cycle is a path of module names that
        starts and ends on the same module.
        """
        cycles = []
        visited = set()    # fully explored modules
        rec_stack = set()  # modules on the current DFS path
        path = []          # current DFS path, in order

        def dfs(module: str) -> None:
            visited.add(module)
            rec_stack.add(module)
            path.append(module)

            for dependency in self.dependency_graph.get(module, []):
                if dependency not in self.modules:
                    continue  # only internal modules can form build cycles
                if dependency in rec_stack:
                    # Back-edge: the cycle is the path slice from the
                    # dependency back to the current module.
                    cycle_start = path.index(dependency)
                    cycles.append(path[cycle_start:] + [dependency])
                elif dependency not in visited:
                    dfs(dependency)

            # BUG FIX: always unwind the recursion stack. The original
            # returned early when a cycle was found and left modules in
            # rec_stack, which made later DFS roots either crash with
            # ValueError in path.index or report spurious cycles.
            rec_stack.remove(module)
            path.pop()

        for module in self.modules:
            if module not in visited:
                dfs(module)

        return cycles

    def _find_missing_modules(self) -> List[Dict[str, Any]]:
        """Find modules that are referenced but neither defined in the
        project nor intrinsic."""
        missing = []

        for module_name, node in self.modules.items():
            for dependency in node.dependencies:
                if (dependency not in self.modules and
                    dependency.lower() not in self.intrinsic_modules):
                    missing.append({
                        'module': dependency,
                        'required_by': module_name,
                        'file_path': node.file_path
                    })

        # Program dependencies can be missing too.
        for program_name, program_info in self.programs.items():
            for dependency in program_info.get('dependencies', []):
                if (dependency not in self.modules and
                    dependency.lower() not in self.intrinsic_modules):
                    missing.append({
                        'module': dependency,
                        'required_by': program_name,
                        'file_path': program_info['file_path'],
                        'type': 'program'
                    })

        return missing

    def _detect_interface_conflicts(self) -> List[Dict[str, Any]]:
        """Detect entities (procedures, types, named interfaces) provided by
        more than one module."""
        conflicts = []
        entity_providers = defaultdict(list)

        for module_name, node in self.modules.items():
            for entity in node.provides:
                entity_providers[entity].append(module_name)

        for entity, providers in entity_providers.items():
            # BUG FIX: unnamed interface blocks all share the placeholder
            # name 'anonymous'; they cannot collide by name, so reporting
            # them was a guaranteed false positive.
            if entity == 'interface:anonymous':
                continue
            if len(providers) > 1:
                conflicts.append({
                    'entity': entity,
                    'providers': providers,
                    'conflict_type': 'multiple_definitions'
                })

        return conflicts

    def _generate_recommendations(self, cycles: List[List[str]],
                                missing_modules: List[Dict[str, Any]],
                                interface_conflicts: List[Dict[str, Any]]) -> List[str]:
        """Generate human-readable recommendations from the detected issues."""
        recommendations = []

        # Circular dependencies block compilation entirely.
        if cycles:
            recommendations.append(
                f"Resolve {len(cycles)} circular dependencies by refactoring module interfaces"
            )
            for cycle in cycles[:3]:  # Show first 3 cycles
                cycle_str = " -> ".join(cycle)
                recommendations.append(f"Circular dependency: {cycle_str}")

        if missing_modules:
            unique_missing = set(m['module'] for m in missing_modules)
            recommendations.append(
                f"Locate or create {len(unique_missing)} missing modules"
            )

        if interface_conflicts:
            recommendations.append(
                f"Resolve {len(interface_conflicts)} interface conflicts using specific imports or renaming"
            )

        # Modules absent from the topological order are stuck in cycles.
        if len(self.compilation_order) < len(self.modules):
            stuck_modules = set(self.modules.keys()) - set(self.compilation_order)
            recommendations.append(
                f"Fix dependency issues for {len(stuck_modules)} modules that cannot be ordered"
            )

        # Heavily-coupled modules are maintenance hot spots.
        large_dep_modules = [name for name, node in self.modules.items()
                           if len(node.dependencies) > 10]
        if large_dep_modules:
            recommendations.append(
                f"Consider refactoring {len(large_dep_modules)} modules with many dependencies"
            )

        return recommendations

    def _calculate_statistics(self) -> Dict[str, Any]:
        """Calculate project-wide dependency statistics."""
        total_dependencies = sum(len(node.dependencies) for node in self.modules.values())
        # Internal = dependencies on modules defined within this project.
        total_internal_deps = sum(
            len([dep for dep in node.dependencies if dep in self.modules])
            for node in self.modules.values()
        )

        dep_counts = [len(node.dependencies) for node in self.modules.values()]

        module_types = defaultdict(int)
        for node in self.modules.values():
            module_types[node.module_type] += 1

        return {
            'total_modules': len(self.modules),
            'total_programs': len(self.programs),
            'total_dependencies': total_dependencies,
            'internal_dependencies': total_internal_deps,
            'external_dependencies': total_dependencies - total_internal_deps,
            'average_dependencies_per_module': total_dependencies / len(self.modules) if self.modules else 0,
            'max_dependencies': max(dep_counts) if dep_counts else 0,
            'modules_with_no_dependencies': sum(1 for count in dep_counts if count == 0),
            'modules_by_type': dict(module_types),
            'compilation_order_length': len(self.compilation_order),
            'buildable_modules': len(self.compilation_order),
            'unbuildable_modules': len(self.modules) - len(self.compilation_order)
        }

    def get_module_build_info(self, module_name: str) -> Dict[str, Any]:
        """Get detailed build information for a specific module.

        Returns an error dict if the module is unknown; otherwise the node's
        serialized state plus its prerequisites and dependents.
        """
        if module_name not in self.modules:
            return {'error': f'Module {module_name} not found'}

        node = self.modules[module_name]

        # Modules that must be compiled before this one.
        prerequisites = []
        for dep in node.dependencies:
            if dep in self.modules:
                dep_node = self.modules[dep]
                prerequisites.append({
                    'module': dep,
                    'file_path': dep_node.file_path,
                    'compilation_order': dep_node.compilation_order
                })

        # Sort by compilation order, unordered modules last. BUG FIX: the
        # original used 'x or 999999', which wrongly sorted a legitimate
        # position 0 (falsy) to the end.
        prerequisites.sort(
            key=lambda entry: (entry['compilation_order']
                               if entry['compilation_order'] is not None
                               else float('inf')))

        # Modules that depend on this one.
        dependents = []
        for dep in node.dependents:
            if dep in self.modules:
                dep_node = self.modules[dep]
                dependents.append({
                    'module': dep,
                    'file_path': dep_node.file_path,
                    'compilation_order': dep_node.compilation_order
                })

        return {
            'module': node.to_dict(),
            'prerequisites': prerequisites,
            'dependents': dependents,
            'can_build': node.compilation_order is not None,
            'build_order_position': node.compilation_order,
            'total_prerequisites': len(prerequisites),
            'total_dependents': len(dependents)
        }