"""
Entry Point Discovery Module.

Automatically discovers main entry points and starting functions across different
programming languages in a codebase. Supports Python, C++, TypeScript, Fortran,
and other languages with configurable patterns.
"""

import ast
import re
import json
from typing import Dict, List, Any, Optional, Set, Tuple, Union
from pathlib import Path
from dataclasses import dataclass
from collections import defaultdict
import logging

from .error_handler import AnalysisError, log_info, log_debug


@dataclass
class EntryPoint:
    """Represents a discovered entry point in code.

    A plain value object produced by EntryPointDiscovery; it carries no
    behavior of its own and is safe to serialize (see to_dict()).
    """
    # Identifier of the entry point (function/program/kernel name, or
    # "__main__" for a Python main guard).
    name: str
    # Path of the file the entry point was found in, as a string.
    file_path: str
    # 1-based line number of the matched definition or guard.
    line_number: int
    # Detected language key: e.g. 'python', 'cpp', 'typescript', 'fortran'.
    language: str
    entry_type: str  # 'main', 'cli', 'export', 'program', 'kernel'
    confidence: float  # 0.0 to 1.0
    # Source text of the matched signature/guard line (stripped).
    signature: str
    context: Dict[str, Any]  # Additional context like arguments, decorators, etc.


@dataclass
class EntryDiscoveryConfig:
    """Configuration for entry point discovery.

    Attributes:
        languages: Optional whitelist of language keys to analyze; None
            means all supported languages.
        custom_patterns: Optional extra regex patterns, keyed by language.
        ignore_tests: Skip files matching test-file naming patterns.
        ignore_examples: Skip files matching example/demo/sample patterns.
        min_confidence: Minimum confidence (0.0-1.0) for a candidate to be
            reported.
        max_results_per_file: Cap on reported entry points per source file.
    """
    # Annotations are Optional because the defaults are None (the previous
    # bare List/Dict annotations were incorrect for a None default).
    languages: Optional[List[str]] = None
    custom_patterns: Optional[Dict[str, List[str]]] = None
    ignore_tests: bool = True
    ignore_examples: bool = True
    min_confidence: float = 0.7
    max_results_per_file: int = 5


class EntryPointDiscovery:
    """Discovers entry points across multiple programming languages.

    Supports Python (AST-based with a regex fallback), C/C++ (including
    CUDA/HIP kernels), TypeScript/JavaScript, and Fortran. Results are
    EntryPoint records sorted by confidence and filtered by the
    configured minimum confidence and per-file result cap.
    """

    def __init__(self, config: Optional[EntryDiscoveryConfig] = None):
        """Initialize the entry point discovery system.

        Args:
            config: Optional configuration; a default-valued
                EntryDiscoveryConfig is used when omitted.
        """
        self.config = config or EntryDiscoveryConfig()

        # Default patterns for each language. NOTE(review): only the Python
        # regex fallback currently consults hard-coded patterns; these serve
        # as documentation/reference for the per-language analyzers.
        self.default_patterns = {
            'python': {
                'main_guard': r'if\s+__name__\s*==\s*["\']__main__["\']',
                'main_function': r'def\s+main\s*\(',
                'cli_decorator': r'@click\.(command|group)',
                'argparse_usage': r'ArgumentParser\s*\(',
                'entry_points': r'entry_points\s*=',
            },
            'cpp': {
                'main_function': r'int\s+main\s*\(',
                'wmain_function': r'int\s+wmain\s*\(',
                'dll_main': r'BOOL\s+APIENTRY\s+DllMain',
                'cuda_global': r'__global__\s+\w+',
                'hip_global': r'__global__\s+\w+',
            },
            'typescript': {
                'main_function': r'function\s+main\s*\(',
                'export_default': r'export\s+default',
                'cli_commander': r'program\s*\.\s*(command|option)',
                'process_argv': r'process\.argv',
            },
            'fortran': {
                'program_block': r'^\s*program\s+\w+',
                'main_subroutine': r'subroutine\s+main\s*\(',
            }
        }

        # File-name patterns (matched case-insensitively against the full
        # lowered path) used to skip test and example files.
        self.ignore_patterns = {
            'test_files': [
                r'.*test.*\.py$',
                r'.*_test\.cpp$',
                r'.*\.test\.ts$',
                r'.*spec\.ts$',
            ],
            'example_files': [
                r'.*example.*',
                r'.*demo.*',
                r'.*sample.*',
            ]
        }

        log_debug("Initialized EntryPointDiscovery")

    def discover_entry_points(self, project_path: Union[str, Path],
                            file_paths: List[str] = None) -> List[EntryPoint]:
        """
        Discover all entry points in a project or specific files.

        Args:
            project_path: Root path of the project
            file_paths: Optional list of specific files to analyze
                (relative paths are resolved against project_path)

        Returns:
            List of discovered entry points, sorted by descending
            confidence, filtered to config.min_confidence and capped at
            config.max_results_per_file per file.
        """
        project_path = Path(project_path)

        if file_paths:
            # Analyze the explicitly requested files only.
            candidates = []
            for file_path in file_paths:
                file_path = Path(file_path)
                if not file_path.is_absolute():
                    file_path = project_path / file_path
                if file_path.exists() and file_path.is_file():
                    candidates.append(file_path)
        else:
            # Discover all relevant files in the project tree.
            candidates = self._find_relevant_files(project_path)

        entry_points: List[EntryPoint] = []
        for file_path in candidates:
            # Cheap name-based filtering first, then language detection.
            if not self._should_analyze_file(file_path):
                continue
            language = self._detect_language(file_path)
            if language:
                entry_points.extend(self._analyze_file(file_path, language))

        # Sort by confidence, then apply the confidence threshold and the
        # per-file cap (max_results_per_file was previously ignored).
        entry_points.sort(key=lambda ep: ep.confidence, reverse=True)
        per_file_counts: Dict[str, int] = defaultdict(int)
        filtered_points: List[EntryPoint] = []
        for ep in entry_points:
            if ep.confidence < self.config.min_confidence:
                continue
            if per_file_counts[ep.file_path] >= self.config.max_results_per_file:
                continue
            per_file_counts[ep.file_path] += 1
            filtered_points.append(ep)

        log_info(f"Discovered {len(filtered_points)} entry points "
                f"from {len(entry_points)} candidates")

        return filtered_points

    def _find_relevant_files(self, project_path: Path) -> List[Path]:
        """Find all relevant source files in the project.

        Walks the tree recursively, keeping files with a recognized
        extension that are not inside an ignored directory.
        """
        # File extensions to consider.
        extensions = {'.py', '.cpp', '.cc', '.cxx', '.c', '.h', '.hpp',
                     '.ts', '.tsx', '.js', '.jsx', '.f90', '.f95', '.f03', '.f08', '.f'}

        return [
            file_path
            for file_path in project_path.rglob('*')
            if (file_path.is_file() and
                file_path.suffix.lower() in extensions and
                not self._is_in_ignore_directory(file_path))
        ]

    def _is_in_ignore_directory(self, file_path: Path) -> bool:
        """Check if file is in a directory that should be ignored
        (VCS metadata, build output, caches, vendored dependencies)."""
        ignore_dirs = {
            'node_modules', '__pycache__', '.git', '.svn', '.hg',
            'build', 'dist', 'target', 'cmake-build-debug',
            'cmake-build-release', '.pytest_cache', '.tox'
        }

        return any(part in ignore_dirs for part in file_path.parts)

    def _should_analyze_file(self, file_path: Path) -> bool:
        """Determine if a file should be analyzed based on configuration.

        Returns False when the (lowercased) path matches a test or
        example pattern and the corresponding ignore flag is set.
        """
        file_str = str(file_path).lower()

        # Check test file patterns.
        if self.config.ignore_tests:
            for pattern in self.ignore_patterns['test_files']:
                if re.search(pattern, file_str):
                    return False

        # Check example file patterns.
        if self.config.ignore_examples:
            for pattern in self.ignore_patterns['example_files']:
                if re.search(pattern, file_str):
                    return False

        return True

    def _detect_language(self, file_path: Path) -> Optional[str]:
        """Detect the programming language of a file from its extension.

        Returns None for unrecognized extensions. C and JavaScript map
        onto the 'cpp' and 'typescript' analyzers respectively.
        """
        language_map = {
            '.py': 'python',
            '.cpp': 'cpp', '.cc': 'cpp', '.cxx': 'cpp',
            '.c': 'cpp', '.h': 'cpp', '.hpp': 'cpp',
            '.ts': 'typescript', '.tsx': 'typescript',
            '.js': 'typescript', '.jsx': 'typescript',
            '.f90': 'fortran', '.f95': 'fortran', '.f03': 'fortran',
            '.f08': 'fortran', '.f': 'fortran'
        }

        return language_map.get(file_path.suffix.lower())

    @staticmethod
    def _line_number(content: str, offset: int) -> int:
        """Convert a character offset in content to a 1-based line number.

        Uses str.count with bounds to avoid slicing the whole prefix for
        every regex match.
        """
        return content.count('\n', 0, offset) + 1

    def _analyze_file(self, file_path: Path, language: str) -> List[EntryPoint]:
        """Analyze a single file for entry points.

        Read errors are logged and yield an empty result rather than
        propagating (discovery is best-effort across many files).
        """
        try:
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                content = f.read()
        except OSError as e:
            log_debug(f"Error reading file {file_path}: {e}")
            return []

        # Dispatch table keeps the mapping language -> analyzer in one place.
        analyzers = {
            'python': self._analyze_python_file,
            'cpp': self._analyze_cpp_file,
            'typescript': self._analyze_typescript_file,
            'fortran': self._analyze_fortran_file,
        }
        analyzer = analyzers.get(language)
        return analyzer(file_path, content) if analyzer else []

    def _analyze_python_file(self, file_path: Path, content: str) -> List[EntryPoint]:
        """Analyze Python file for entry points.

        Uses the AST to find main guards, main() definitions, and
        CLI-decorated functions; falls back to regex matching when the
        file does not parse.
        """
        entry_points = []
        lines = content.splitlines()

        try:
            # Try to parse as AST for better analysis.
            tree = ast.parse(content)

            for node in ast.walk(tree):
                # if __name__ == "__main__" blocks.
                if isinstance(node, ast.If):
                    if self._is_main_guard(node):
                        entry_points.append(EntryPoint(
                            name="__main__",
                            file_path=str(file_path),
                            line_number=node.lineno,
                            language="python",
                            entry_type="main",
                            confidence=0.95,
                            signature="if __name__ == '__main__':",
                            context={"ast_node": "If", "guard": True}
                        ))

                # main() function definitions.
                elif isinstance(node, ast.FunctionDef) and node.name == 'main':
                    entry_points.append(EntryPoint(
                        name="main",
                        file_path=str(file_path),
                        line_number=node.lineno,
                        language="python",
                        entry_type="main",
                        confidence=0.85,
                        signature=f"def main({self._get_function_signature(node)}):",
                        context={"ast_node": "FunctionDef", "args": len(node.args.args)}
                    ))

                # CLI decorators (click, typer, etc.).
                elif isinstance(node, ast.FunctionDef):
                    if self._has_cli_decorator(node):
                        entry_points.append(EntryPoint(
                            name=node.name,
                            file_path=str(file_path),
                            line_number=node.lineno,
                            language="python",
                            entry_type="cli",
                            confidence=0.8,
                            signature=f"def {node.name}({self._get_function_signature(node)}):",
                            context={"ast_node": "FunctionDef", "cli_decorated": True}
                        ))

        except SyntaxError:
            # Fallback to regex-based analysis.
            log_debug(f"AST parsing failed for {file_path}, using regex fallback")
            entry_points.extend(self._regex_analyze_python(file_path, content, lines))

        return entry_points

    def _analyze_cpp_file(self, file_path: Path, content: str) -> List[EntryPoint]:
        """Analyze C/C++ file for entry points.

        Finds main/wmain definitions and CUDA/HIP __global__ kernels.
        """
        entry_points = []

        # int main(...) — the standard entry point.
        main_pattern = re.compile(r'^\s*int\s+main\s*\([^)]*\)', re.MULTILINE)
        for match in main_pattern.finditer(content):
            entry_points.append(EntryPoint(
                name="main",
                file_path=str(file_path),
                line_number=self._line_number(content, match.start()),
                language="cpp",
                entry_type="main",
                confidence=0.95,
                signature=match.group(0).strip(),
                context={"function_type": "main"}
            ))

        # int wmain(...) — Windows wide-character entry point.
        wmain_pattern = re.compile(r'^\s*int\s+wmain\s*\([^)]*\)', re.MULTILINE)
        for match in wmain_pattern.finditer(content):
            entry_points.append(EntryPoint(
                name="wmain",
                file_path=str(file_path),
                line_number=self._line_number(content, match.start()),
                language="cpp",
                entry_type="main",
                confidence=0.95,
                signature=match.group(0).strip(),
                context={"function_type": "wmain", "platform": "windows"}
            ))

        # __global__ kernels (CUDA and HIP share the qualifier).
        kernel_pattern = re.compile(r'__global__\s+\w+\s+(\w+)\s*\([^)]*\)', re.MULTILINE)
        for match in kernel_pattern.finditer(content):
            entry_points.append(EntryPoint(
                name=match.group(1),
                file_path=str(file_path),
                line_number=self._line_number(content, match.start()),
                language="cpp",
                entry_type="kernel",
                confidence=0.9,
                signature=match.group(0).strip(),
                context={"function_type": "cuda_kernel", "gpu": True}
            ))

        return entry_points

    def _analyze_typescript_file(self, file_path: Path, content: str) -> List[EntryPoint]:
        """Analyze TypeScript/JavaScript file for entry points.

        Checks whether the file is the package.json "main" module, then
        looks for main() definitions and process.argv usage.
        """
        entry_points = []

        # Check if this is the package.json "main" file.
        try:
            parent_dir = file_path.parent
            package_json = parent_dir / 'package.json'
            if package_json.exists():
                with open(package_json, 'r') as f:
                    pkg_data = json.load(f)
                main_file = pkg_data.get('main', '')
                # Resolve "main" relative to the package directory; the
                # previous Path(main_file).resolve() resolved against the
                # process CWD and rarely matched.
                if main_file and (parent_dir / main_file).resolve() == file_path.resolve():
                    entry_points.append(EntryPoint(
                        name="package_main",
                        file_path=str(file_path),
                        line_number=1,
                        language="typescript",
                        entry_type="export",
                        confidence=0.9,
                        signature="package.json main",
                        context={"package_main": True}
                    ))
        except (OSError, ValueError):
            # ValueError covers json.JSONDecodeError; a malformed or
            # unreadable package.json is not fatal to discovery.
            pass

        # Look for main function (optionally exported and/or async).
        main_pattern = re.compile(r'^\s*(?:export\s+)?(?:async\s+)?function\s+main\s*\([^)]*\)', re.MULTILINE)
        for match in main_pattern.finditer(content):
            entry_points.append(EntryPoint(
                name="main",
                file_path=str(file_path),
                line_number=self._line_number(content, match.start()),
                language="typescript",
                entry_type="main",
                confidence=0.85,
                signature=match.group(0).strip(),
                context={"function_type": "main"}
            ))

        # process.argv usage suggests a Node.js CLI script; record only the
        # first occurrence.
        argv_match = re.search(r'process\.argv', content)
        if argv_match:
            entry_points.append(EntryPoint(
                name="cli_script",
                file_path=str(file_path),
                line_number=self._line_number(content, argv_match.start()),
                language="typescript",
                entry_type="cli",
                confidence=0.7,
                signature="process.argv usage",
                context={"cli_args": True}
            ))

        return entry_points

    def _analyze_fortran_file(self, file_path: Path, content: str) -> List[EntryPoint]:
        """Analyze Fortran file for entry points (PROGRAM blocks)."""
        entry_points = []

        # PROGRAM <name> — case-insensitive, at line start.
        program_pattern = re.compile(r'^\s*program\s+(\w+)', re.MULTILINE | re.IGNORECASE)
        for match in program_pattern.finditer(content):
            program_name = match.group(1)
            entry_points.append(EntryPoint(
                name=program_name,
                file_path=str(file_path),
                line_number=self._line_number(content, match.start()),
                language="fortran",
                entry_type="program",
                confidence=0.95,
                signature=match.group(0).strip(),
                context={"program_name": program_name}
            ))

        return entry_points

    def _is_main_guard(self, node: ast.If) -> bool:
        """Check if an AST If node is `if __name__ == "__main__":`.

        Requires an equality operator so that e.g.
        `if __name__ != "__main__":` is not misclassified.
        """
        if isinstance(node.test, ast.Compare):
            left = node.test.left
            if isinstance(left, ast.Name) and left.id == '__name__':
                for op, comparator in zip(node.test.ops, node.test.comparators):
                    if (isinstance(op, ast.Eq) and
                            isinstance(comparator, ast.Constant) and
                            comparator.value == '__main__'):
                        return True
        return False

    def _get_function_signature(self, node: ast.FunctionDef) -> str:
        """Return the positional argument names of a function as
        a comma-separated string (keyword-only/varargs not included)."""
        return ', '.join(arg.arg for arg in node.args.args)

    def _has_cli_decorator(self, node: ast.FunctionDef) -> bool:
        """Check if function has CLI-related decorators.

        Handles both bare (`@click.command`) and called
        (`@click.command()`) decorator forms; deeper attribute chains
        (`@a.b.command`) are safely ignored instead of raising.
        """
        cli_decorators = {'click.command', 'click.group', 'app.command', 'typer.run'}

        for decorator in node.decorator_list:
            # Unwrap @name(...) calls to the underlying callable; click
            # decorators are typically used in called form.
            if isinstance(decorator, ast.Call):
                decorator = decorator.func

            if isinstance(decorator, ast.Attribute):
                # Only simple `name.attr` forms; decorator.value may be any
                # expression (the old `.id` access crashed on chains).
                if isinstance(decorator.value, ast.Name):
                    full_name = f"{decorator.value.id}.{decorator.attr}"
                    if full_name in cli_decorators:
                        return True
            elif isinstance(decorator, ast.Name):
                if decorator.id in {'command', 'group'}:
                    return True

        return False

    def _regex_analyze_python(self, file_path: Path, content: str,
                            lines: List[str]) -> List[EntryPoint]:
        """Fallback regex-based analysis for Python files that fail to
        parse. Currently detects only the main guard."""
        entry_points = []

        main_guard_pattern = re.compile(r'if\s+__name__\s*==\s*["\']__main__["\']')
        for i, line in enumerate(lines):
            if main_guard_pattern.search(line):
                entry_points.append(EntryPoint(
                    name="__main__",
                    file_path=str(file_path),
                    line_number=i + 1,
                    language="python",
                    entry_type="main",
                    confidence=0.9,
                    signature=line.strip(),
                    context={"regex_match": True}
                ))

        return entry_points

    def get_best_entry_point(self, entry_points: List[EntryPoint]) -> Optional[EntryPoint]:
        """Get the best entry point from a list of candidates.

        Candidates below config.min_confidence are dropped; the winner is
        chosen by entry-type priority first, then confidence. Returns
        None when nothing qualifies.
        """
        if not entry_points:
            return None

        filtered = [ep for ep in entry_points if ep.confidence >= self.config.min_confidence]
        if not filtered:
            return None

        # Higher number = stronger claim to being "the" entry point.
        type_priority = {'main': 3, 'program': 3, 'cli': 2, 'export': 1, 'kernel': 1}

        return max(filtered, key=lambda ep: (
            type_priority.get(ep.entry_type, 0),
            ep.confidence
        ))

    def group_by_file(self, entry_points: List[EntryPoint]) -> Dict[str, List[EntryPoint]]:
        """Group entry points by file path, each group sorted by
        descending confidence."""
        grouped = defaultdict(list)
        for ep in entry_points:
            grouped[ep.file_path].append(ep)

        for eps in grouped.values():
            eps.sort(key=lambda ep: ep.confidence, reverse=True)

        return dict(grouped)

    def to_dict(self, entry_points: List[EntryPoint]) -> Dict[str, Any]:
        """Convert entry points to a JSON-serializable dictionary with
        total count, per-language and per-type tallies, and full records."""
        return {
            'total_count': len(entry_points),
            'by_language': self._group_by_language(entry_points),
            'by_type': self._group_by_type(entry_points),
            'entry_points': [
                {
                    'name': ep.name,
                    'file_path': ep.file_path,
                    'line_number': ep.line_number,
                    'language': ep.language,
                    'entry_type': ep.entry_type,
                    'confidence': ep.confidence,
                    'signature': ep.signature,
                    'context': ep.context
                }
                for ep in entry_points
            ]
        }

    def _group_by_language(self, entry_points: List[EntryPoint]) -> Dict[str, int]:
        """Count entry points per language."""
        counts = defaultdict(int)
        for ep in entry_points:
            counts[ep.language] += 1
        return dict(counts)

    def _group_by_type(self, entry_points: List[EntryPoint]) -> Dict[str, int]:
        """Count entry points per entry type."""
        counts = defaultdict(int)
        for ep in entry_points:
            counts[ep.entry_type] += 1
        return dict(counts)