"""
Analyze command implementation.

Provides batch analysis capabilities for function call tree generation
and analysis across multiple programming languages.
"""

import argparse
import asyncio
from typing import Dict, Any, List, Optional
from pathlib import Path
import time

from ...core.error_handler import CodeMCPError, AnalysisError, log_info
from ..base_command import CLICommand


class AnalyzeCommand(CLICommand):
    """Command for analyzing function call trees.

    Registers the ``analyze`` subcommand, validates its arguments, runs a
    batch (automatic) or placeholder interactive (manual) analysis over a
    single file or a directory tree, then renders a summary and optionally
    saves the results to a file.
    """

    def add_parser(self, subparsers) -> argparse.ArgumentParser:
        """Register the ``analyze`` subcommand and all of its options.

        Args:
            subparsers: The argparse subparsers object to register with.

        Returns:
            The configured ``argparse.ArgumentParser`` for this command.
        """
        parser = subparsers.add_parser(
            'analyze',
            help='Analyze function call trees',
            description="""
Analyze function call trees starting from specified entry points.
Supports multiple programming languages and cross-language analysis.
            """.strip(),
            formatter_class=argparse.RawDescriptionHelpFormatter,
            epilog="""
Examples:
  codemcp analyze --file main.py --function main
  codemcp analyze --file src/app.cpp --function main --max-depth 15
  codemcp analyze --file project/ --pattern "*.py" --function main
  codemcp analyze --config analysis.toml --output result.json
            """.strip()
        )

        # Input specification: what to analyze and in which language.
        input_group = parser.add_argument_group('Input Options')
        input_group.add_argument(
            '--file', '-f',
            type=str,
            help='File or directory to analyze'
        )
        input_group.add_argument(
            '--function',
            type=str,
            help='Function name to start analysis from'
        )
        input_group.add_argument(
            '--pattern',
            type=str,
            help='File pattern for directory analysis (e.g., "*.py")'
        )
        input_group.add_argument(
            '--language',
            choices=['python', 'cpp', 'fortran', 'typescript', 'latex', 'markdown', 'auto'],
            default='auto',
            help='Force specific language (default: auto-detect)'
        )

        # Analysis options: depth, scope, and mode of the call-tree walk.
        analysis_group = parser.add_argument_group('Analysis Options')
        analysis_group.add_argument(
            '--max-depth',
            type=int,
            default=10,
            help='Maximum depth for call tree analysis (default: 10)'
        )
        analysis_group.add_argument(
            '--include-dependencies',
            action='store_true',
            help='Include external dependencies in analysis'
        )
        analysis_group.add_argument(
            '--cross-language',
            action='store_true',
            help='Enable cross-language call resolution'
        )
        analysis_group.add_argument(
            '--mode',
            choices=['automatic', 'manual'],
            default='automatic',
            help='Analysis mode (default: automatic)'
        )

        # Output options: destination file and optional result enrichment.
        output_group = parser.add_argument_group('Output Options')
        output_group.add_argument(
            '--output', '-o',
            type=str,
            help='Output file for results'
        )
        output_group.add_argument(
            '--include-source',
            action='store_true',
            help='Include source code snippets in output'
        )
        output_group.add_argument(
            '--include-metrics',
            action='store_true',
            help='Include complexity and performance metrics'
        )

        # Performance options: parallelism, caching, and a hard deadline.
        perf_group = parser.add_argument_group('Performance Options')
        perf_group.add_argument(
            '--parallel',
            type=int,
            default=1,
            help='Number of parallel analysis workers (default: 1)'
        )
        perf_group.add_argument(
            '--cache',
            action='store_true',
            help='Enable caching for faster repeated analysis'
        )
        perf_group.add_argument(
            '--timeout',
            type=int,
            default=300,
            help='Analysis timeout in seconds (default: 300)'
        )

        return parser

    async def execute(self, args: argparse.Namespace) -> int:
        """Execute the analyze command.

        Args:
            args: Parsed command-line arguments for the ``analyze`` command.

        Returns:
            0 on success, 1 on timeout, validation failure, or any error.
        """
        try:
            # Validate arguments
            await self._validate_arguments(args)

            # Show banner
            if not args.quiet:
                self.cli.print_banner()
                self.printer.print_info(f"Starting analysis of {args.file}")

            # Prepare analysis
            analysis_config = await self._prepare_analysis(args)

            # Execute analysis, enforcing --timeout. Previously the option
            # was parsed and validated but never applied.
            start_time = time.time()
            results = await asyncio.wait_for(
                self._execute_analysis(analysis_config),
                timeout=analysis_config['timeout']
            )
            duration = time.time() - start_time

            # Process and display results
            await self._process_results(results, args, duration)

            return 0

        except asyncio.TimeoutError:
            self.printer.print_error(
                f"Analysis timed out after {args.timeout} seconds"
            )
            return 1
        except CodeMCPError as e:
            self.printer.print_error(f"Analysis failed: {e.message}")
            return 1
        except Exception as e:
            self.printer.print_error(f"Unexpected error during analysis: {e}")
            return 1

    async def _validate_arguments(self, args: argparse.Namespace):
        """Validate command arguments.

        Raises:
            CodeMCPError: If required arguments are missing, the path does
                not exist, or a numeric option is out of range.
        """
        if not args.file:
            raise CodeMCPError("File or directory path is required (use --file)")

        file_path = Path(args.file)
        if not file_path.exists():
            raise CodeMCPError(f"Path does not exist: {args.file}")

        # --function is only mandatory for single-file analysis; directory
        # analysis may discover entry points per file.
        if file_path.is_file() and not args.function:
            raise CodeMCPError("Function name is required for single file analysis (use --function)")

        if args.max_depth < 1:
            raise CodeMCPError("Max depth must be at least 1")

        if args.parallel < 1:
            raise CodeMCPError("Parallel workers must be at least 1")

        if args.timeout < 1:
            raise CodeMCPError("Timeout must be at least 1 second")

    async def _prepare_analysis(self, args: argparse.Namespace) -> Dict[str, Any]:
        """Prepare the analysis configuration dict.

        Expands a directory target into a recursive file list, resolves the
        primary language (auto-detect or forced), and bundles all options
        into a single configuration dictionary.

        Raises:
            CodeMCPError: If no matching or supported files are found.
        """
        file_path = Path(args.file)

        # Determine files to analyze.
        if file_path.is_file():
            files_to_analyze = [file_path]
        else:
            # Directory analysis: recursive glob, directories filtered out.
            pattern = args.pattern or "*"
            files_to_analyze = list(file_path.glob(f"**/{pattern}"))
            files_to_analyze = [f for f in files_to_analyze if f.is_file()]

            if not files_to_analyze:
                raise CodeMCPError(f"No files found matching pattern '{pattern}' in {args.file}")

        # Determine language.
        if args.language == 'auto':
            languages = set()
            for file in files_to_analyze:
                try:
                    lang = self.get_language_from_extension(file)
                    languages.add(lang)
                except CodeMCPError:
                    continue  # Skip unsupported files

            if not languages:
                raise CodeMCPError("No supported files found for analysis")

            # Sort before picking so the choice is deterministic across
            # runs; plain set iteration order varies with hash seeding.
            primary_language = sorted(languages)[0]
        else:
            primary_language = args.language

        # Build analysis configuration.
        # NOTE(review): 'files' holds Path objects; confirm downstream
        # serialization (e.g. save_output_to_file) handles non-str paths.
        config = {
            'files': files_to_analyze,
            'function_name': args.function,
            'language': primary_language,
            'max_depth': args.max_depth,
            'include_dependencies': args.include_dependencies,
            'cross_language': args.cross_language,
            'mode': args.mode,
            'include_source': args.include_source,
            'include_metrics': args.include_metrics,
            'parallel_workers': args.parallel,
            'enable_cache': args.cache,
            'timeout': args.timeout,
            'output_file': args.output
        }

        log_info(f"Analysis configuration prepared: {len(files_to_analyze)} files, language: {primary_language}")
        return config

    async def _execute_analysis(self, config: Dict[str, Any]) -> Dict[str, Any]:
        """Dispatch to manual or automatic analysis based on ``mode``."""
        if config['mode'] == 'manual':
            # Manual mode - interactive analysis
            return await self._execute_manual_analysis(config)
        # Automatic mode - batch analysis
        return await self._execute_automatic_analysis(config)

    async def _execute_automatic_analysis(self, config: Dict[str, Any]) -> Dict[str, Any]:
        """Execute automatic batch analysis over every configured file.

        Per-file failures are recorded in ``statistics['errors']`` and
        analysis continues, unless ``continue_on_error`` is explicitly set
        to False in the configuration.

        Returns:
            Results dict with 'call_trees' and aggregate 'statistics'.
        """
        files = config['files']
        results = {
            'analysis_type': 'automatic',
            'configuration': config,
            'call_trees': [],
            'statistics': {
                'total_files': len(files),
                'processed_files': 0,
                'failed_files': 0,
                'total_nodes': 0,
                'total_functions': 0,
                'errors': []
            }
        }

        # Progress tracking
        with self.progress.progress_bar(len(files), "Analyzing files") as progress:
            for file_path in files:
                try:
                    # Update progress
                    progress.update(message=f"Analyzing {file_path.name}")

                    # Analyze single file
                    file_result = await self._analyze_single_file(file_path, config)
                    results['call_trees'].append(file_result)

                    # Update statistics
                    results['statistics']['processed_files'] += 1
                    if 'call_tree' in file_result:
                        tree_metadata = file_result['call_tree'].get('metadata', {})
                        results['statistics']['total_nodes'] += tree_metadata.get('total_nodes', 0)

                except Exception as e:
                    error_info = {
                        'file': str(file_path),
                        'error': str(e),
                        'type': type(e).__name__
                    }
                    results['statistics']['errors'].append(error_info)
                    results['statistics']['failed_files'] += 1

                    if not config.get('continue_on_error', True):
                        # Chain the original exception so the root cause
                        # is preserved in the traceback.
                        raise AnalysisError(f"Analysis failed for {file_path}: {e}") from e

        return results

    async def _execute_manual_analysis(self, config: Dict[str, Any]) -> Dict[str, Any]:
        """Execute manual interactive analysis (placeholder)."""
        # This would integrate with the interactive explorer
        # For now, return a placeholder
        self.printer.print_info("Manual analysis mode would start interactive explorer")

        return {
            'analysis_type': 'manual',
            'configuration': config,
            'message': 'Manual analysis requires interactive explorer (not yet implemented in analyze command)'
        }

    async def _analyze_single_file(self, file_path: Path, config: Dict[str, Any]) -> Dict[str, Any]:
        """Analyze a single file (placeholder implementation).

        In the real implementation, this would:
        1. Load the appropriate MCP server for the language
        2. Parse the file
        3. Build the call tree
        4. Extract additional metadata if requested

        Returns:
            Per-file result dict with a mock 'call_tree' plus optional
            'metrics' and 'source_snippets' sections.
        """
        language = config['language']
        function_name = config['function_name']

        # Simulate analysis (placeholder)
        await asyncio.sleep(0.1)  # Simulate processing time

        # Mock call tree structure
        mock_call_tree = {
            'root_node': {
                'name': function_name or 'main',
                'file_path': str(file_path),
                'line_number': 1,
                'language': language,
                'node_type': 'function',
                'signature': f'{function_name or "main"}()',
                'callee_count': 3,
                'caller_count': 0,
                'complexity_score': 5.2
            },
            'nodes': {
                f"{file_path}:1:{function_name or 'main'}": {
                    'name': function_name or 'main',
                    'file_path': str(file_path),
                    'line_number': 1,
                    'language': language,
                    'node_type': 'function',
                    'callee_count': 3,
                    'caller_count': 0
                }
            },
            'metadata': {
                'total_nodes': 4,
                'max_depth': 2,
                'analysis_type': 'static',
                'language': language,
                'created_at': time.time()
            }
        }

        result = {
            'file_path': str(file_path),
            'function_name': function_name,
            'language': language,
            'call_tree': mock_call_tree,
            'analysis_metadata': {
                'analysis_time': 0.1,
                'max_depth_reached': 2,
                'nodes_analyzed': 4,
                'cross_language_calls': 0
            }
        }

        if config.get('include_metrics'):
            result['metrics'] = {
                'complexity_score': 5.2,
                'cyclomatic_complexity': 3,
                'cognitive_complexity': 4,
                'lines_of_code': 45,
                'maintainability_index': 78.5
            }

        if config.get('include_source'):
            # In real implementation, would extract actual source
            result['source_snippets'] = {
                function_name or 'main': f"def {function_name or 'main'}():\n    # Function implementation\n    pass"
            }

        return result

    async def _process_results(self, results: Dict[str, Any], args: argparse.Namespace, duration: float):
        """Render the analysis summary, errors, and call trees; save output.

        Args:
            results: Aggregate results from the analysis phase.
            args: Parsed command-line arguments (output path, quiet flag).
            duration: Wall-clock analysis time in seconds.

        Returns:
            The ``results`` dict, unchanged (kept for caller convenience).
        """
        # Display summary
        stats = results.get('statistics', {})

        self.printer.print_header("Analysis Summary", level=1)

        summary_data = {
            'Analysis Type': results.get('analysis_type', 'unknown'),
            'Duration': f'{duration:.2f} seconds',
            'Files Processed': stats.get('processed_files', 0),
            'Total Files': stats.get('total_files', 0),
            'Total Nodes': stats.get('total_nodes', 0),
            'Failed Files': stats.get('failed_files', 0)
        }

        self.formatter.print_key_value_pairs(summary_data)

        # Display errors if any
        errors = stats.get('errors', [])
        if errors:
            self.printer.print_header("Errors", level=2)
            for error in errors:
                self.printer.print_error(f"{error['file']}: {error['error']}")

        # Display call trees
        call_trees = results.get('call_trees', [])
        if call_trees and not args.quiet:
            self.printer.print_header("Call Trees", level=2)

            for tree_result in call_trees[:3]:  # Show first 3 trees
                file_path = tree_result.get('file_path', 'unknown')
                function_name = tree_result.get('function_name', 'unknown')

                self.printer.print_header(f"{Path(file_path).name}::{function_name}", level=3)

                if 'call_tree' in tree_result:
                    self.formatter.print_call_tree(tree_result['call_tree'], max_depth=5)

                print()

        # Save results to file if requested
        if args.output:
            output_path = Path(args.output)
            self.formatter.save_output_to_file(results, str(output_path))

        # Print completion message
        if stats.get('failed_files', 0) == 0:
            self.printer.print_success("Analysis completed successfully!")
        else:
            failed = stats.get('failed_files', 0)
            total = stats.get('total_files', 0)
            self.printer.print_warning(f"Analysis completed with {failed}/{total} files failed")

        return results