"""Analysis command with integrated analyzers."""

from __future__ import annotations

import argparse
import json
import sys
from pathlib import Path

from flowmap_core.analyzers import analyzer_registry


def register(subparsers: argparse._SubParsersAction) -> None:
    """Register the ``analyze`` subcommand and all of its options.

    Args:
        subparsers: Subparser collection from the top-level CLI parser.
    """
    parser = subparsers.add_parser("analyze", help="Analyze source trees and emit call graphs")
    parser.add_argument("paths", nargs="+", help="Source directories or files")
    parser.add_argument("--language", choices=analyzer_registry.get_available_languages(),
                       help="Force specific language analyzer (auto-detect by default)")
    parser.add_argument("--output", "-o", type=Path, help="Output file for analysis results (JSON)")
    # BUG FIX: the original declared --recursive with action="store_true" AND
    # default=True, making the flag a no-op with no way to turn recursion off.
    # --recursive/-r is kept for backward compatibility; --no-recursive is the
    # new opt-out, both writing to the same "recursive" dest.
    parser.add_argument("--recursive", "-r", dest="recursive", action="store_true", default=True,
                       help="Recursively analyze directories (default)")
    parser.add_argument("--no-recursive", dest="recursive", action="store_false",
                       help="Do not recurse into subdirectories")
    parser.add_argument("--include-source", action="store_true",
                       help="Include source code in analysis results")
    parser.add_argument("--use-clang", action="store_true",
                       help="Use Clang AST for C++ analysis (if available)")
    parser.add_argument("--compile-commands", type=Path,
                       help="Path to compile_commands.json for C++ analysis")
    parser.add_argument("--exclude-functions", nargs="*", default=[],
                       help="Function names to exclude from analysis")
    parser.add_argument("--root-functions", nargs="*",
                       help="Root functions to limit analysis scope")
    parser.add_argument("--max-depth", type=int, default=10,
                       help="Maximum call depth to analyze")
    parser.add_argument("--include-stdlib", action="store_true",
                       help="Include standard library functions")
    parser.add_argument("--include-external", action="store_true",
                       help="Include external library functions")
    parser.add_argument("--no-cycles", action="store_true",
                       help="Disable cycle detection")
    parser.add_argument("--summary", "-s", action="store_true",
                       help="Show summary statistics only")
    parser.set_defaults(handler=handle)


def handle(args: argparse.Namespace) -> int:
    """Execute the ``analyze`` command.

    Validates the input paths, runs the forced or auto-detected analyzers,
    and either writes the combined results to a JSON file or prints them
    to stdout (summary or detailed form).

    Args:
        args: Parsed command-line arguments produced by ``register``.

    Returns:
        Process exit code: 0 on success, 1 on any error.
    """
    paths = [Path(p) for p in args.paths]

    # Fail fast on the first missing path, before any analysis work.
    for path in paths:
        if not path.exists():
            print(f"Error: Path does not exist: {path}", file=sys.stderr)
            return 1

    # Keyword options forwarded verbatim to each analyzer's analyze() call.
    analysis_options = {
        'recursive': args.recursive,
        'include_source_code': args.include_source,
        'use_clang': args.use_clang,
        'compile_commands_path': str(args.compile_commands) if args.compile_commands else None,
        'exclude_functions': args.exclude_functions,
        'root_functions': args.root_functions,
        'max_depth': args.max_depth,
        'include_standard_lib': args.include_stdlib,
        'include_external': args.include_external,
        'detect_cycles': not args.no_cycles,
    }

    try:
        if args.language:
            # Caller forced a specific language analyzer.
            analyzer = analyzer_registry.get_analyzer(args.language)
            if not analyzer:
                print(f"Error: Analyzer for language '{args.language}' not available", file=sys.stderr)
                return 1

            results = {args.language: analyzer.analyze(paths, **analysis_options)}
        else:
            # Auto-detect languages and run every applicable analyzer.
            results = analyzer_registry.analyze_paths(paths, **analysis_options)

        if not results:
            print("No supported files found for analysis", file=sys.stderr)
            return 1

        if args.output:
            # Serialize per-language results. BUG FIX: force UTF-8 so the
            # JSON file does not depend on the platform's locale encoding.
            output_data = {language: result.to_dict() for language, result in results.items()}
            with open(args.output, 'w', encoding='utf-8') as f:
                json.dump(output_data, f, indent=2)
            print(f"Analysis results saved to {args.output}")
        elif args.summary:
            _print_summary(results)
        else:
            _print_detailed_results(results)

    except Exception as e:
        # Top-level CLI boundary: report the failure and exit non-zero
        # instead of surfacing a traceback to the user.
        print(f"Error during analysis: {e}", file=sys.stderr)
        return 1

    return 0


def _print_summary(results):
    """Print summary statistics."""
    print("Analysis Summary")
    print("=" * 50)

    total_functions = 0
    total_calls = 0
    total_files = 0

    for language, result in results.items():
        print(f"\n{language.upper()} Analysis:")
        print(f"  Functions: {result.statistics.get('total_functions', 0)}")
        print(f"  Function calls: {result.statistics.get('total_calls', 0)}")
        print(f"  Source files: {result.statistics.get('source_files', 0)}")

        if result.cycles:
            print(f"  Call cycles detected: {len(result.cycles)}")

        largest_func = result.statistics.get('largest_function')
        if largest_func:
            print(f"  Largest function: {largest_func['name']} ({largest_func['line_count']} lines)")

        avg_length = result.statistics.get('average_function_length', 0)
        if avg_length:
            print(f"  Average function length: {avg_length:.1f} lines")

        total_functions += result.statistics.get('total_functions', 0)
        total_calls += result.statistics.get('total_calls', 0)
        total_files += result.statistics.get('source_files', 0)

    if len(results) > 1:
        print(f"\nTotal across all languages:")
        print(f"  Functions: {total_functions}")
        print(f"  Function calls: {total_calls}")
        print(f"  Source files: {total_files}")


def _print_detailed_results(results):
    """Print detailed analysis results."""
    for language, result in results.items():
        print(f"\n{language.upper()} Analysis Results")
        print("=" * 50)

        # Print functions
        print(f"\nFunctions ({len(result.functions)}):")
        for func in sorted(result.functions, key=lambda f: f.full_name):
            print(f"  {func.full_name}")
            if func.return_type:
                print(f"    Return type: {func.return_type}")
            if func.parameters:
                print(f"    Parameters: {', '.join(func.parameters)}")
            print(f"    Location: {func.source_file}:{func.start_line}")
            if func.line_count:
                print(f"    Size: {func.line_count} lines")
            print()

        # Print call graph
        if result.call_graph.edges:
            print(f"\nCall Relationships:")
            for caller, callees in result.call_graph.edges.items():
                if callees:
                    print(f"  {caller} calls:")
                    for callee in sorted(callees):
                        print(f"    -> {callee}")

        # Print cycles if detected
        if result.cycles:
            print(f"\nCall Cycles Detected ({len(result.cycles)}):")
            for i, cycle in enumerate(result.cycles, 1):
                print(f"  Cycle {i}: {' -> '.join(cycle)}")

        # Print statistics
        print(f"\nStatistics:")
        for key, value in result.statistics.items():
            if isinstance(value, dict):
                print(f"  {key}:")
                for k, v in value.items():
                    print(f"    {k}: {v}")
            else:
                print(f"  {key}: {value}")
