#!/usr/bin/env python3
"""
Pi Calculator - Unified Command Line Interface

This script provides a unified interface for calculating π using various methods
from the three different pi calculator scripts:
- calculate_pi.py (standard mathematical methods)
- spigot_pi_calculator.py (digit extraction methods)
- geometric_pi_calculator.py (geometric visualization methods)

Features:
- Access to all calculation methods in one place
- Comparative benchmarking across all methods
- Unified visualization of results
- Command-line interface with detailed help

Run with --help to see all available options.
"""

import argparse
import time
import math
import sys
import importlib.util
from decimal import Decimal, getcontext
from tabulate import tabulate

# Set high precision for decimal calculations
# (100 significant digits; Decimal-based methods inherit this context)
getcontext().prec = 100


def import_module(module_path):
    """
    Dynamically import a module from a file path.

    Args:
        module_path: Path to the Python module to import.

    Returns:
        The imported module object, or None if the import failed
        (a diagnostic message is printed in that case).
    """
    try:
        # Derive a dotted module name from the path so different files
        # imported this way get distinct names in sys-level machinery.
        module_name = module_path.replace('.py', '').replace('/', '.').replace('\\', '.')
        spec = importlib.util.spec_from_file_location(module_name, module_path)
        if spec is None or spec.loader is None:
            # spec_from_file_location returns None (or a loaderless spec)
            # when it cannot determine how to load the path; fail with a
            # clear message instead of an opaque AttributeError below.
            raise ImportError(f"could not create an import spec for {module_path!r}")
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        return module
    except Exception as e:
        # Import boundary: report and degrade gracefully; callers check for None.
        print(f"Failed to import {module_path}: {e}")
        return None


def format_result(result, precision=15):
    """
    Render a π approximation together with its absolute error.

    Args:
        result: The calculated approximation of π (a number, or a digit
            string beginning with "3.").
        precision: Number of decimal places to display.

    Returns:
        A string of the form "<value> (Error: <error>)"; when the error
        cannot be computed, the raw result plus a failure note.
    """
    try:
        is_digit_string = isinstance(result, str) and result.startswith("3.")
        if is_digit_string:
            # Digit strings are truncated rather than re-formatted;
            # +2 accounts for the leading "3.".
            display_result = result[:precision + 2]
            numeric_result = float(display_result)
        else:
            numeric_result = float(result)
            display_result = f"{numeric_result:.{precision}f}"

        absolute_error = abs(numeric_result - math.pi)
        error_str = f"{absolute_error:.{precision}e}"
        return f"{display_result} (Error: {error_str})"
    except Exception as e:
        return f"{result} (Error calculation failed: {e})"

def run_method(module, method_name, *args, **kwargs):
    """
    Run a specific pi calculation method from a module and time it.

    Args:
        module: The imported module containing the method (may be None
            when the import failed).
        method_name: Name of the method to run.
        *args, **kwargs: Arguments forwarded to the method.

    Returns:
        Tuple of (result, execution_time_seconds); (None, 0) when the
        module is missing, the method is absent, or the call raised.
    """
    if not module:
        return None, 0

    try:
        method = getattr(module, method_name)
        # perf_counter is monotonic and high-resolution, unlike time.time,
        # so short runs are timed reliably and never go backwards.
        start_time = time.perf_counter()
        result = method(*args, **kwargs)
        execution_time = time.perf_counter() - start_time
        return result, execution_time
    except AttributeError:
        print(f"Method {method_name} not found in module")
        return None, 0
    except Exception as e:
        print(f"Error running {method_name}: {e}")
        return None, 0


def benchmark(methods, iterations=1):
    """
    Benchmark a collection of pi calculation methods.

    Args:
        methods: List of (module, method_name, args, kwargs) tuples.
        iterations: Number of timed runs per method; times are averaged.

    Returns:
        List of result dictionaries with keys
        'Method', 'Result', 'Numeric', 'Error', 'Time'.
    """
    results = []

    for module, method_name, args, kwargs in methods:
        if not module:
            # The module failed to import; nothing to run.
            continue

        arg_list = ', '.join(str(a) for a in args)
        method_display_name = f"{method_name}({arg_list})"

        try:
            total_time = 0
            result = None
            for _ in range(iterations):
                result, elapsed = run_method(module, method_name, *args, **kwargs)
                total_time += elapsed
            avg_time = total_time / iterations

            if result is None:
                # Every run failed; skip the row entirely.
                continue

            try:
                # Digit strings are truncated to a float-representable
                # prefix; everything else is coerced to float directly.
                if isinstance(result, str) and result.startswith("3."):
                    numeric_result = float(result[:17])
                else:
                    numeric_result = float(result)
                error = abs(numeric_result - math.pi)
            except (ValueError, TypeError):
                numeric_result = None
                error = None

            results.append({
                'Method': method_display_name,
                'Result': result,
                'Numeric': numeric_result,
                'Error': error,
                'Time': avg_time,
            })
        except Exception as e:
            print(f"Error benchmarking {method_display_name}: {e}")

    return results


def display_benchmark_results(results):
    """
    Display benchmark results in a formatted table plus a short summary.

    Args:
        results: List of benchmark result dictionaries as produced by
            benchmark(); may be empty.
    """
    if not results:
        # min() below raises ValueError on an empty sequence; bail out early.
        print("\nNo benchmark results to display.")
        return

    # Sort by error when known, then by time; unknown errors sort last.
    sorted_results = sorted(
        results,
        key=lambda x: (x['Error'] if x['Error'] is not None else float('inf'), x['Time'])
    )

    # Prepare table data
    table_data = []
    for r in sorted_results:
        result_display = r['Result']
        if isinstance(result_display, (float, int, Decimal)):
            result_display = f"{float(result_display):.15f}"
        elif isinstance(result_display, str) and result_display.startswith("3."):
            result_display = result_display[:17]  # Limit string length for display

        error_display = f"{r['Error']:.15e}" if r['Error'] is not None else "N/A"

        table_data.append([
            r['Method'],
            result_display,
            error_display,
            f"{r['Time']:.6f} s"
        ])

    # Print the table
    print("\nBenchmark Results (sorted by accuracy):")
    print(tabulate(table_data, headers=['Method', 'Result', 'Error', 'Time'],
                  tablefmt='grid'))

    # Also calculate the most accurate and fastest methods
    most_accurate = min(results, key=lambda x: x['Error'] if x['Error'] is not None else float('inf'))
    fastest = min(results, key=lambda x: x['Time'])

    # 'Error' can still be None when no method produced a numeric result;
    # formatting None with :.15e would raise TypeError.
    if most_accurate['Error'] is not None:
        print(f"\nMost accurate method: {most_accurate['Method']} with error {most_accurate['Error']:.15e}")
    else:
        print("\nMost accurate method: N/A (no numeric results)")
    print(f"Fastest method: {fastest['Method']} in {fastest['Time']:.6f} seconds")


def create_parser():
    """
    Build the command-line argument parser for the unified CLI.

    Returns:
        The configured argparse.ArgumentParser object.
    """
    parser = argparse.ArgumentParser(description='Unified Pi Calculator')
    subparsers = parser.add_subparsers(dest='command', help='Command to execute')

    # 'run': execute a single method from one of the three modules.
    run_parser = subparsers.add_parser('run', help='Run a specific pi calculation method')
    run_parser.add_argument('module', choices=['standard', 'spigot', 'geometric'],
                            help='Which module to use')
    run_parser.add_argument('method', help='The specific method to run')
    run_parser.add_argument('--args', nargs='*', type=int,
                            help='Arguments to pass to the method')
    run_parser.add_argument('--visualize', action='store_true',
                            help='Enable visualization (for supported methods)')

    # 'benchmark': time several methods and compare them.
    benchmark_parser = subparsers.add_parser('benchmark', help='Benchmark multiple methods')
    benchmark_parser.add_argument('--iterations', type=int, default=3,
                                  help='Number of iterations for benchmarking (default: 3)')
    boolean_flags = [
        ('--quick', 'Run only a quick subset of methods'),
        ('--standard', 'Include standard methods'),
        ('--spigot', 'Include spigot methods'),
        ('--geometric', 'Include geometric methods'),
        ('--all', 'Include all methods'),
    ]
    for flag, flag_help in boolean_flags:
        benchmark_parser.add_argument(flag, action='store_true', help=flag_help)

    # 'list': show every available method.
    subparsers.add_parser('list', help='List all available methods')

    return parser


def list_available_methods(standard_module, spigot_module, geometric_module):
    """
    Print every known pi calculation method grouped by source module,
    marking each one as available (✓) or missing (✗).

    Args:
        standard_module: The imported standard module (or None).
        spigot_module: The imported spigot module (or None).
        geometric_module: The imported geometric module (or None).
    """
    # Each entry bundles the group title, the module to probe, and the
    # (method_name, description) pairs expected in that module.
    groups = [
        ('Standard Methods (calculate_pi.py)', standard_module, [
            ('monte_carlo_pi', 'Monte Carlo method using random sampling'),
            ('leibniz_pi', 'Leibniz formula using infinite series'),
            ('bbp_pi', 'Bailey-Borwein-Plouffe formula'),
            ('nilakantha_pi', 'Nilakantha series (faster convergence)'),
            ('archimedes_pi', 'Archimedes method with polygons'),
        ]),
        ('Spigot Methods (spigot_pi_calculator.py)', spigot_module, [
            ('rabinowitz_wagon_spigot', 'Unbounded Spigot algorithm'),
            ('gosper_continued_fraction', 'Gosper continued fraction method'),
            ('chudnovsky_algorithm', 'Chudnovsky algorithm (high precision)'),
            ('pi_digit_extraction', 'Extract specific digits of π'),
        ]),
        ('Geometric Methods (geometric_pi_calculator.py)', geometric_module, [
            ('circle_approximation_method', 'Circle approximation with point sampling'),
            ('integration_methods', 'Numerical integration methods'),
            ('polygon_perimeter_method', 'Polygon perimeter approximation'),
            ('buffon_needle_method', "Buffon's needle method"),
            ('historical_approximations', 'Historical approximations of π'),
        ]),
    ]

    print("Available Pi Calculation Methods:\n")
    for group_name, module, methods in groups:
        # Group header with an '=' underline of matching width.
        print(f"{group_name}")
        print("=" * len(group_name))

        for method_name, description in methods:
            mark = "✓" if module and hasattr(module, method_name) else "✗"
            print(f"{mark} {method_name:<30} - {description}")

        print()

def _handle_run(args, modules):
    """Execute the single method chosen on the command line.

    Args:
        args: Parsed argparse namespace for the 'run' subcommand.
        modules: Mapping of CLI module name -> imported module (or None).
    """
    if args.module not in modules:
        print(f"Unknown module: {args.module}")
        return

    module = modules[args.module]
    if not module:
        print(f"Module {args.module} could not be imported.")
        return

    if not hasattr(module, args.method):
        print(f"Method {args.method} not found in {args.module} module.")
        return

    # Positional int arguments from --args; visualization is opt-in so we
    # only forward the kwarg when the flag was given (not every method
    # necessarily accepts it).
    method_args = args.args or []
    kwargs = {'visualize': args.visualize} if args.visualize else {}

    print(f"Running {args.method} from {args.module} module...")
    result, execution_time = run_method(module, args.method, *method_args, **kwargs)

    if result is not None:
        print(f"\nResult: {format_result(result)}")
        print(f"Execution time: {execution_time:.6f} seconds")


def _build_benchmark_suite(args, standard_module, spigot_module, geometric_module):
    """Return the (module, method_name, args, kwargs) tuples to benchmark.

    Selection honors --standard/--spigot/--geometric/--all; when none is
    given, every group is included. --quick swaps in smaller workloads.
    """
    include_all = args.all
    include_standard = args.standard or include_all
    include_spigot = args.spigot or include_all
    include_geometric = args.geometric or include_all

    # If no group flag was specified, include everything.
    if not any([include_standard, include_spigot, include_geometric]):
        include_standard = include_spigot = include_geometric = True

    suite = []

    if include_standard and standard_module:
        if args.quick:
            suite.extend([
                (standard_module, 'monte_carlo_pi', [100000], {}),
                (standard_module, 'leibniz_pi', [100000], {}),
                (standard_module, 'nilakantha_pi', [1000], {})
            ])
        else:
            suite.extend([
                (standard_module, 'monte_carlo_pi', [1000000], {}),
                (standard_module, 'leibniz_pi', [1000000], {}),
                (standard_module, 'bbp_pi', [10], {}),
                (standard_module, 'nilakantha_pi', [10000], {}),
                (standard_module, 'archimedes_pi', [1000000], {})
            ])

    if include_spigot and spigot_module:
        if args.quick:
            suite.extend([
                (spigot_module, 'rabinowitz_wagon_spigot', [20], {}),
                (spigot_module, 'gosper_continued_fraction', [10], {})
            ])
        else:
            suite.extend([
                (spigot_module, 'rabinowitz_wagon_spigot', [50], {}),
                (spigot_module, 'gosper_continued_fraction', [20], {}),
                (spigot_module, 'chudnovsky_algorithm', [3], {})
            ])

    if include_geometric and geometric_module:
        if args.quick:
            suite.extend([
                (geometric_module, 'circle_approximation_method', [100000], {'visualize': False}),
                (geometric_module, 'integration_methods', ['midpoint', 10000], {'visualize': False}),
                (geometric_module, 'polygon_perimeter_method', [6, 8], {'visualize': False})
            ])
        else:
            suite.extend([
                (geometric_module, 'circle_approximation_method', [1000000], {'visualize': False}),
                (geometric_module, 'integration_methods', ['midpoint', 100000], {'visualize': False}),
                (geometric_module, 'integration_methods', ['trapezoid', 100000], {'visualize': False}),
                (geometric_module, 'integration_methods', ['simpson', 50000], {'visualize': False}),
                (geometric_module, 'polygon_perimeter_method', [6, 10], {'visualize': False}),
                (geometric_module, 'buffon_needle_method', [100000], {'visualize': False})
            ])

    return suite


def _handle_benchmark(args, standard_module, spigot_module, geometric_module):
    """Build the benchmark suite, run it, and display the results."""
    suite = _build_benchmark_suite(args, standard_module, spigot_module, geometric_module)

    if not suite:
        print("No methods to benchmark.")
        return

    print(f"Benchmarking {len(suite)} methods with {args.iterations} iterations each...")
    print("This may take a while depending on the number and complexity of the methods.")

    results = benchmark(suite, args.iterations)
    display_benchmark_results(results)


def main():
    """
    Main entry point: import the calculator modules, parse command-line
    arguments, and dispatch to the requested subcommand.
    """
    # Import the three calculator modules; each may be None on failure.
    standard_module = import_module("calculate_pi.py")
    spigot_module = import_module("spigot_pi_calculator.py")
    geometric_module = import_module("geometric_pi_calculator.py")

    # At least one module must be available for any command to be useful.
    if not any([standard_module, spigot_module, geometric_module]):
        print("Error: Failed to import any of the pi calculator modules.")
        print("Please make sure the following files exist in the current directory:")
        print("  - calculate_pi.py")
        print("  - spigot_pi_calculator.py")
        print("  - geometric_pi_calculator.py")
        return

    parser = create_parser()
    args = parser.parse_args()

    if args.command == 'list':
        list_available_methods(standard_module, spigot_module, geometric_module)
    elif args.command == 'run':
        _handle_run(args, {
            'standard': standard_module,
            'spigot': spigot_module,
            'geometric': geometric_module,
        })
    elif args.command == 'benchmark':
        _handle_benchmark(args, standard_module, spigot_module, geometric_module)
    else:
        # No subcommand given: show usage instead of failing silently.
        parser.print_help()


# Script entry point: run the CLI, converting interrupts and unexpected
# errors into friendly messages instead of tracebacks.
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C during a long calculation/benchmark: exit quietly.
        print("\nOperation cancelled by user.")
    except Exception as e:
        # Top-level boundary: report the error rather than crash with a traceback.
        print(f"\nAn error occurred: {e}")