#!/bin/bash

# CodeMCP Discovery System Test Runner
# Comprehensive test execution script for all discovery modules

set -e  # Abort immediately if any command exits non-zero

# ANSI color codes used by the print_* helpers below (NC resets the color).
# Stored as literal escape sequences; echo -e / printf %b interpret them.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m' # No Color

# Absolute directory containing this script; the tests live alongside it.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
TEST_DIR="$SCRIPT_DIR"

# Default runtime options (overridden by the command-line flags parsed below)
VERBOSE=false                         # -v / --verbose
COVERAGE=false                        # -c / --coverage
PERFORMANCE=true                      # cleared by --no-performance
INTEGRATION=true                      # cleared by --no-integration
PARALLEL=false                        # -p / --parallel
OUTPUT_DIR="$TEST_DIR/test_results"   # -o / --output
TIMEOUT=300                           # -t / --timeout (seconds)

# Emit an informational message to stdout with a blue [INFO] tag.
# %b interprets the escape sequences in the color variables (and in the
# message), matching the original echo -e behavior exactly.
print_status() {
    printf '%b\n' "${BLUE}[INFO]${NC} $1"
}

# Emit a success message to stdout with a green [SUCCESS] tag.
print_success() {
    printf '%b\n' "${GREEN}[SUCCESS]${NC} $1"
}

# Emit a warning message to stdout with a yellow [WARNING] tag.
print_warning() {
    printf '%b\n' "${YELLOW}[WARNING]${NC} $1"
}

# Emit an error message to stdout with a red [ERROR] tag.
print_error() {
    printf '%b\n' "${RED}[ERROR]${NC} $1"
}

# Print the full usage/help text (options, module names, examples) to stdout.
# The heredoc delimiter is intentionally unquoted so $0 expands to the
# path the script was invoked with.
show_usage() {
    cat << EOF
CodeMCP Discovery System Test Runner

Usage: $0 [OPTIONS]

OPTIONS:
    -v, --verbose           Enable verbose output
    -c, --coverage          Generate coverage reports
    --no-performance        Skip performance tests
    --no-integration        Skip integration tests
    -p, --parallel          Run tests in parallel
    -o, --output DIR        Output directory for reports (default: test_results)
    -t, --timeout SECONDS   Test timeout in seconds (default: 300)
    --module MODULE         Run specific module tests only
    --list                  List available test modules
    -h, --help              Show this help message

MODULES:
    scanner                 Project Scanner Module tests
    classifier              Function Classifier Module tests
    resolver                Cross-File Resolution Module tests
    mcps                    Enhanced MCP tests
    cli                     CLI Discovery Command tests
    integration             End-to-end integration tests

EXAMPLES:
    # Run all tests with coverage
    $0 --coverage --verbose

    # Run only core module tests
    $0 --module scanner --module classifier --module resolver

    # Run tests without performance tests
    $0 --no-performance --verbose

    # Run parallel tests with custom output directory
    $0 --parallel --output /tmp/test_results
EOF
}

# Verify required and optional test dependencies.
# Globals:   COVERAGE (read)
# Outputs:   status/warning messages via the print_* helpers
# Exits:     1 if python3 or pytest is unavailable; missing optional
#            dependencies only produce warnings.
check_dependencies() {
    print_status "Checking test dependencies..."

    # Hard requirements: python3 and pytest.
    if ! command -v python3 &> /dev/null; then
        print_error "Python 3 is required but not installed"
        exit 1
    fi

    if ! python3 -c "import pytest" &> /dev/null; then
        print_error "pytest is required. Install with: pip install pytest"
        exit 1
    fi

    # Optional Python packages.
    local missing_deps=()

    # Check COVERAGE first so the interpreter is only spawned when the
    # result actually matters (the original probed pytest_cov regardless).
    if [ "$COVERAGE" = true ] && ! python3 -c "import pytest_cov" &> /dev/null; then
        missing_deps+=("pytest-cov")
    fi

    if ! python3 -c "import pytest_asyncio" &> /dev/null; then
        missing_deps+=("pytest-asyncio")
    fi

    if [ ${#missing_deps[@]} -gt 0 ]; then
        print_warning "Missing optional dependencies: ${missing_deps[*]}"
        print_warning "Install with: pip install ${missing_deps[*]}"
    fi

    # Optional external toolchains needed by language-specific tests.
    if ! command -v node &> /dev/null; then
        print_warning "Node.js not found - TypeScript tests may fail"
    fi

    if ! command -v clang &> /dev/null && ! command -v gcc &> /dev/null; then
        print_warning "C++ compiler not found - C++ tests may fail"
    fi

    if ! command -v gfortran &> /dev/null; then
        print_warning "Fortran compiler not found - Fortran tests may fail"
    fi

    print_success "Dependency check completed"
}

# Prepare the test environment: create the report directory, extend
# PYTHONPATH so the codemcp package and local helpers resolve, and cd
# into the test directory.
# Globals:   TEST_DIR, OUTPUT_DIR (read); PYTHONPATH (exported)
setup_environment() {
    print_status "Setting up test environment..."

    # Create output directory
    mkdir -p "$OUTPUT_DIR"

    # Resolve the CodeMCP root (three levels above the test dir).
    # Declare and assign separately (SC2155): `local x=$(cmd)` returns the
    # status of `local`, so under set -e a failed cd would be silently
    # swallowed and an empty path appended to PYTHONPATH.
    local codemcp_root
    codemcp_root="$(cd "$TEST_DIR/../../.." && pwd)"
    export PYTHONPATH="${PYTHONPATH:+$PYTHONPATH:}$codemcp_root"

    # Also add the current directory for relative imports.
    # NOTE(review): this captures the *invocation* cwd, before the cd below —
    # confirm that is the intended directory.
    export PYTHONPATH="${PYTHONPATH:+$PYTHONPATH:}$(pwd)"

    # Change to test directory
    cd "$TEST_DIR"

    print_success "Test environment ready"
}

# Run the pytest suite for a single discovery module.
# Globals:   VERBOSE, COVERAGE, PERFORMANCE, INTEGRATION, PARALLEL,
#            TIMEOUT, OUTPUT_DIR (all read)
# Arguments: $1 - module key (scanner|classifier|resolver|mcps|cli|integration)
# Outputs:   pytest output on stdout; JUnit XML report in $OUTPUT_DIR
# Returns:   0 if the module's tests pass (or are skipped), 1 otherwise
run_test_module() {
    local module="$1"
    local test_file=""
    local description=""

    case "$module" in
        scanner)
            test_file="test_project_scanner.py"
            description="Project Scanner Module"
            ;;
        classifier)
            test_file="test_function_classifier.py"
            description="Function Classifier Module"
            ;;
        resolver)
            test_file="test_cross_file_resolver.py"
            description="Cross-File Resolution Module"
            ;;
        mcps)
            test_file="test_enhanced_mcps.py"
            description="Enhanced MCP Capabilities"
            ;;
        cli)
            test_file="test_cli_discovery.py"
            description="CLI Discovery Commands"
            ;;
        integration)
            test_file="test_integration.py"
            description="Integration Tests"
            ;;
        *)
            print_error "Unknown module: $module"
            return 1
            ;;
    esac

    if [ ! -f "$test_file" ]; then
        print_error "Test file not found: $test_file"
        return 1
    fi

    # Honor --no-integration before announcing anything.
    if [ "$INTEGRATION" = false ] && [ "$module" = "integration" ]; then
        print_warning "Skipping integration tests as requested"
        return 0
    fi

    print_status "Running $description tests..."

    # Build the pytest invocation as an array instead of a string + eval,
    # so arguments containing spaces (e.g. OUTPUT_DIR, marker expressions)
    # survive intact and no shell re-parsing happens.
    local pytest_cmd=(pytest "$test_file")

    if [ "$VERBOSE" = true ]; then
        pytest_cmd+=(-v)
    fi

    if [ "$COVERAGE" = true ]; then
        pytest_cmd+=(--cov=codemcp --cov-append)
    fi

    # Marker expressions used to filter out test categories.
    local markers=()
    if [ "$PERFORMANCE" = false ]; then
        markers+=("not performance")
    fi

    if [ ${#markers[@]} -gt 0 ]; then
        # Join the markers with " and " explicitly. (The previous
        # IFS=" and " trick was wrong: only the first character of IFS is
        # used as the join separator, producing a bare space.)
        local marker_expr="${markers[0]}"
        local m
        for m in "${markers[@]:1}"; do
            marker_expr="$marker_expr and $m"
        done
        pytest_cmd+=(-m "$marker_expr")
    fi

    # Honor --parallel when the pytest-xdist plugin is installed
    # (previously the PARALLEL flag was parsed but never used).
    if [ "$PARALLEL" = true ]; then
        if python3 -c "import xdist" &> /dev/null; then
            pytest_cmd+=(-n auto)
        else
            print_warning "pytest-xdist not installed - running tests serially"
        fi
    fi

    # Add a timeout only if pytest-timeout is available.
    if python3 -c "import pytest_timeout" &> /dev/null; then
        pytest_cmd+=("--timeout=$TIMEOUT")
    fi

    # Short tracebacks plus a per-module JUnit XML report.
    pytest_cmd+=(--tb=short "--junit-xml=$OUTPUT_DIR/${module}_results.xml")

    # Run the test
    if "${pytest_cmd[@]}"; then
        print_success "$description tests completed"
        return 0
    else
        print_error "$description tests failed"
        return 1
    fi
}

# Run every discovery test module in sequence and print a summary.
# Globals:   INTEGRATION (read)
# Returns:   0 if all modules pass, 1 if any fail
run_all_tests() {
    local modules=("scanner" "classifier" "resolver" "mcps" "cli")

    if [ "$INTEGRATION" = true ]; then
        modules+=("integration")
    fi

    local failed_modules=()
    local total_modules=${#modules[@]}
    local passed_modules=0

    print_status "Running all discovery tests ($total_modules modules)..."

    # Run tests
    for module in "${modules[@]}"; do
        if run_test_module "$module"; then
            # Use an arithmetic assignment, not ((passed_modules++)):
            # the post-increment form evaluates to the old value, so it
            # returns exit status 1 when the counter is 0 and would abort
            # the script under `set -e` if this function is ever called
            # outside an if-condition (SC2219).
            passed_modules=$((passed_modules + 1))
        else
            failed_modules+=("$module")
        fi
        echo
    done

    # Generate summary
    print_status "Test Execution Summary"
    echo "=========================="
    echo "Total modules: $total_modules"
    echo "Passed: $passed_modules"
    echo "Failed: ${#failed_modules[@]}"

    if [ ${#failed_modules[@]} -eq 0 ]; then
        print_success "All tests passed!"
        return 0
    else
        print_error "Failed modules: ${failed_modules[*]}"
        return 1
    fi
}

# Produce HTML/XML/text coverage reports in $OUTPUT_DIR when --coverage is on.
# A reporting failure (e.g. no collected data) is downgraded to a warning:
# this function runs bare under `set -e`, so an unguarded failing coverage
# command would previously abort the script after the tests had already run.
# Globals:   COVERAGE, OUTPUT_DIR (read)
# Returns:   always 0
generate_coverage_report() {
    if [ "$COVERAGE" != true ]; then
        return 0
    fi

    print_status "Generating coverage report..."

    if ! python3 -c "import coverage" &> /dev/null; then
        print_warning "Coverage module not available"
        return 0
    fi

    # Generate HTML, XML and plain-text reports; warn instead of dying.
    if coverage html -d "$OUTPUT_DIR/coverage_html" \
        && coverage xml -o "$OUTPUT_DIR/coverage.xml" \
        && coverage report > "$OUTPUT_DIR/coverage.txt"; then
        print_success "Coverage report generated in $OUTPUT_DIR/"
    else
        print_warning "Coverage report generation failed - check collected data"
    fi
}

# Print the recognized test module keys with short descriptions.
list_modules() {
    local -a entries=(
        "scanner     - Project Scanner Module tests"
        "classifier  - Function Classifier Module tests"
        "resolver    - Cross-File Resolution Module tests"
        "mcps        - Enhanced MCP tests"
        "cli         - CLI Discovery Command tests"
        "integration - End-to-end integration tests"
    )
    echo "Available test modules:"
    local entry
    for entry in "${entries[@]}"; do
        echo "  $entry"
    done
}

# ---------------------------------------------------------------------------
# Command-line parsing. Flags mutate the option globals declared above;
# --module may be repeated and accumulates into MODULES. Options that take a
# value are now validated: previously a trailing `-o`/`-t`/`--module` would
# silently assign an empty value and then die on `shift 2` with no message
# under `set -e`.
# ---------------------------------------------------------------------------
MODULES=()
while [[ $# -gt 0 ]]; do
    case $1 in
        -v|--verbose)
            VERBOSE=true
            shift
            ;;
        -c|--coverage)
            COVERAGE=true
            shift
            ;;
        --no-performance)
            PERFORMANCE=false
            shift
            ;;
        --no-integration)
            INTEGRATION=false
            shift
            ;;
        -p|--parallel)
            PARALLEL=true
            shift
            ;;
        -o|--output)
            if [[ $# -lt 2 ]]; then
                print_error "Option $1 requires an argument"
                exit 1
            fi
            OUTPUT_DIR="$2"
            shift 2
            ;;
        -t|--timeout)
            if [[ $# -lt 2 ]]; then
                print_error "Option $1 requires an argument"
                exit 1
            fi
            TIMEOUT="$2"
            shift 2
            ;;
        --module)
            if [[ $# -lt 2 ]]; then
                print_error "Option $1 requires an argument"
                exit 1
            fi
            MODULES+=("$2")
            shift 2
            ;;
        --list)
            list_modules
            exit 0
            ;;
        -h|--help)
            show_usage
            exit 0
            ;;
        *)
            print_error "Unknown option: $1"
            show_usage
            exit 1
            ;;
    esac
done

# Orchestrate the full run: dependency check, environment setup, test
# execution (entire suite or the user-selected subset), optional coverage
# report, and final status. Exits 0 on success, 1 if any tests failed.
main() {
    print_status "CodeMCP Discovery System Test Runner"
    print_status "====================================="

    check_dependencies
    setup_environment

    local exit_code=0

    if [ ${#MODULES[@]} -eq 0 ]; then
        # No explicit selection: run the complete suite.
        run_all_tests || exit_code=1
    else
        # Run only the modules requested via --module.
        print_status "Running specific modules: ${MODULES[*]}"
        local failed_modules=()
        local module
        for module in "${MODULES[@]}"; do
            if ! run_test_module "$module"; then
                failed_modules+=("$module")
                exit_code=1
            fi
        done

        if [ ${#failed_modules[@]} -gt 0 ]; then
            print_error "Failed modules: ${failed_modules[*]}"
        fi
    fi

    generate_coverage_report

    # Final status banner.
    echo
    if [ $exit_code -eq 0 ]; then
        print_success "All requested tests completed successfully!"
    else
        print_error "Some tests failed. Check the output above for details."
    fi

    print_status "Test results and reports available in: $OUTPUT_DIR"

    exit $exit_code
}

# Entry point. Note: the option parser above consumed every argument via
# shift, so "$@" is empty by the time main runs; options are communicated
# through the globals instead.
main "$@"