#!/bin/bash
# Final Validation Script for ADKR Go Implementation
# This script performs comprehensive validation of all components

# Strict mode: abort on command failure (-e), on use of unset variables (-u),
# and when any stage of a pipeline fails (-o pipefail).
set -euo pipefail

# ANSI color codes for terminal output. The '\033' escapes are stored
# literally (single quotes) and expanded at print time by the logging
# functions. readonly marks them as constants.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m' # No Color

# Logging functions
log_info() {
    # Informational message in blue. printf '%b' expands the escape codes
    # stored in the color variables, while '%s' prints the message verbatim —
    # unlike the non-portable `echo -e`, which would also interpret any
    # backslash sequences inside $1.
    printf '%b[INFO]%b %s\n' "${BLUE}" "${NC}" "$1"
}

log_success() {
    # Success message in green; '%b' expands the color escapes, '%s' keeps
    # the message verbatim (more robust than the non-portable `echo -e`).
    printf '%b[SUCCESS]%b %s\n' "${GREEN}" "${NC}" "$1"
}

log_warning() {
    # Warning message in yellow; '%b' expands the color escapes, '%s' keeps
    # the message verbatim (more robust than the non-portable `echo -e`).
    printf '%b[WARNING]%b %s\n' "${YELLOW}" "${NC}" "$1"
}

log_error() {
    # Error message in red; '%b' expands the color escapes, '%s' keeps the
    # message verbatim (more robust than the non-portable `echo -e`).
    # NOTE(review): output goes to stdout like the other log helpers; callers
    # redirect both streams, so this is kept for backward compatibility.
    printf '%b[ERROR]%b %s\n' "${RED}" "${NC}" "$1"
}

# Validation results.
# VALIDATION_RESULTS holds one "name:result:details" record per executed
# test; TOTAL_TESTS / PASSED_TESTS feed the summary percentages.
declare -a VALIDATION_RESULTS=()
declare -i TOTAL_TESTS=0
declare -i PASSED_TESTS=0

# Test result tracking.
# Records one test outcome: bumps the counters, logs the verdict, and
# appends a "name:result:details" record for the final report.
# Arguments: $1 - test name, $2 - "PASS" or "FAIL", $3 - detail text
add_test_result() {
    local name=$1
    local outcome=$2
    local info=$3

    TOTAL_TESTS=$((TOTAL_TESTS + 1))
    case "$outcome" in
        PASS)
            PASSED_TESTS=$((PASSED_TESTS + 1))
            log_success "$name: PASS"
            ;;
        *)
            log_error "$name: FAIL - $info"
            ;;
    esac

    VALIDATION_RESULTS+=("$name:$outcome:$info")
}

# Test 1: Build all binaries
test_build_binaries() {
    log_info "Testing binary compilation..."

    # Each cmd/<name> package is built to ./<name>. The list replaces six
    # copy-pasted build stanzas, so adding a binary is a one-line change.
    local -a binaries=(
        adkr_node
        adkr_bench
        bls_gen
        config_validator
        parity_simple
        coin_bench
    )

    local build_errors=0
    local bin
    for bin in "${binaries[@]}"; do
        # Compiler diagnostics are discarded; only the exit status matters.
        if ! go build -o "$bin" "./cmd/$bin" 2>/dev/null; then
            build_errors=$((build_errors + 1))
        fi
    done

    if [ "$build_errors" -eq 0 ]; then
        add_test_result "Binary Compilation" "PASS" "All binaries compiled successfully"
    else
        add_test_result "Binary Compilation" "FAIL" "$build_errors binaries failed to compile"
    fi
}

# Test 2: BLS Key Generation
# Runs bls_gen for a 4-node / threshold-3 setup and checks that the expected
# key artifacts appear in ./test_keys_final.
test_bls_key_generation() {
    log_info "Testing BLS key generation..."

    # Remove stale keys left over from a previous run
    rm -rf ./test_keys_final

    if ! ./bls_gen -n 4 -t 3 -output ./test_keys_final >/dev/null 2>&1; then
        add_test_result "BLS Key Generation" "FAIL" "Key generation failed"
        return
    fi

    # Every artifact below must exist for the test to pass
    local artifact
    local all_present=1
    for artifact in \
        "./test_keys_final/bls_keys_complete.json" \
        "./test_keys_final/bls_keys_node_0.json" \
        "./test_keys_final/hosts.template"; do
        if [ ! -f "$artifact" ]; then
            all_present=0
        fi
    done

    if [ "$all_present" -eq 1 ]; then
        add_test_result "BLS Key Generation" "PASS" "Keys generated successfully"
    else
        add_test_result "BLS Key Generation" "FAIL" "Key files not created"
    fi
}

# Test 3: Configuration Validation
# Validates the key set produced by the BLS key-generation test; fails
# outright when that directory is absent.
test_config_validation() {
    log_info "Testing configuration validation..."

    if [ ! -d "./test_keys_final" ]; then
        add_test_result "Configuration Validation" "FAIL" "No configuration to validate"
        return
    fi

    if ./config_validator -config ./test_keys_final >/dev/null 2>&1; then
        add_test_result "Configuration Validation" "PASS" "Configuration validated successfully"
    else
        add_test_result "Configuration Validation" "FAIL" "Configuration validation failed"
    fi
}

# Test 4: Performance Benchmark
# Runs the benchmark binary in fast mode; a 60-second cap keeps a hung
# benchmark from stalling the whole suite.
test_performance_benchmark() {
    log_info "Testing performance benchmark..."

    local outcome="FAIL"
    if timeout 60s ./adkr_bench -n 4 -f 1 -fast >/dev/null 2>&1; then
        outcome="PASS"
    fi

    if [ "$outcome" = "PASS" ]; then
        add_test_result "Performance Benchmark" "PASS" "Benchmark completed successfully"
    else
        add_test_result "Performance Benchmark" "FAIL" "Benchmark failed or timed out"
    fi
}

# Test 5: Equivalence Tests
# Runs the simplified parity tests and requires >= 80% of the recorded
# results to have succeeded.
test_equivalence() {
    log_info "Testing equivalence..."

    # 90s cap so a hung parity run cannot stall the suite
    if timeout 90s ./parity_simple -output ./parity_final_results.json >/dev/null 2>&1; then
        if [ -f "./parity_final_results.json" ]; then
            # Declare and assign separately so jq's exit status is not masked
            # by `local` (ShellCheck SC2155); the `|| echo "0"` fallback covers
            # a missing jq binary or malformed JSON.
            local success_count total_count
            success_count=$(jq '.results | map(select(.success == true)) | length' ./parity_final_results.json 2>/dev/null || echo "0")
            total_count=$(jq '.results | length' ./parity_final_results.json 2>/dev/null || echo "0")

            # total_count > 0 also guards the division below against /0
            if [ "$total_count" -gt 0 ]; then
                local success_rate=$((success_count * 100 / total_count))
                if [ "$success_rate" -ge 80 ]; then
                    add_test_result "Equivalence Tests" "PASS" "$success_count/$total_count tests passed ($success_rate%)"
                else
                    add_test_result "Equivalence Tests" "FAIL" "Only $success_count/$total_count tests passed ($success_rate%)"
                fi
            else
                add_test_result "Equivalence Tests" "FAIL" "No test results found"
            fi
        else
            add_test_result "Equivalence Tests" "FAIL" "Results file not created"
        fi
    else
        add_test_result "Equivalence Tests" "FAIL" "Equivalence tests failed or timed out"
    fi
}

# Test 6: Operations Manager
# Smoke-tests scripts/ops_manager.sh: it must exist, answer --help, and
# complete its setup command.
test_operations_manager() {
    log_info "Testing operations manager..."

    if [ ! -f "./scripts/ops_manager.sh" ]; then
        add_test_result "Operations Manager" "FAIL" "Operations manager script not found"
        return
    fi

    if ! ./scripts/ops_manager.sh --help >/dev/null 2>&1; then
        add_test_result "Operations Manager" "FAIL" "Help command failed"
        return
    fi

    if ./scripts/ops_manager.sh setup >/dev/null 2>&1; then
        add_test_result "Operations Manager" "PASS" "Operations manager working correctly"
    else
        add_test_result "Operations Manager" "FAIL" "Setup command failed"
    fi
}

# Test 7: Documentation
# Checks that every expected documentation file exists in the project root.
test_documentation() {
    log_info "Testing documentation..."

    # Every document the project is expected to ship
    local -a doc_files=(
        "README.md"
        "docs/INDEX.md"
        "docs/MODULE_MAPPING.md"
        "docs/RUNNING_GUIDE.md"
        "docs/EQUIVALENCE_METRICS.md"
        "docs/API_REFERENCE.md"
        "docs/OPERATIONS_GUIDE.md"
        "PROJECT_SUMMARY.md"
    )

    local missing_docs=0
    local doc
    for doc in "${doc_files[@]}"; do
        [ -f "$doc" ] || missing_docs=$((missing_docs + 1))
    done

    if [ "$missing_docs" -eq 0 ]; then
        add_test_result "Documentation" "PASS" "All documentation files present"
    else
        add_test_result "Documentation" "FAIL" "$missing_docs documentation files missing"
    fi
}

# Test 8: Unit Tests
# Runs the Go unit-test suite; `go test` exits non-zero when any package
# test fails.
test_unit_tests() {
    log_info "Testing unit tests..."

    local outcome="PASS"
    local details="All unit tests passed"
    if ! go test ./pkg/... -v >/dev/null 2>&1; then
        outcome="FAIL"
        details="Some unit tests failed"
    fi
    add_test_result "Unit Tests" "$outcome" "$details"
}

# Test 9: Coin Benchmark
# Runs the coin benchmark; a 30-second cap keeps a hang from stalling the
# suite.
test_coin_benchmark() {
    log_info "Testing coin benchmark..."

    if timeout 30s ./coin_bench -n 4 -f 1 -round 3 >/dev/null 2>&1; then
        add_test_result "Coin Benchmark" "PASS" "Coin benchmark completed successfully"
        return
    fi
    add_test_result "Coin Benchmark" "FAIL" "Coin benchmark failed or timed out"
}

# Test 10: File Structure
# Verifies the expected project layout: required directories and files.
test_file_structure() {
    log_info "Testing file structure..."

    # "d:" entries must be directories, "f:" entries regular files
    local -a required=(
        "d:cmd"
        "d:pkg"
        "d:docs"
        "d:scripts"
        "d:configs"
        "f:go.mod"
        "f:go.sum"
        "f:README.md"
        "f:PROJECT_SUMMARY.md"
    )

    local missing_items=0
    local entry kind path
    for entry in "${required[@]}"; do
        kind=${entry%%:*}
        path=${entry#*:}
        if [ "$kind" = "d" ]; then
            [ -d "$path" ] || missing_items=$((missing_items + 1))
        else
            [ -f "$path" ] || missing_items=$((missing_items + 1))
        fi
    done

    if [ "$missing_items" -eq 0 ]; then
        add_test_result "File Structure" "PASS" "All required files and directories present"
    else
        add_test_result "File Structure" "FAIL" "$missing_items required items missing"
    fi
}

# Generate final report
# Prints a console summary of all recorded results and writes the same
# summary to ./final_validation_report.txt.
# Globals: reads TOTAL_TESTS, PASSED_TESTS, VALIDATION_RESULTS
generate_final_report() {
    log_info "Generating final validation report..."

    # Integer pass percentage. Guard the division so an empty test run
    # cannot trigger a divide-by-zero arithmetic error.
    local success_rate=0
    if [ "$TOTAL_TESTS" -gt 0 ]; then
        success_rate=$((PASSED_TESTS * 100 / TOTAL_TESTS))
    fi

    echo ""
    echo "============================================================"
    echo "ADKR GO IMPLEMENTATION - FINAL VALIDATION REPORT"
    echo "============================================================"
    echo ""
    echo "Overall Results:"
    echo "  Total Tests: $TOTAL_TESTS"
    echo "  Passed Tests: $PASSED_TESTS"
    echo "  Failed Tests: $((TOTAL_TESTS - PASSED_TESTS))"
    echo "  Success Rate: $success_rate%"
    echo ""

    # Three-tier verdict: >=90% pass, 80-89% pass-with-warnings, else fail
    if [ "$success_rate" -ge 90 ]; then
        log_success "VALIDATION PASSED: $success_rate% success rate"
        echo ""
        echo "🎉 ADKR Go implementation is ready for production use!"
    elif [ "$success_rate" -ge 80 ]; then
        log_warning "VALIDATION PASSED WITH WARNINGS: $success_rate% success rate"
        echo ""
        echo "⚠️  ADKR Go implementation is functional but has some issues."
    else
        log_error "VALIDATION FAILED: $success_rate% success rate"
        echo ""
        echo "❌ ADKR Go implementation needs attention before production use."
    fi

    echo ""
    echo "Detailed Results:"
    echo "=================="
    # Records are "name:result:details"; read assigns the remainder of the
    # record (including any extra colons) to the last variable.
    for result in "${VALIDATION_RESULTS[@]}"; do
        IFS=':' read -r test_name test_result test_details <<< "$result"
        if [ "$test_result" = "PASS" ]; then
            echo "✅ $test_name"
        else
            echo "❌ $test_name: $test_details"
        fi
    done

    echo ""
    echo "============================================================"

    # Save report to file.
    # NOTE(review): the file verdict uses a strict 90% bar, so an 80-89% run
    # prints a warning above but is recorded as FAILED here — confirm this
    # asymmetry is intended.
    cat > ./final_validation_report.txt << EOF
ADKR Go Implementation - Final Validation Report
Generated: $(date)

Overall Results:
  Total Tests: $TOTAL_TESTS
  Passed Tests: $PASSED_TESTS
  Failed Tests: $((TOTAL_TESTS - PASSED_TESTS))
  Success Rate: $success_rate%

Detailed Results:
$(for result in "${VALIDATION_RESULTS[@]}"; do
    IFS=':' read -r test_name test_result test_details <<< "$result"
    if [ "$test_result" = "PASS" ]; then
        echo "✅ $test_name"
    else
        echo "❌ $test_name: $test_details"
    fi
done)

Validation Status: $([ $success_rate -ge 90 ] && echo "PASSED" || echo "FAILED")
EOF

    log_info "Final validation report saved to: ./final_validation_report.txt"
}

# Cleanup function
# Removes the artifacts produced by the key-generation and parity tests.
cleanup() {
    log_info "Cleaning up test files..."
    rm -rf -- ./test_keys_final ./parity_final_results.json
}

# Main execution
# Runs the whole validation suite in order, emits the final report, and
# removes the temporary test artifacts.
main() {
    log_info "Starting ADKR Go Implementation Final Validation..."
    log_info "This will test all components and generate a comprehensive report."
    echo ""

    # Validation suite, executed in this exact order
    local -a suite=(
        test_build_binaries
        test_bls_key_generation
        test_config_validation
        test_performance_benchmark
        test_equivalence
        test_operations_manager
        test_documentation
        test_unit_tests
        test_coin_benchmark
        test_file_structure
    )

    local step
    for step in "${suite[@]}"; do
        "$step"
    done

    generate_final_report
    cleanup

    echo ""
    log_info "Final validation completed!"
}

# Script entry point: run the full validation suite, forwarding any
# command-line arguments (currently unused by main).
main "$@"
