/*
 * Automated Correctness Testing Framework
 * Comprehensive testing system for GPU correctness validation
 */

#include <cuda_runtime.h>
#include <stdio.h>

#include <chrono>
#include <cstdlib>
#include <ctime>
#include <fstream>
#include <memory>
#include <stdexcept>
#include <string>
#include <vector>

#include <sys/stat.h>  // mkdir() for the report output directory (POSIX)

#include <json/json.h>

// Include our validation components
#include "cpu_reference.h"
#include "validation.h"
#include "memory_layout.h"
#include "edge_cases.h"
#include "debug_utils.h"

// Test configuration structure
// Test configuration structure.
// Aggregates every tunable knob of the test suite; defaults match the values
// advertised by print_usage(). Overridden by command-line flags in main()
// or by the extern "C" factory functions.
struct TestConfig {
    int array_size = 1024 * 1024;  // 1M elements  (floats per sub-task buffer)
    int num_sub_tasks = 128;       // independent buffer sets / tasks
    int threads_per_block = 256;   // CUDA launch config (not read in this file — used by kernels elsewhere, presumably; TODO confirm)
    float tolerance = 1e-6f;       // numeric comparison tolerance, written into the JSON report
    bool verbose = false;          // extra logging (set by --verbose)
    bool generate_json = true;     // emit a JSON report file (disable with --no-json)
    bool run_edge_cases = true;    // include the edge-case test pass
    bool run_memory_analysis = true;  // include the memory-layout test pass
    std::string output_dir = "./validation_results";  // where JSON reports are written
    int num_iterations = 5;        // repetitions of the sequential-execution test
};

// Test result structure
// Test result structure.
// One instance per test (and one aggregate instance for the comprehensive
// run); filled in by the test_* methods of AutomatedTestSuite.
struct TestResult {
    std::string test_name;         // human-readable test label
    bool passed = false;           // overall pass/fail verdict
    double execution_time = 0.0;   // wall-clock time in milliseconds
    int total_elements = 0;        // number of elements examined
    int error_count = 0;           // mismatching elements accumulated across sub-tasks
    float max_error = 0.0f;        // NOTE(review): never assigned anywhere in this file — confirm whether validators should populate it
    std::string error_message;     // optional failure detail, printed when non-empty
    Json::Value detailed_results;  // per-test breakdown embedded in the JSON report
    
    // Print a one-line PASS/FAIL summary (plus the error message, if any)
    // to stdout.
    void print_summary() const {
        printf("Test: %s - %s (%.2f ms, %d errors)\n",
               test_name.c_str(), passed ? "PASSED" : "FAILED",
               execution_time, error_count);
        if (!error_message.empty()) {
            printf("  Error: %s\n", error_message.c_str());
        }
    }
};

// Comprehensive test suite
class AutomatedTestSuite {
private:
    TestConfig config;
    
    // CUDA memory management
    std::vector<float*> d_a, d_b, d_c;
    std::vector<float*> h_a, h_b, h_c, h_expected;
    
public:
    AutomatedTestSuite(const TestConfig& cfg) : config(cfg) {
        setup_memory();
    }
    
    ~AutomatedTestSuite() {
        cleanup();
    }
    
    void setup_memory() {
        // Allocate host memory
        h_a.resize(config.num_sub_tasks);
        h_b.resize(config.num_sub_tasks);
        h_c.resize(config.num_sub_tasks);
        h_expected.resize(config.num_sub_tasks);
        
        for (int i = 0; i < config.num_sub_tasks; i++) {
            cudaMallocHost(&h_a[i], config.array_size * sizeof(float));
            cudaMallocHost(&h_b[i], config.array_size * sizeof(float));
            cudaMallocHost(&h_c[i], config.array_size * sizeof(float));
            cudaMallocHost(&h_expected[i], config.array_size * sizeof(float));
            
            // Initialize with test data
            initialize_test_data(i);
        }
        
        // Allocate device memory
        d_a.resize(config.num_sub_tasks);
        d_b.resize(config.num_sub_tasks);
        d_c.resize(config.num_sub_tasks);
        
        for (int i = 0; i < config.num_sub_tasks; i++) {
            cudaMalloc(&d_a[i], config.array_size * sizeof(float));
            cudaMalloc(&d_b[i], config.array_size * sizeof(float));
            cudaMalloc(&d_c[i], config.array_size * sizeof(float));
            
            // Copy data to device
            cudaMemcpy(d_a[i], h_a[i], config.array_size * sizeof(float), cudaMemcpyHostToDevice);
            cudaMemcpy(d_b[i], h_b[i], config.array_size * sizeof(float), cudaMemcpyHostToDevice);
        }
    }
    
    void initialize_test_data(int task_id) {
        // Use different patterns for each task
        CPUReferenceCalculator calculator(config.array_size, config.num_sub_tasks);
        
        switch (task_id % 6) {
            case 0: // Linear pattern
                calculator.generateLinearPattern(h_a[task_id], config.array_size);
                calculator.generateLinearPattern(h_b[task_id], config.array_size, 1.0f, 1.0f);
                break;
            case 1: // Interleaved pattern
                calculator.generateInterleavedPattern(h_a[task_id], config.array_size, 2);
                calculator.generateInterleavedPattern(h_b[task_id], config.array_size, 3);
                break;
            case 2: // Blocked pattern
                calculator.generateBlockedPattern(h_a[task_id], config.array_size, 1024);
                calculator.generateBlockedPattern(h_b[task_id], config.array_size, 1024, 1);
                break;
            case 3: // Zero arrays
                calculator.generateZeroArrays(h_a[task_id], h_b[task_id], config.array_size);
                break;
            case 4: // Large values
                for (int i = 0; i < config.array_size; i++) {
                    h_a[task_id][i] = 1e6f;
                    h_b[task_id][i] = 2e6f;
                }
                break;
            case 5: // Small values
                for (int i = 0; i < config.array_size; i++) {
                    h_a[task_id][i] = 1e-6f;
                    h_b[task_id][i] = 2e-6f;
                }
                break;
        }
        
        // Calculate expected results
        calculator.vectorAddCPU(h_a[task_id], h_b[task_id], h_expected[task_id], config.array_size);
    }
    
    // Run sequential execution test
    TestResult test_sequential_execution() {
        TestResult result;
        result.test_name = "Sequential Execution";
        
        auto start = std::chrono::high_resolution_clock::now();
        
        // Use CPU reference for validation
        CPUReferenceCalculator calculator(config.array_size, config.num_sub_tasks);
        
        for (int iter = 0; iter < config.num_iterations; iter++) {
            calculator.sequentialExecution(
                const_cast<const float**>(h_a.data()),
                const_cast<const float**>(h_b.data()),
                h_c.data(),
                config.array_size,
                config.num_sub_tasks
            );
        }
        
        auto end = std::chrono::high_resolution_clock::now();
        result.execution_time = std::chrono::duration<double, std::milli>(end - start).count();
        
        // Validate results
        FloatArrayValidator validator;
        bool all_valid = true;
        int total_errors = 0;
        
        for (int i = 0; i < config.num_sub_tasks; i++) {
            ValidationResult v_result = validator.validate(
                h_c[i], h_expected[i], config.array_size
            );
            
            if (!v_result.passed) {
                all_valid = false;
                total_errors += v_result.error_count;
            }
        }
        
        result.passed = all_valid;
        result.error_count = total_errors;
        result.total_elements = config.array_size * config.num_sub_tasks;
        
        return result;
    }
    
    // Run edge case tests
    TestResult test_edge_cases() {
        TestResult result;
        result.test_name = "Edge Case Validation";
        
        EdgeCaseConfig edge_config;
        EdgeCaseGenerator generator(edge_config);
        
        auto start = std::chrono::high_resolution_clock::now();
        
        // Test various edge cases
        std::vector<EdgeCaseType> test_cases = {
            EDGE_ZERO, EDGE_NAN, EDGE_INFINITY, EDGE_DENORMAL, EDGE_LARGE, EDGE_SMALL
        };
        
        bool all_passed = true;
        int total_errors = 0;
        
        for (auto test_case : test_cases) {
            // Generate test data for this edge case
            for (int task = 0; task < config.num_sub_tasks; task++) {
                generator.generate_edge_case_data(
                    h_a[task], h_b[task], config.array_size, test_case
                );
                
                // Calculate expected results
                CPUReferenceCalculator calculator(config.array_size, config.num_sub_tasks);
                calculator.vectorAddCPU(h_a[task], h_b[task], h_expected[task], config.array_size);
            }
            
            // Validate
            FloatArrayValidator validator;
            for (int task = 0; task < config.num_sub_tasks; task++) {
                EdgeCaseResult e_result = generator.validate_edge_cases(
                    h_c[task], h_expected[task], config.array_size
                );
                
                if (!e_result.passed) {
                    all_passed = false;
                    total_errors += e_result.error_count;
                }
            }
        }
        
        auto end = std::chrono::high_resolution_clock::now();
        result.execution_time = std::chrono::duration<double, std::milli>(end - start).count();
        
        result.passed = all_passed;
        result.error_count = total_errors;
        result.total_elements = config.array_size * config.num_sub_tasks * test_cases.size();
        
        return result;
    }
    
    // Run memory layout tests
    TestResult test_memory_layout() {
        TestResult result;
        result.test_name = "Memory Layout Validation";
        
        auto start = std::chrono::high_resolution_clock::now();
        
        std::vector<MemoryPattern> test_patterns = {
            LINEAR, INTERLEAVED, BLOCKED, STRIDED
        };
        
        bool all_valid = true;
        int total_errors = 0;
        
        for (auto pattern : test_patterns) {
            MemoryLayout layout(pattern, 2, 1024, 0, config.array_size);
            MemoryLayoutAnalyzer analyzer(layout);
            
            for (int task = 0; task < config.num_sub_tasks; task++) {
                // Generate test data with specific pattern
                analyzer.generate_pattern(h_a[task], config.array_size, pattern, 2, 1024, task);
                analyzer.generate_pattern(h_b[task], config.array_size, pattern, 3, 1024, task);
                
                // Calculate expected results
                CPUReferenceCalculator calculator(config.array_size, config.num_sub_tasks);
                calculator.vectorAddCPU(h_a[task], h_b[task], h_expected[task], config.array_size);
                
                // Validate memory layout
                bool valid = analyzer.validate_layout(
                    h_c[task], h_expected[task], config.array_size, layout
                );
                
                if (!valid) {
                    all_valid = false;
                    total_errors++;
                }
            }
        }
        
        auto end = std::chrono::high_resolution_clock::now();
        result.execution_time = std::chrono::duration<double, std::milli>(end - start).count();
        
        result.passed = all_valid;
        result.error_count = total_errors;
        result.total_elements = config.array_size * config.num_sub_tasks * test_patterns.size();
        
        return result;
    }
    
    // Run comprehensive validation test
    TestResult test_comprehensive_validation() {
        TestResult result;
        result.test_name = "Comprehensive Validation";
        
        auto start = std::chrono::high_resolution_clock::now();
        
        // Run all test components
        std::vector<TestResult> results;
        
        results.push_back(test_sequential_execution());
        
        if (config.run_edge_cases) {
            results.push_back(test_edge_cases());
        }
        
        if (config.run_memory_analysis) {
            results.push_back(test_memory_layout());
        }
        
        auto end = std::chrono::high_resolution_clock::now();
        result.execution_time = std::chrono::duration<double, std::milli>(end - start).count();
        
        // Aggregate results
        bool all_passed = true;
        int total_errors = 0;
        
        for (const auto& r : results) {
            if (!r.passed) {
                all_passed = false;
            }
            total_errors += r.error_count;
        }
        
        result.passed = all_passed;
        result.error_count = total_errors;
        
        // Store detailed results
        Json::Value test_array(Json::arrayValue);
        for (const auto& r : results) {
            Json::Value test_result;
            test_result["name"] = r.test_name;
            test_result["passed"] = r.passed;
            test_result["execution_time"] = r.execution_time;
            test_result["error_count"] = r.error_count;
            test_array.append(test_result);
        }
        
        result.detailed_results["tests"] = test_array;
        result.detailed_results["total_tests"] = (int)results.size();
        result.detailed_results["total_errors"] = total_errors;
        
        return result;
    }
    
    // Generate comprehensive report
    void generate_report(const TestResult& result) {
        if (!config.generate_json) return;
        
        // Create output directory if it doesn't exist
        mkdir(config.output_dir.c_str(), 0755);
        
        // Generate JSON report
        std::string filename = config.output_dir + "/validation_report_" + 
                              std::to_string(std::time(nullptr)) + ".json";
        
        Json::Value report;
        report["timestamp"] = std::time(nullptr);
        report["array_size"] = config.array_size;
        report["num_sub_tasks"] = config.num_sub_tasks;
        report["tolerance"] = config.tolerance;
        report["overall_result"] = result.passed ? "PASSED" : "FAILED";
        report["total_execution_time"] = result.execution_time;
        report["total_errors"] = result.error_count;
        report["detailed_results"] = result.detailed_results;
        
        // Write to file
        std::ofstream file(filename);
        Json::StreamWriterBuilder builder;
        std::unique_ptr<Json::StreamWriter> writer(builder.newStreamWriter());
        writer->write(report, &file);
        file.close();
        
        printf("Report generated: %s\n", filename.c_str());
    }
    
    // Run all tests
    TestResult run_all_tests() {
        printf("Starting Comprehensive GPU Correctness Validation\n");
        printf("=============================================\n");
        printf("Array size: %d\n", config.array_size);
        printf("Sub-tasks: %d\n", config.num_sub_tasks);
        printf("Tolerance: %.2e\n", config.tolerance);
        printf("Iterations: %d\n", config.num_iterations);
        printf("\n");
        
        TestResult result = test_comprehensive_validation();
        
        printf("\n");
        result.print_summary();
        
        generate_report(result);
        
        return result;
    }
    
private:
    void cleanup() {
        // Free host memory
        for (int i = 0; i < h_a.size(); i++) {
            if (h_a[i]) cudaFreeHost(h_a[i]);
            if (h_b[i]) cudaFreeHost(h_b[i]);
            if (h_c[i]) cudaFreeHost(h_c[i]);
            if (h_expected[i]) cudaFreeHost(h_expected[i]);
        }
        
        // Free device memory
        for (int i = 0; i < d_a.size(); i++) {
            if (d_a[i]) cudaFree(d_a[i]);
            if (d_b[i]) cudaFree(d_b[i]);
            if (d_c[i]) cudaFree(d_c[i]);
        }
        
        h_a.clear();
        h_b.clear();
        h_c.clear();
        h_expected.clear();
        d_a.clear();
        d_b.clear();
        d_c.clear();
    }
};

// Command line interface
// Forward declaration: print_usage() is defined after main() in this file,
// but main() calls it for --help. Without this declaration the call at the
// --help branch fails to compile.
void print_usage(const char* prog_name);

// Command line interface.
// Parses flags into a TestConfig, runs the full suite, and returns 0 on
// success / 1 on failure or error.
int main(int argc, char* argv[]) {
    TestConfig config;
    
    // Parse command line arguments. Value-taking flags consume the next
    // argv entry; unknown flags are reported but do not abort.
    for (int i = 1; i < argc; i++) {
        std::string arg = argv[i];
        
        if (arg == "--array-size" && i + 1 < argc) {
            config.array_size = std::atoi(argv[++i]);
        } else if (arg == "--sub-tasks" && i + 1 < argc) {
            config.num_sub_tasks = std::atoi(argv[++i]);
        } else if (arg == "--tolerance" && i + 1 < argc) {
            config.tolerance = std::atof(argv[++i]);
        } else if (arg == "--iterations" && i + 1 < argc) {
            config.num_iterations = std::atoi(argv[++i]);
        } else if (arg == "--verbose") {
            config.verbose = true;
        } else if (arg == "--no-json") {
            config.generate_json = false;
        } else if (arg == "--skip-edge-cases") {
            config.run_edge_cases = false;
        } else if (arg == "--skip-memory") {
            config.run_memory_analysis = false;
        } else if (arg == "--output-dir" && i + 1 < argc) {
            config.output_dir = argv[++i];
        } else if (arg == "--help") {
            print_usage(argv[0]);
            return 0;
        } else {
            // Previously unknown options were silently ignored; warn so
            // typos don't pass unnoticed.
            fprintf(stderr, "Warning: unknown option '%s' (use --help)\n",
                    arg.c_str());
        }
    }
    
    // Reject non-positive numeric settings (atoi returns 0 on garbage),
    // which would otherwise produce empty or degenerate test runs.
    if (config.array_size <= 0 || config.num_sub_tasks <= 0 ||
        config.num_iterations <= 0) {
        fprintf(stderr,
                "Error: array size, sub-task count and iterations must be positive\n");
        return 1;
    }
    
    try {
        AutomatedTestSuite suite(config);
        TestResult result = suite.run_all_tests();
        
        return result.passed ? 0 : 1;
    } catch (const std::exception& e) {
        printf("Error: %s\n", e.what());
        return 1;
    } catch (...) {
        // Catch-all so non-std exceptions can't escape main.
        printf("Error: unknown exception\n");
        return 1;
    }
}

// Print command-line usage to stdout.
// prog_name: argv[0], echoed in the usage line.
// Option descriptions are aligned to a single column (the original text had
// inconsistent padding between options).
void print_usage(const char* prog_name) {
    printf("Usage: %s [options]\n", prog_name);
    printf("Options:\n");
    printf("  --array-size <size>   Array size per sub-task (default: 1048576)\n");
    printf("  --sub-tasks <count>   Number of sub-tasks (default: 128)\n");
    printf("  --tolerance <value>   Error tolerance (default: 1e-6)\n");
    printf("  --iterations <count>  Number of iterations (default: 5)\n");
    printf("  --verbose             Enable verbose output\n");
    printf("  --no-json             Disable JSON report generation\n");
    printf("  --skip-edge-cases     Skip edge case testing\n");
    printf("  --skip-memory         Skip memory layout testing\n");
    printf("  --output-dir <dir>    Output directory for reports (default: ./validation_results)\n");
    printf("  --help                Show this help\n");
}

// C-style interface functions
extern "C" {
    AutomatedTestSuite* create_test_suite(int array_size, int num_sub_tasks, 
                                         float tolerance, int iterations) {
        TestConfig config;
        config.array_size = array_size;
        config.num_sub_tasks = num_sub_tasks;
        config.tolerance = tolerance;
        config.num_iterations = iterations;
        return new AutomatedTestSuite(config);
    }
    
    void destroy_test_suite(AutomatedTestSuite* suite) {
        delete suite;
    }
    
    int run_validation_tests(int array_size, int num_sub_tasks, float tolerance) {
        try {
            TestConfig config;
            config.array_size = array_size;
            config.num_sub_tasks = num_sub_tasks;
            config.tolerance = tolerance;
            config.verbose = true;
            
            AutomatedTestSuite suite(config);
            TestResult result = suite.run_all_tests();
            
            return result.passed ? 0 : 1;
        } catch (const std::exception& e) {
            printf("Validation failed: %s\n", e.what());
            return 1;
        }
    }
}