/*
 * DCU Parallel Computing Performance Evaluation
 * Comprehensive test implementation comparing multi-stream vs single-task masquerading
 * 
 * This program implements all test modes:
 * 1. Sequential Baseline (no streams)
 * 2. Multi-Stream Execution (Approach A) 
 * 3. Overlapped Streams (optimized multi-stream)
 * 4. Single-Task Masquerading (Approach B)
 */

#include <hip/hip_runtime.h>

#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstdlib>
#include <iomanip>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Error checking macro: wraps every HIP runtime call, prints the failing
// file/line plus HIP's error string, and aborts the process on any failure.
// The temporary uses a reserved-style name (hip_err_) so the macro cannot
// shadow a caller-side variable (the original `error` would shadow any
// caller variable of that name referenced inside `call`, leaving it read
// uninitialized). `call` is parenthesized so comma/operator expressions
// expand safely.
#define CHECK_HIP(call) \
    do { \
        hipError_t hip_err_ = (call); \
        if (hip_err_ != hipSuccess) { \
            std::cerr << "HIP error at " << __FILE__ << ":" << __LINE__ \
                      << " - " << hipGetErrorString(hip_err_) << std::endl; \
            exit(1); \
        } \
    } while(0)

// Configuration structure
// Benchmark parameters, populated from command-line flags in main().
// Field defaults are the documented CLI defaults (see printUsage()).
struct TestConfig {
    int arraySize = 1048576;     // Elements per sub-task array (default 1M)
    int numSubTasks = 8;         // Number of parallel sub-tasks (one buffer triple + stream each)
    int threadsPerBlock = 256;   // GPU threads per block for every kernel launch
    int numRuns = 5;             // Timed repetitions per test mode, for averaging
    std::string testMode = "all"; // One of: all, sequential, multistream, overlapped, masquerading
    bool verbose = false;        // Print per-run timings and memory-setup details
};

// Performance metrics structure
// Collects per-run wall-clock times (milliseconds) and derives summary
// statistics. Usage: call addTime() once per run, then calculateStats()
// once after the final run before reading avgTime/stdDev.
struct PerformanceMetrics {
    double avgTime = 0.0;       // arithmetic mean of recorded times (ms)
    double minTime = 1e9;       // smallest recorded time (ms); sentinel until first addTime
    double maxTime = 0.0;       // largest recorded time (ms)
    double stdDev = 0.0;        // sample standard deviation (ms); valid after calculateStats
    std::vector<double> times;  // raw per-run times (ms)
    
    // Record one run's elapsed time and keep avg/min/max current so they
    // can be inspected between runs.
    void addTime(double time) {
        times.push_back(time);
        // Running mean; calculateStats() later replaces it with an exact value.
        avgTime = (avgTime * (times.size() - 1) + time) / times.size();
        minTime = std::min(minTime, time);
        maxTime = std::max(maxTime, time);
    }
    
    // Finalize statistics: recompute the mean exactly from the raw samples
    // (the incremental update in addTime can accumulate rounding error over
    // many runs) and derive the sample standard deviation (n-1 denominator).
    // stdDev stays 0 when fewer than two samples exist.
    void calculateStats() {
        if (times.empty()) return;
        
        double total = 0.0;
        for (double time : times) {
            total += time;
        }
        avgTime = total / times.size();
        
        if (times.size() < 2) return;
        
        double sum = 0.0;
        for (double time : times) {
            sum += (time - avgTime) * (time - avgTime);
        }
        stdDev = std::sqrt(sum / (times.size() - 1));
    }
};

// Base kernel for vector addition with computational intensity
// Computes c[idx] = sum over 100 iterations of a*b + sinf(a*0.01f)*cosf(b*0.01f).
// The loop body is invariant on purpose: it inflates per-element arithmetic
// so kernel time dominates launch overhead in the benchmark comparisons.
// Expected launch: 1-D grid with at least n total threads; excess threads
// are guarded out. Inputs are read-only and the three buffers are distinct
// allocations, hence const + __restrict__ (enables read-only cache use).
__global__ void vectorAddKernel(const float* __restrict__ a,
                                const float* __restrict__ b,
                                float* __restrict__ c, int n) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n) {
        // Computationally intensive operation to stress GPU
        float result = 0.0f;
        for (int i = 0; i < 100; i++) {
            result += a[idx] * b[idx] + sinf(a[idx] * 0.01f) * cosf(b[idx] * 0.01f);
        }
        c[idx] = result;
    }
}

// Single-task masquerading kernel (Approach B)
// Processes all sub-tasks' arrays in ONE kernel launch: each thread decodes
// its flat global index into a (subTaskId, localIdx) pair via div/mod, then
// fetches that sub-task's buffers from device-resident pointer tables.
// The per-element math is identical to vectorAddKernel, so only the dispatch
// strategy differs between the two approaches.
// Preconditions: a_arrays/b_arrays/c_arrays are device arrays of numSubTasks
// device pointers, each pointing at arraySize floats; launch with at least
// arraySize * numSubTasks threads.
__global__ void masqueradingKernel(float** a_arrays, float** b_arrays, float** c_arrays, 
                                  int arraySize, int numSubTasks) {
    int globalIdx = blockIdx.x * blockDim.x + threadIdx.x;
    int totalElements = arraySize * numSubTasks;
    
    if (globalIdx < totalElements) {
        // Determine sub-task assignment (introduces branching overhead)
        int subTaskId = globalIdx / arraySize;
        int localIdx = globalIdx % arraySize;
        
        // Conditional branching for task assignment.
        // NOTE: both conditions are logically redundant (they follow from
        // globalIdx < totalElements), but they are retained deliberately —
        // per-thread branch/dispatch overhead is exactly what Approach B
        // is designed to measure. Do not "optimize" this away.
        if (subTaskId < numSubTasks && localIdx < arraySize) {
            float* a = a_arrays[subTaskId];
            float* b = b_arrays[subTaskId];
            float* c = c_arrays[subTaskId];
            
            // Same computational intensity as base kernel
            float result = 0.0f;
            for (int i = 0; i < 100; i++) {
                result += a[localIdx] * b[localIdx] + sinf(a[localIdx] * 0.01f) * cosf(b[localIdx] * 0.01f);
            }
            c[localIdx] = result;
        }
    }
}

// Drives the four benchmark modes (sequential baseline, multi-stream,
// overlapped streams, single-task masquerading) over one shared set of
// buffers: pinned host arrays, per-sub-task device arrays, and one HIP
// stream per sub-task. All GPU resources live for the object's lifetime
// (allocated in the constructor via setupMemory(), released in the
// destructor via cleanup()).
class DCUPerformanceTest {
private:
    TestConfig config;
    std::vector<float*> h_a, h_b, h_c;  // Host arrays (pinned via hipHostMalloc)
    std::vector<float*> d_a, d_b, d_c;  // Device arrays, one triple per sub-task
    std::vector<hipStream_t> streams;   // HIP streams, one per sub-task
    
    // Device-resident tables of the d_a/d_b/d_c pointers so the masquerading
    // kernel can reach every sub-task's buffers from a single launch.
    float** d_a_ptrs = nullptr;
    float** d_b_ptrs = nullptr; 
    float** d_c_ptrs = nullptr;
    
public:
    DCUPerformanceTest(const TestConfig& cfg) : config(cfg) {
        setupMemory();
    }
    
    // Non-copyable: the destructor frees raw HIP resources, so a copy would
    // double-free device memory and destroy streams twice.
    DCUPerformanceTest(const DCUPerformanceTest&) = delete;
    DCUPerformanceTest& operator=(const DCUPerformanceTest&) = delete;
    
    ~DCUPerformanceTest() {
        cleanup();
    }
    
    // Allocate and initialize all host/device buffers, upload the device-side
    // pointer tables for the masquerading kernel, and create the per-sub-task
    // streams. Called once from the constructor; aborts via CHECK_HIP on any
    // allocation failure.
    void setupMemory() {
        // Pinned host memory enables true async copies and higher transfer bandwidth.
        h_a.resize(config.numSubTasks);
        h_b.resize(config.numSubTasks);
        h_c.resize(config.numSubTasks);
        
        for (int i = 0; i < config.numSubTasks; i++) {
            CHECK_HIP(hipHostMalloc(&h_a[i], config.arraySize * sizeof(float)));
            CHECK_HIP(hipHostMalloc(&h_b[i], config.arraySize * sizeof(float)));
            CHECK_HIP(hipHostMalloc(&h_c[i], config.arraySize * sizeof(float)));
            
            // Initialize inputs with random data; h_c is output-only.
            initializeArray(h_a[i], config.arraySize);
            initializeArray(h_b[i], config.arraySize);
        }
        
        // Allocate device memory, one a/b/c triple per sub-task.
        d_a.resize(config.numSubTasks);
        d_b.resize(config.numSubTasks);
        d_c.resize(config.numSubTasks);
        
        for (int i = 0; i < config.numSubTasks; i++) {
            CHECK_HIP(hipMalloc(&d_a[i], config.arraySize * sizeof(float)));
            CHECK_HIP(hipMalloc(&d_b[i], config.arraySize * sizeof(float)));
            CHECK_HIP(hipMalloc(&d_c[i], config.arraySize * sizeof(float)));
        }
        
        // Upload the pointer tables consumed by masqueradingKernel.
        CHECK_HIP(hipMalloc(&d_a_ptrs, config.numSubTasks * sizeof(float*)));
        CHECK_HIP(hipMalloc(&d_b_ptrs, config.numSubTasks * sizeof(float*)));
        CHECK_HIP(hipMalloc(&d_c_ptrs, config.numSubTasks * sizeof(float*)));
        
        CHECK_HIP(hipMemcpy(d_a_ptrs, d_a.data(), config.numSubTasks * sizeof(float*), hipMemcpyHostToDevice));
        CHECK_HIP(hipMemcpy(d_b_ptrs, d_b.data(), config.numSubTasks * sizeof(float*), hipMemcpyHostToDevice));
        CHECK_HIP(hipMemcpy(d_c_ptrs, d_c.data(), config.numSubTasks * sizeof(float*), hipMemcpyHostToDevice));
        
        // One stream per sub-task; the overlapped mode reuses a subset of these.
        streams.resize(config.numSubTasks);
        for (int i = 0; i < config.numSubTasks; i++) {
            CHECK_HIP(hipStreamCreate(&streams[i]));
        }
        
        if (config.verbose) {
            std::cout << "Memory setup complete:" << std::endl;
            std::cout << "  Sub-tasks: " << config.numSubTasks << std::endl;
            std::cout << "  Array size: " << config.arraySize << " elements" << std::endl;
            std::cout << "  Total memory: " << (config.numSubTasks * config.arraySize * 3 * sizeof(float) / 1024 / 1024) << " MB" << std::endl;
        }
    }
    
    // Fill `array` with uniform pseudo-random floats in [0, 1].
    // Uses rand() unseeded, so runs are reproducible across invocations.
    void initializeArray(float* array, int size) {
        for (int i = 0; i < size; i++) {
            array[i] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
        }
    }
    
    // Test Mode 1: Sequential Baseline (no streams)
    // Each sub-task runs H2D copy -> kernel -> D2H copy on the default
    // stream, strictly one after another. Timing covers transfers + kernels.
    PerformanceMetrics testSequentialBaseline() {
        PerformanceMetrics metrics;
        
        // Ceil-divide so partial final blocks still cover every element.
        const int gridSize = (config.arraySize + config.threadsPerBlock - 1) / config.threadsPerBlock;
        
        for (int run = 0; run < config.numRuns; run++) {
            auto start = std::chrono::high_resolution_clock::now();
            
            // Sequential execution: process each sub-task one by one.
            for (int i = 0; i < config.numSubTasks; i++) {
                // Copy inputs to device (blocking copies on the default stream).
                CHECK_HIP(hipMemcpy(d_a[i], h_a[i], config.arraySize * sizeof(float), hipMemcpyHostToDevice));
                CHECK_HIP(hipMemcpy(d_b[i], h_b[i], config.arraySize * sizeof(float), hipMemcpyHostToDevice));
                
                // Launch kernel and check for launch-configuration errors.
                hipLaunchKernelGGL(vectorAddKernel, dim3(gridSize), dim3(config.threadsPerBlock), 
                                 0, 0, d_a[i], d_b[i], d_c[i], config.arraySize);
                CHECK_HIP(hipGetLastError());
                
                // Copy result back (blocks until the kernel finishes).
                CHECK_HIP(hipMemcpy(h_c[i], d_c[i], config.arraySize * sizeof(float), hipMemcpyDeviceToHost));
            }
            
            CHECK_HIP(hipDeviceSynchronize());
            
            auto end = std::chrono::high_resolution_clock::now();
            double duration = std::chrono::duration<double, std::milli>(end - start).count();
            metrics.addTime(duration);
            
            if (config.verbose) {
                std::cout << "  Sequential run " << (run + 1) << ": " << duration << " ms" << std::endl;
            }
        }
        
        metrics.calculateStats();
        return metrics;
    }
    
    // Test Mode 2: Multi-Stream Execution (Approach A)
    // Every sub-task gets its own stream; copies and kernels are issued
    // asynchronously so transfers and compute from different sub-tasks can
    // overlap. Timing covers transfers + kernels, like the baseline.
    PerformanceMetrics testMultiStream() {
        PerformanceMetrics metrics;
        
        const int gridSize = (config.arraySize + config.threadsPerBlock - 1) / config.threadsPerBlock;
        
        for (int run = 0; run < config.numRuns; run++) {
            auto start = std::chrono::high_resolution_clock::now();
            
            // Launch all sub-tasks on separate streams; within a stream the
            // H2D -> kernel -> D2H sequence is implicitly ordered.
            for (int i = 0; i < config.numSubTasks; i++) {
                CHECK_HIP(hipMemcpyAsync(d_a[i], h_a[i], config.arraySize * sizeof(float), 
                                       hipMemcpyHostToDevice, streams[i]));
                CHECK_HIP(hipMemcpyAsync(d_b[i], h_b[i], config.arraySize * sizeof(float), 
                                       hipMemcpyHostToDevice, streams[i]));
                
                hipLaunchKernelGGL(vectorAddKernel, dim3(gridSize), dim3(config.threadsPerBlock), 
                                 0, streams[i], d_a[i], d_b[i], d_c[i], config.arraySize);
                CHECK_HIP(hipGetLastError());
                
                CHECK_HIP(hipMemcpyAsync(h_c[i], d_c[i], config.arraySize * sizeof(float), 
                                       hipMemcpyDeviceToHost, streams[i]));
            }
            
            // Wait for all streams to complete.
            for (int i = 0; i < config.numSubTasks; i++) {
                CHECK_HIP(hipStreamSynchronize(streams[i]));
            }
            
            auto end = std::chrono::high_resolution_clock::now();
            double duration = std::chrono::duration<double, std::milli>(end - start).count();
            metrics.addTime(duration);
            
            if (config.verbose) {
                std::cout << "  Multi-stream run " << (run + 1) << ": " << duration << " ms" << std::endl;
            }
        }
        
        metrics.calculateStats();
        return metrics;
    }
    
    // Test Mode 3: Overlapped Streams (optimized multi-stream)
    // Same per-sub-task pipeline as Mode 2, but cycled over at most four
    // streams; sub-tasks sharing a stream serialize behind each other, which
    // bounds the number of in-flight transfers/kernels.
    PerformanceMetrics testOverlappedStreams() {
        PerformanceMetrics metrics;
        
        const int gridSize = (config.arraySize + config.threadsPerBlock - 1) / config.threadsPerBlock;
        const int numActiveStreams = std::min(4, config.numSubTasks); // Limit concurrent streams
        
        for (int run = 0; run < config.numRuns; run++) {
            auto start = std::chrono::high_resolution_clock::now();
            
            // Overlapped execution with stream cycling. Distinct sub-tasks use
            // distinct buffers, so sharing a stream is safe (no data races).
            for (int i = 0; i < config.numSubTasks; i++) {
                int streamIdx = i % numActiveStreams;
                
                CHECK_HIP(hipMemcpyAsync(d_a[i], h_a[i], config.arraySize * sizeof(float), 
                                       hipMemcpyHostToDevice, streams[streamIdx]));
                CHECK_HIP(hipMemcpyAsync(d_b[i], h_b[i], config.arraySize * sizeof(float), 
                                       hipMemcpyHostToDevice, streams[streamIdx]));
                
                hipLaunchKernelGGL(vectorAddKernel, dim3(gridSize), dim3(config.threadsPerBlock), 
                                 0, streams[streamIdx], d_a[i], d_b[i], d_c[i], config.arraySize);
                CHECK_HIP(hipGetLastError());
                
                CHECK_HIP(hipMemcpyAsync(h_c[i], d_c[i], config.arraySize * sizeof(float), 
                                       hipMemcpyDeviceToHost, streams[streamIdx]));
            }
            
            // Only the first numActiveStreams streams carried work this run.
            for (int i = 0; i < numActiveStreams; i++) {
                CHECK_HIP(hipStreamSynchronize(streams[i]));
            }
            
            auto end = std::chrono::high_resolution_clock::now();
            double duration = std::chrono::duration<double, std::milli>(end - start).count();
            metrics.addTime(duration);
            
            if (config.verbose) {
                std::cout << "  Overlapped stream run " << (run + 1) << ": " << duration << " ms" << std::endl;
            }
        }
        
        metrics.calculateStats();
        return metrics;
    }
    
    // Test Mode 4: Single-Task Masquerading (Approach B)
    // All sub-tasks are fused into one large kernel launch that dispatches
    // internally (see masqueradingKernel).
    // NOTE(review): unlike modes 1-3, only the kernel is timed here — the
    // H2D/D2H transfers sit outside the start/end timestamps. Keep this
    // asymmetry in mind when comparing averages across modes; confirm it is
    // the intended methodology.
    PerformanceMetrics testSingleTaskMasquerading() {
        PerformanceMetrics metrics;
        
        const int totalElements = config.arraySize * config.numSubTasks;
        const int gridSize = (totalElements + config.threadsPerBlock - 1) / config.threadsPerBlock;
        
        for (int run = 0; run < config.numRuns; run++) {
            // Stage all input data on the device before timing begins.
            for (int i = 0; i < config.numSubTasks; i++) {
                CHECK_HIP(hipMemcpy(d_a[i], h_a[i], config.arraySize * sizeof(float), hipMemcpyHostToDevice));
                CHECK_HIP(hipMemcpy(d_b[i], h_b[i], config.arraySize * sizeof(float), hipMemcpyHostToDevice));
            }
            
            auto start = std::chrono::high_resolution_clock::now();
            
            // Single kernel launch with masquerading logic covering all sub-tasks.
            hipLaunchKernelGGL(masqueradingKernel, dim3(gridSize), dim3(config.threadsPerBlock), 
                             0, 0, d_a_ptrs, d_b_ptrs, d_c_ptrs, config.arraySize, config.numSubTasks);
            CHECK_HIP(hipGetLastError());
            CHECK_HIP(hipDeviceSynchronize());
            
            auto end = std::chrono::high_resolution_clock::now();
            
            // Copy results back (outside the timed region).
            for (int i = 0; i < config.numSubTasks; i++) {
                CHECK_HIP(hipMemcpy(h_c[i], d_c[i], config.arraySize * sizeof(float), hipMemcpyDeviceToHost));
            }
            
            double duration = std::chrono::duration<double, std::milli>(end - start).count();
            metrics.addTime(duration);
            
            if (config.verbose) {
                std::cout << "  Masquerading run " << (run + 1) << ": " << duration << " ms" << std::endl;
            }
        }
        
        metrics.calculateStats();
        return metrics;
    }
    
    // Print device info, run the modes selected by config.testMode, and
    // display the comparative results table.
    void runAllTests() {
        printDeviceInfo();
        
        std::cout << "\nDCU Performance Evaluation" << std::endl;
        std::cout << "==========================" << std::endl;
        std::cout << "Array size: " << config.arraySize << " elements" << std::endl;
        std::cout << "Sub-tasks: " << config.numSubTasks << std::endl;
        std::cout << "Threads per block: " << config.threadsPerBlock << std::endl;
        std::cout << "Test runs: " << config.numRuns << std::endl;
        std::cout << "Test mode: " << config.testMode << std::endl << std::endl;
        
        // Metrics for skipped modes stay default-constructed (times empty),
        // which displayResults uses to suppress their output.
        PerformanceMetrics sequentialMetrics, multiStreamMetrics, overlappedMetrics, masqueradingMetrics;
        
        if (config.testMode == "all" || config.testMode == "sequential") {
            std::cout << "Running Sequential Baseline Test..." << std::endl;
            sequentialMetrics = testSequentialBaseline();
        }
        
        if (config.testMode == "all" || config.testMode == "multistream") {
            std::cout << "Running Multi-Stream Test (Approach A)..." << std::endl;
            multiStreamMetrics = testMultiStream();
        }
        
        if (config.testMode == "all" || config.testMode == "overlapped") {
            std::cout << "Running Overlapped Streams Test..." << std::endl;
            overlappedMetrics = testOverlappedStreams();
        }
        
        if (config.testMode == "all" || config.testMode == "masquerading") {
            std::cout << "Running Single-Task Masquerading Test (Approach B)..." << std::endl;
            masqueradingMetrics = testSingleTaskMasquerading();
        }
        
        displayResults(sequentialMetrics, multiStreamMetrics, overlappedMetrics, masqueradingMetrics);
    }
    
    // Print avg ± stddev for each mode that ran (empty `times` means the
    // mode was skipped), speedups relative to the sequential baseline, and
    // an A-vs-B comparison when both approaches produced data.
    void displayResults(const PerformanceMetrics& sequential, const PerformanceMetrics& multiStream,
                       const PerformanceMetrics& overlapped, const PerformanceMetrics& masquerading) {
        std::cout << "\nPerformance Results" << std::endl;
        std::cout << "==================" << std::endl;
        std::cout << std::fixed << std::setprecision(2);
        
        if (sequential.times.size() > 0) {
            std::cout << "Sequential Baseline:     " << sequential.avgTime << " ± " << sequential.stdDev << " ms" << std::endl;
        }
        if (multiStream.times.size() > 0) {
            std::cout << "Multi-Stream (A):        " << multiStream.avgTime << " ± " << multiStream.stdDev << " ms";
            if (sequential.times.size() > 0) {
                double speedup = sequential.avgTime / multiStream.avgTime;
                std::cout << " (speedup: " << speedup << "x)";
            }
            std::cout << std::endl;
        }
        if (overlapped.times.size() > 0) {
            std::cout << "Overlapped Streams:      " << overlapped.avgTime << " ± " << overlapped.stdDev << " ms";
            if (sequential.times.size() > 0) {
                double speedup = sequential.avgTime / overlapped.avgTime;
                std::cout << " (speedup: " << speedup << "x)";
            }
            std::cout << std::endl;
        }
        if (masquerading.times.size() > 0) {
            std::cout << "Single-Task Masq. (B):   " << masquerading.avgTime << " ± " << masquerading.stdDev << " ms";
            if (sequential.times.size() > 0) {
                double speedup = sequential.avgTime / masquerading.avgTime;
                std::cout << " (speedup: " << speedup << "x)";
            }
            std::cout << std::endl;
        }
        
        // A-vs-B comparison and conclusion are only meaningful when BOTH
        // approaches actually ran. (Previously the conclusion compared
        // default-zero averages in single-mode runs and printed a bogus
        // verdict; the percent line also claimed "faster" even when the
        // value was negative.)
        if (multiStream.times.size() > 0 && masquerading.times.size() > 0) {
            double advantage = ((masquerading.avgTime - multiStream.avgTime) / masquerading.avgTime) * 100;
            if (advantage >= 0) {
                std::cout << "\nApproach A vs B: Multi-stream is " << advantage << "% faster than masquerading" << std::endl;
            } else {
                std::cout << "\nApproach A vs B: Masquerading is " << -advantage << "% faster than multi-stream" << std::endl;
            }
            
            std::cout << "\nConclusion: ";
            if (multiStream.avgTime < masquerading.avgTime) {
                std::cout << "Multi-Stream Execution (Approach A) outperforms Single-Task Masquerading (Approach B)" << std::endl;
            } else {
                std::cout << "Single-Task Masquerading (Approach B) outperforms Multi-Stream Execution (Approach A)" << std::endl;
            }
        }
    }
    
    // Print static properties of device 0 (name, SM count, memory geometry).
    void printDeviceInfo() {
        hipDeviceProp_t prop;
        CHECK_HIP(hipGetDeviceProperties(&prop, 0));
        
        std::cout << "Device Information" << std::endl;
        std::cout << "==================" << std::endl;
        std::cout << "Device: " << prop.name << std::endl;
        std::cout << "Compute Capability: " << prop.major << "." << prop.minor << std::endl;
        std::cout << "Multiprocessors: " << prop.multiProcessorCount << std::endl;
        std::cout << "Max Threads per Block: " << prop.maxThreadsPerBlock << std::endl;
        std::cout << "Global Memory: " << (prop.totalGlobalMem / (1024 * 1024)) << " MB" << std::endl;
        std::cout << "Memory Clock Rate: " << prop.memoryClockRate << " kHz" << std::endl;
        std::cout << "Memory Bus Width: " << prop.memoryBusWidth << " bits" << std::endl;
        // 2x for double-data-rate memory; clock is in kHz, hence the 1e6 scale to GB/s.
        std::cout << "Peak Memory Bandwidth: " << (2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) / 1.0e6) << " GB/s" << std::endl;
    }
    
    // Release every HIP resource acquired in setupMemory(). Errors are
    // deliberately ignored here (best-effort teardown from the destructor).
    void cleanup() {
        // Free pinned host memory.
        for (size_t i = 0; i < h_a.size(); i++) {
            if (h_a[i]) hipHostFree(h_a[i]);
            if (h_b[i]) hipHostFree(h_b[i]);
            if (h_c[i]) hipHostFree(h_c[i]);
        }
        
        // Free per-sub-task device memory.
        for (size_t i = 0; i < d_a.size(); i++) {
            if (d_a[i]) hipFree(d_a[i]);
            if (d_b[i]) hipFree(d_b[i]);
            if (d_c[i]) hipFree(d_c[i]);
        }
        
        // Free the device-side pointer tables.
        if (d_a_ptrs) hipFree(d_a_ptrs);
        if (d_b_ptrs) hipFree(d_b_ptrs);
        if (d_c_ptrs) hipFree(d_c_ptrs);
        
        // Destroy streams last, after all work issued on them has completed.
        for (auto& stream : streams) {
            hipStreamDestroy(stream);
        }
    }
};

// Print the command-line usage summary for this benchmark to stdout.
// progName is echoed verbatim (typically argv[0]).
void printUsage(const char* progName) {
    std::cout << "Usage: " << progName << " [options]" << std::endl;
    // Fixed help text, one entry per line for easy maintenance.
    static const char* const kHelpLines[] = {
        "Options:",
        "  --array-size <size>      Array size per sub-task (default: 1048576)",
        "  --sub-tasks <count>      Number of parallel sub-tasks (default: 8)",
        "  --threads <count>        Threads per block (default: 256)",
        "  --runs <count>           Number of test runs (default: 5)",
        "  --mode <mode>            Test mode: all, sequential, multistream, overlapped, masquerading (default: all)",
        "  --verbose                Enable verbose output",
        "  --help                   Show this help",
    };
    for (const char* const line : kHelpLines) {
        std::cout << line << std::endl;
    }
}

// Entry point: parse CLI flags into a TestConfig, validate it, and run the
// selected benchmark mode(s). Returns 0 on success, 1 on bad arguments or
// an uncaught exception during the tests.
int main(int argc, char* argv[]) {
    TestConfig config;
    
    // Walk the argument list; value-taking flags consume the next token.
    // A value-taking flag given as the final token has no value and falls
    // through to the unknown-argument error, matching the documented usage.
    for (int i = 1; i < argc; i++) {
        const std::string arg = argv[i];
        const bool hasValue = (i + 1 < argc);
        
        if (arg == "--help") {
            printUsage(argv[0]);
            return 0;
        }
        if (arg == "--verbose") {
            config.verbose = true;
            continue;
        }
        if (hasValue && arg == "--array-size") {
            config.arraySize = std::atoi(argv[++i]);
            continue;
        }
        if (hasValue && arg == "--sub-tasks") {
            config.numSubTasks = std::atoi(argv[++i]);
            continue;
        }
        if (hasValue && arg == "--threads") {
            config.threadsPerBlock = std::atoi(argv[++i]);
            continue;
        }
        if (hasValue && arg == "--runs") {
            config.numRuns = std::atoi(argv[++i]);
            continue;
        }
        if (hasValue && arg == "--mode") {
            config.testMode = argv[++i];
            continue;
        }
        
        std::cerr << "Unknown argument: " << arg << std::endl;
        printUsage(argv[0]);
        return 1;
    }
    
    // Numeric parameters must all be positive (atoi yields 0 for garbage,
    // which this check also rejects).
    const bool numericOk = config.arraySize > 0 && config.numSubTasks > 0 &&
                           config.threadsPerBlock > 0 && config.numRuns > 0;
    if (!numericOk) {
        std::cerr << "Error: All numeric parameters must be positive" << std::endl;
        return 1;
    }
    
    // The mode string must be one of the supported selections.
    static const char* const kValidModes[] = {
        "all", "sequential", "multistream", "overlapped", "masquerading"
    };
    bool modeOk = false;
    for (const char* const mode : kValidModes) {
        if (config.testMode == mode) {
            modeOk = true;
            break;
        }
    }
    if (!modeOk) {
        std::cerr << "Error: Invalid test mode. Use: all, sequential, multistream, overlapped, masquerading" << std::endl;
        return 1;
    }
    
    try {
        DCUPerformanceTest test(config);
        test.runAllTests();
    } catch (const std::exception& e) {
        std::cerr << "Error: " << e.what() << std::endl;
        return 1;
    }
    
    return 0;
}