/*
 * CUDA Validation Kernels for GPU Correctness Verification Framework
 * Implements validation kernels for all execution modes: sequential, masquerading, and sub-task
 * Includes correctness checking and memory layout analysis
 */

#include <cuda_runtime.h>
#include <math.h>
#include <stdio.h>

// Validation kernel for sequential execution mode.
// Grid/block layout: 1-D, one thread per element, bounds-checked against n.
// Computes c[i] = a[i] + b[i] and, for the first debug_offset elements,
// optionally records trace data:
//   debug_info   — 3 ints per element: {blockIdx.x, threadIdx.x, global index}
//   debug_values — 4 floats per element: {a, b, computed sum, expected sum}
__global__ void sequentialValidationKernel(
    const float* __restrict__ a,
    const float* __restrict__ b,
    float* __restrict__ c,
    int n,
    int* __restrict__ debug_info,
    float* __restrict__ debug_values,
    int debug_offset
) {
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= n) {
        return;
    }

    // Element-wise vector addition is the validation workload.
    const float sum = a[gid] + b[gid];
    c[gid] = sum;

    // Only the first debug_offset threads record trace data.
    if (debug_info != NULL && gid < debug_offset) {
        int* info = debug_info + gid * 3;
        info[0] = blockIdx.x;
        info[1] = threadIdx.x;
        info[2] = gid;

        if (debug_values != NULL) {
            float* vals = debug_values + gid * 4;
            vals[0] = a[gid];
            vals[1] = b[gid];
            vals[2] = sum;
            vals[3] = a[gid] + b[gid]; // expected value, recomputed
        }
    }
}

// Validation kernel for masquerading execution mode.
// Flattens numSubTasks independent vector additions into one 1-D launch:
// thread globalIdx processes element (globalIdx % arraySize) of sub-task
// (globalIdx / arraySize). a_arrays/b_arrays/c_arrays hold one device
// pointer per sub-task; each pointed-to buffer is indexed up to arraySize.
// For the first debug_offset flat indices, optionally records trace data:
//   debug_info   — 4 ints  per index: {blockIdx.x, threadIdx.x, subTask, localIdx}
//   debug_values — 4 floats per index: {a, b, computed sum, expected sum}
__global__ void masqueradingValidationKernel(
    const float** __restrict__ a_arrays,
    const float** __restrict__ b_arrays,
    float** __restrict__ c_arrays,
    int arraySize,
    int numSubTasks,
    int* __restrict__ debug_info,
    float* __restrict__ debug_values,
    int debug_offset
) {
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= arraySize * numSubTasks) {
        return;
    }

    // Map the flat thread index onto (sub-task, element-within-sub-task).
    const int task = gid / arraySize;
    const int elem = gid % arraySize;

    const float* srcA = a_arrays[task];
    const float* srcB = b_arrays[task];
    float* dst = c_arrays[task];

    const float sum = srcA[elem] + srcB[elem];
    dst[elem] = sum;

    // Trace data is only kept for the first debug_offset flat indices.
    if (debug_info != NULL && gid < debug_offset) {
        int* info = debug_info + gid * 4;
        info[0] = blockIdx.x;
        info[1] = threadIdx.x;
        info[2] = task;
        info[3] = elem;

        if (debug_values != NULL) {
            float* vals = debug_values + gid * 4;
            vals[0] = srcA[elem];
            vals[1] = srcB[elem];
            vals[2] = sum;
            vals[3] = srcA[elem] + srcB[elem]; // expected value, recomputed
        }
    }
}

// Validation kernel for sub-task sequential execution.
// Identical workload to sequentialValidationKernel (c[i] = a[i] + b[i],
// one thread per element, 1-D launch), but tags the debug records with the
// host-supplied subTaskId so traces from separate launches can be correlated.
//   debug_info   — 4 ints  per element: {blockIdx.x, threadIdx.x, subTaskId, index}
//   debug_values — 4 floats per element: {a, b, computed sum, expected sum}
__global__ void subTaskSequentialValidationKernel(
    const float* __restrict__ a,
    const float* __restrict__ b,
    float* __restrict__ c,
    int n,
    int subTaskId,
    int* __restrict__ debug_info,
    float* __restrict__ debug_values,
    int debug_offset
) {
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= n) {
        return;
    }

    // Element-wise vector addition is the validation workload.
    const float sum = a[gid] + b[gid];
    c[gid] = sum;

    // Only the first debug_offset threads record trace data.
    if (debug_info != NULL && gid < debug_offset) {
        int* info = debug_info + gid * 4;
        info[0] = blockIdx.x;
        info[1] = threadIdx.x;
        info[2] = subTaskId;
        info[3] = gid;

        if (debug_values != NULL) {
            float* vals = debug_values + gid * 4;
            vals[0] = a[gid];
            vals[1] = b[gid];
            vals[2] = sum;
            vals[3] = a[gid] + b[gid]; // expected value, recomputed
        }
    }
}

// Memory layout analysis kernel.
// For each thread index, computes which element index the configured access
// pattern WOULD touch and records it for host-side analysis. The pattern
// selector is read from access_pattern[0]:
//   0 = linear, 1 = interleaved (idx*stride + offset),
//   2 = blocked ((idx/stride)*stride + idx%stride + offset), other = linear.
// The result for thread idx is written to access_pattern[idx + 1].
// NOTE(review): access_pattern must therefore hold at least n + 1 ints
// (slot 0 = selector, slots 1..n = results) — confirm against the caller.
// NOTE(review): `input` is never dereferenced here; presumably retained for
// interface symmetry or future timed-access experiments — verify.
// NOTE(review): a negative `offset` yields a negative actual_idx, which still
// satisfies `actual_idx < n` and is stored as-is — confirm callers only pass
// non-negative offsets, or treat negative stored indices as "out of range".
__global__ void memoryLayoutAnalysisKernel(
    const float* __restrict__ input,
    int* __restrict__ access_pattern,
    int n,
    int stride,
    int offset
) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    
    if (idx < n) {
        // Calculate actual memory access based on pattern
        int actual_idx;
        
        // All threads read the selector from slot 0; writes below start at
        // slot 1, so the read and the writes never alias.
        switch (access_pattern[0]) {
            case 0: // Linear access
                actual_idx = idx;
                break;
            case 1: // Interleaved access
                actual_idx = idx * stride + offset;
                break;
            case 2: // Blocked access
                actual_idx = (idx / stride) * stride + (idx % stride) + offset;
                break;
            default:
                actual_idx = idx;
        }
        
        // Only indices that land inside the n-element buffer are recorded;
        // out-of-range patterns leave their slot untouched.
        if (actual_idx < n) {
            // Store access pattern for analysis
            access_pattern[idx + 1] = actual_idx;
        }
    }
}

// Error checking kernel.
// Compares computed[i] against expected[i] with an absolute tolerance and
// writes a per-element verdict:
//   error_flags[i]       — 1 on mismatch, 0 on match (every slot is written)
//   error_values[i*2..]  — {computed, expected} pair, written only on mismatch
// NaN semantics: two NaNs are treated as a match (the edge-case kernels in
// this framework intentionally produce NaNs); a NaN on exactly one side is
// always a mismatch.
__global__ void errorCheckingKernel(
    const float* __restrict__ computed,
    const float* __restrict__ expected,
    int* __restrict__ error_flags,
    float* __restrict__ error_values,
    int n,
    float tolerance
) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;

    if (idx < n) {
        const float got = computed[idx];
        const float want = expected[idx];

        // BUG FIX: the previous `diff > tolerance` test is false when diff is
        // NaN, so a NaN result was silently reported as correct. Using
        // `!(diff <= tolerance)` flags NaN differences as errors, while the
        // explicit both-NaN case still counts as a match.
        const bool both_nan = isnan(got) && isnan(want);
        const float diff = fabsf(got - want);
        const bool mismatch = !both_nan && !(diff <= tolerance);

        if (mismatch) {
            error_flags[idx] = 1;
            error_values[idx * 2] = got;
            error_values[idx * 2 + 1] = want;
        } else {
            error_flags[idx] = 0;
        }
    }
}

// Edge case handling kernel.
// Exercises floating-point edge cases for the validation framework; test_case
// selects the scenario applied to c[i]:
//   0: zero arrays — result is always 0.0f, inputs ignored
//   1: NaN handling — a NaN in either operand is sanitized to 0.0f
//   2: infinity — any infinite operand forces +INFINITY
//   3: denormals — a near-zero operand (|x| < 1e-38f) flushes the result to 0
//   other: plain addition
// status_flags (optional, may be NULL) classifies each produced value:
//   0 = normal, 1 = NaN, 2 = infinite, 3 = non-zero with |r| < 1e-38f
__global__ void edgeCaseHandlingKernel(
    const float* __restrict__ a,
    const float* __restrict__ b,
    float* __restrict__ c,
    int n,
    int test_case,  // 0: zeros, 1: NaN, 2: Inf, 3: denormals
    int* __restrict__ status_flags
) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;

    if (idx < n) {
        float result;

        switch (test_case) {
            case 0: // Zero arrays: fixed zero result
                result = 0.0f;
                break;
            case 1: // NaN handling: sanitize if EITHER operand is NaN.
                // BUG FIX: previously only a[idx] was tested, so a NaN in
                // b[idx] propagated into the sum and defeated the
                // sanitization this case exists to exercise.
                result = (isnan(a[idx]) || isnan(b[idx])) ? 0.0f : a[idx] + b[idx];
                break;
            case 2: // Infinity handling: any infinite operand dominates
                if (isinf(a[idx]) || isinf(b[idx])) {
                    result = INFINITY;
                } else {
                    result = a[idx] + b[idx];
                }
                break;
            case 3: // Denormals: flush near-zero operands to a zero result
                result = (fabsf(a[idx]) < 1e-38f || fabsf(b[idx]) < 1e-38f) ? 0.0f : a[idx] + b[idx];
                break;
            default: // Unknown test case: plain addition
                result = a[idx] + b[idx];
        }

        c[idx] = result;

        // Classify the produced value for host-side inspection.
        if (status_flags) {
            if (isnan(result)) status_flags[idx] = 1;
            else if (isinf(result)) status_flags[idx] = 2;
            else if (fabsf(result) < 1e-38f && result != 0.0f) status_flags[idx] = 3;
            else status_flags[idx] = 0;
        }
    }
}

// Launch configuration helper: number of blocks of `blockSize` threads needed
// to cover `n` elements (ceiling division). Returns 0 when n is 0.
inline int calculateGridSize(int n, int blockSize) {
    const int rounded_up = n + blockSize - 1;
    return rounded_up / blockSize;
}

// Wrapper functions for kernel launches
extern "C" {
    // Launches sequentialValidationKernel over n elements on the default
    // stream (asynchronous; caller is responsible for synchronizing before
    // reading c or the debug buffers). All pointers must be device pointers.
    void launchSequentialValidation(
        const float* a, const float* b, float* c, int n,
        int* debug_info, float* debug_values, int debug_offset,
        int threads_per_block = 256
    ) {
        int grid_size = calculateGridSize(n, threads_per_block);
        // A zero grid dimension is an invalid launch configuration; treat an
        // empty input as a no-op instead.
        if (grid_size <= 0) {
            return;
        }
        sequentialValidationKernel<<<grid_size, threads_per_block>>>(
            a, b, c, n, debug_info, debug_values, debug_offset
        );
        // Kernel launches do not return errors directly; surface
        // configuration failures here instead of dropping them silently.
        cudaError_t err = cudaGetLastError();
        if (err != cudaSuccess) {
            fprintf(stderr, "launchSequentialValidation: %s\n",
                    cudaGetErrorString(err));
        }
    }
    
    // Launches masqueradingValidationKernel over num_sub_tasks sub-tasks of
    // array_size elements each, on the default stream (asynchronous; caller
    // must synchronize before reading results). The pointer-array arguments
    // must be device arrays of device pointers.
    void launchMasqueradingValidation(
        const float** a_arrays, const float** b_arrays, float** c_arrays,
        int array_size, int num_sub_tasks,
        int* debug_info, float* debug_values, int debug_offset,
        int threads_per_block = 256
    ) {
        int total_elements = array_size * num_sub_tasks;
        int grid_size = calculateGridSize(total_elements, threads_per_block);
        // A zero grid dimension is an invalid launch configuration; treat an
        // empty workload as a no-op instead.
        if (grid_size <= 0) {
            return;
        }
        masqueradingValidationKernel<<<grid_size, threads_per_block>>>(
            a_arrays, b_arrays, c_arrays, array_size, num_sub_tasks,
            debug_info, debug_values, debug_offset
        );
        // Kernel launches do not return errors directly; surface
        // configuration failures here instead of dropping them silently.
        cudaError_t err = cudaGetLastError();
        if (err != cudaSuccess) {
            fprintf(stderr, "launchMasqueradingValidation: %s\n",
                    cudaGetErrorString(err));
        }
    }
    
    // Launches subTaskSequentialValidationKernel over n elements, tagging
    // debug records with sub_task_id. Runs on the default stream
    // (asynchronous; caller must synchronize before reading results).
    // All pointers must be device pointers.
    void launchSubTaskSequentialValidation(
        const float* a, const float* b, float* c, int n, int sub_task_id,
        int* debug_info, float* debug_values, int debug_offset,
        int threads_per_block = 256
    ) {
        int grid_size = calculateGridSize(n, threads_per_block);
        // A zero grid dimension is an invalid launch configuration; treat an
        // empty input as a no-op instead.
        if (grid_size <= 0) {
            return;
        }
        subTaskSequentialValidationKernel<<<grid_size, threads_per_block>>>(
            a, b, c, n, sub_task_id, debug_info, debug_values, debug_offset
        );
        // Kernel launches do not return errors directly; surface
        // configuration failures here instead of dropping them silently.
        cudaError_t err = cudaGetLastError();
        if (err != cudaSuccess) {
            fprintf(stderr, "launchSubTaskSequentialValidation: %s\n",
                    cudaGetErrorString(err));
        }
    }
    
    // Launches errorCheckingKernel to compare computed vs expected with the
    // given absolute tolerance. Runs on the default stream (asynchronous;
    // caller must synchronize before reading error_flags/error_values).
    // All pointers must be device pointers.
    void launchErrorChecking(
        const float* computed, const float* expected,
        int* error_flags, float* error_values, int n,
        float tolerance = 1e-6f,
        int threads_per_block = 256
    ) {
        int grid_size = calculateGridSize(n, threads_per_block);
        // A zero grid dimension is an invalid launch configuration; treat an
        // empty input as a no-op instead.
        if (grid_size <= 0) {
            return;
        }
        errorCheckingKernel<<<grid_size, threads_per_block>>>(
            computed, expected, error_flags, error_values, n, tolerance
        );
        // Kernel launches do not return errors directly; surface
        // configuration failures here instead of dropping them silently.
        cudaError_t err = cudaGetLastError();
        if (err != cudaSuccess) {
            fprintf(stderr, "launchErrorChecking: %s\n",
                    cudaGetErrorString(err));
        }
    }
}