/*
 * Memory Layout Analysis for GPU Correctness Verification
 * Analyzes different memory access patterns and validates memory layout correctness
 */

#include <cuda_runtime.h>

#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <string>
#include <vector>

// Memory access pattern types
// Memory access pattern types. Shared by the host-side generators and the
// device analysis kernel (kernels receive the value as a plain int).
enum MemoryPattern {
    LINEAR = 0,      // Sequential: 0, 1, 2, 3, ...
    INTERLEAVED = 1, // Scaled by stride: 0, stride, 2*stride, 3*stride, ...
    BLOCKED = 2,     // Contiguous blocks: [0..block_size-1], [block_size..2*block_size-1], ...
    STRIDED = 3,     // Affine: offset, offset+stride, offset+2*stride, ...
    RANDOM = 4       // Pseudo-random indices (fixed seed/LCG for reproducibility)
};

// Memory layout analysis structure
// Configuration describing one memory access pattern to generate/analyze.
// Only the fields relevant to `pattern` are meaningful (e.g. `stride` for
// INTERLEAVED/STRIDED, `block_size` for BLOCKED).
struct MemoryLayout {
    MemoryPattern pattern;  // which access pattern (see MemoryPattern)
    int stride;             // element stride for INTERLEAVED/STRIDED
    int block_size;         // block length for BLOCKED
    int offset;             // starting offset for BLOCKED/STRIDED
    int total_elements;     // number of elements covered by this layout
    
    // Defaults: unit stride, 1024-element blocks, no offset, unknown size.
    MemoryLayout(MemoryPattern p, int s = 1, int b = 1024, int o = 0, int t = 0)
        : pattern(p), stride(s), block_size(b), offset(o), total_elements(t) {}
};

// Memory access analysis result
// Output of MemoryLayoutAnalyzer::analyze_pattern. `valid` is the overall
// success flag; the metric fields are fractions in [0, 1].
struct MemoryAnalysisResult {
    bool valid = false;                  // true when the analysis completed
    int access_count = 0;                // number of accesses analyzed
    float coalescence_ratio = 0.0f;      // fraction of consecutive accesses exactly sizeof(float) apart
    float spatial_locality = 0.0f;       // 1 - (mean address distance / sizeof(float)); simplified metric
    float temporal_locality = 0.0f;      // placeholder; analyze_pattern always reports 1.0
    std::vector<int> access_pattern;     // per-access element index touched
    std::vector<int> memory_addresses;   // per-access byte address (index * sizeof(float))
    
    // Print a human-readable summary of the metrics to stdout.
    void print_summary() const {
        printf("Memory Analysis Summary:\n");
        printf("  Access count: %d\n", access_count);
        printf("  Coalescence ratio: %.2f%%\n", coalescence_ratio * 100.0f);
        printf("  Spatial locality: %.2f%%\n", spatial_locality * 100.0f);
        printf("  Temporal locality: %.2f%%\n", temporal_locality * 100.0f);
        printf("  Valid: %s\n", valid ? "Yes" : "No");
    }
};

// GPU kernel for memory layout analysis
// For each of the n logical accesses, computes the element index and byte
// address that the configured pattern would touch.
//
// Launch: 1-D grid, one thread per logical access (threads with idx >= n exit).
// Outputs (either may be null to skip):
//   access_indices[idx]   - element index touched by logical access idx
//   memory_addresses[idx] - byte offset of that element (index * sizeof(float))
// `input` is accepted for interface symmetry with the data kernels; it is not
// dereferenced here.
__global__ void memory_layout_analysis_kernel(
    const float* __restrict__ input,
    int* __restrict__ access_indices,
    int* __restrict__ memory_addresses,
    int n,
    int pattern_type,
    int stride,
    int block_size,
    int offset
) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;

    int actual_idx;
    switch (pattern_type) {
        case LINEAR:
            actual_idx = idx;
            break;
        case INTERLEAVED:
            actual_idx = idx * stride;
            break;
        case BLOCKED:
            // (idx / bs) * bs + (idx % bs) reconstructs idx, so this is the
            // identity pattern shifted by `offset` (mirrors generate_pattern).
            actual_idx = (idx / block_size) * block_size + (idx % block_size) + offset;
            break;
        case STRIDED:
            actual_idx = offset + idx * stride;
            break;
        case RANDOM:
            // LCG-style hash for a reproducible pseudo-random pattern.
            // Computed in unsigned arithmetic: the signed multiply
            // idx * 1103515245 overflows (undefined behavior) for most idx.
            actual_idx = (int)(((unsigned int)idx * 1103515245u + 12345u) % (unsigned int)n);
            break;
        default:
            actual_idx = idx;
            break;
    }

    // Clamp into [0, n-1] so overshooting patterns still yield valid indices.
    actual_idx = max(0, min(actual_idx, n - 1));
    // Cast keeps the arithmetic in int (sizeof yields size_t).
    int memory_addr = actual_idx * (int)sizeof(float);

    if (access_indices) access_indices[idx] = actual_idx;
    if (memory_addresses) memory_addresses[idx] = memory_addr;
}

// Memory coalescence analysis kernel
// Flags, per element, whether this thread's access is coalesced with the rest
// of its warp, and copies data[idx] into results. Launch: 1-D grid, one
// thread per element; either output pointer may be null to skip that output.
//
// NOTE(review): base_addr is derived from (idx - lane_id), so for this
// kernel's own linear indexing the predicate below is true by construction —
// every flag comes out 1. As written this is a template for plugging in a
// non-linear address computation, not a meaningful measurement; confirm
// intent before relying on the flags.
__global__ void coalescence_analysis_kernel(
    const float* __restrict__ data,
    float* __restrict__ results,
    int* __restrict__ coalescence_flags,
    int n,
    int warp_size = 32
) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int lane_id = threadIdx.x % warp_size;  // position of this thread in its warp

    if (idx < n) {
        // Byte address of the warp's first element and of this lane's element.
        // Casts keep the comparison fully in int (sizeof yields size_t).
        int base_addr = (idx - lane_id) * (int)sizeof(float);
        int current_addr = idx * (int)sizeof(float);

        // Coalesced when consecutive lanes touch consecutive 4-byte words.
        bool coalesced = (current_addr - base_addr) == lane_id * (int)sizeof(float);

        if (coalescence_flags) {
            coalescence_flags[idx] = coalesced ? 1 : 0;
        }

        if (results) {
            results[idx] = data[idx];
        }
    }
}

// Memory layout validation class
class MemoryLayoutAnalyzer {
private:
    MemoryLayout layout;
    
public:
    MemoryLayoutAnalyzer(const MemoryLayout& l) : layout(l) {}
    
    // Generate memory layout pattern
    void generate_pattern(float* array, int size, MemoryPattern pattern, 
                         int stride = 1, int block_size = 1024, int offset = 0) {
        switch (pattern) {
            case LINEAR:
                for (int i = 0; i < size; i++) {
                    array[i] = (float)i;
                }
                break;
                
            case INTERLEAVED:
                for (int i = 0; i < size; i++) {
                    array[i] = (float)(i * stride);
                }
                break;
                
            case BLOCKED:
                for (int i = 0; i < size; i++) {
                    int block = i / block_size;
                    int pos = i % block_size;
                    array[i] = (float)(block * block_size + pos + offset);
                }
                break;
                
            case STRIDED:
                for (int i = 0; i < size; i++) {
                    array[i] = (float)(offset + i * stride);
                }
                break;
                
            case RANDOM:
                srand(12345);  // Fixed seed for reproducibility
                for (int i = 0; i < size; i++) {
                    array[i] = (float)(rand() % size);
                }
                break;
        }
    }
    
    // Analyze memory access pattern
    MemoryAnalysisResult analyze_pattern(const float* input, int size, 
                                       const MemoryLayout& layout) {
        MemoryAnalysisResult result;
        result.access_count = size;
        
        // Allocate device memory
        float* d_input = nullptr;
        int* d_access_indices = nullptr;
        int* d_memory_addresses = nullptr;
        
        cudaMalloc(&d_input, size * sizeof(float));
        cudaMalloc(&d_access_indices, size * sizeof(int));
        cudaMalloc(&d_memory_addresses, size * sizeof(int));
        
        cudaMemcpy(d_input, input, size * sizeof(float), cudaMemcpyHostToDevice);
        
        // Launch analysis kernel
        int threads_per_block = 256;
        int blocks = (size + threads_per_block - 1) / threads_per_block;
        
        memory_layout_analysis_kernel<<<blocks, threads_per_block>>>(
            d_input, d_access_indices, d_memory_addresses, size,
            layout.pattern, layout.stride, layout.block_size, layout.offset
        );
        
        cudaDeviceSynchronize();
        
        // Copy results back
        std::vector<int> h_access_indices(size);
        std::vector<int> h_memory_addresses(size);
        
        cudaMemcpy(h_access_indices.data(), d_access_indices, 
                  size * sizeof(int), cudaMemcpyDeviceToHost);
        cudaMemcpy(h_memory_addresses.data(), d_memory_addresses, 
                  size * sizeof(int), cudaMemcpyDeviceToHost);
        
        result.access_pattern = h_access_indices;
        result.memory_addresses = h_memory_addresses;
        
        // Calculate coalescence ratio (simplified)
        int coalesced_accesses = 0;
        for (int i = 1; i < size; i++) {
            if (h_memory_addresses[i] - h_memory_addresses[i-1] == sizeof(float)) {
                coalesced_accesses++;
            }
        }
        
        result.coalescence_ratio = (size > 1) ? 
            (float)coalesced_accesses / (size - 1) : 1.0f;
        
        // Calculate spatial locality (simplified)
        float total_distance = 0.0f;
        for (int i = 1; i < size; i++) {
            total_distance += fabsf(h_memory_addresses[i] - h_memory_addresses[i-1]);
        }
        
        result.spatial_locality = (size > 1) ? 
            1.0f - (total_distance / (size * sizeof(float))) : 1.0f;
        
        result.temporal_locality = 1.0f;  // Simplified for this use case
        result.valid = true;
        
        // Cleanup
        cudaFree(d_input);
        cudaFree(d_access_indices);
        cudaFree(d_memory_addresses);
        
        return result;
    }
    
    // Validate memory layout correctness
    bool validate_layout(const float* computed, const float* expected, int size,
                        const MemoryLayout& layout) {
        std::vector<float> expected_pattern(size);
        generate_pattern(expected_pattern.data(), size, layout.pattern, 
                        layout.stride, layout.block_size, layout.offset);
        
        for (int i = 0; i < size; i++) {
            if (fabsf(computed[i] - expected_pattern[i]) > 1e-6f) {
                printf("Layout validation failed at index %d: computed=%.1f, expected=%.1f\n",
                       i, computed[i], expected_pattern[i]);
                return false;
            }
        }
        
        return true;
    }
    
    // Generate test data for different patterns
    void generate_test_data(float** arrays, int num_arrays, int array_size,
                           MemoryPattern pattern, int stride = 1, 
                           int block_size = 1024, int offset = 0) {
        for (int i = 0; i < num_arrays; i++) {
            generate_pattern(arrays[i], array_size, pattern, stride, block_size, offset + i);
        }
    }
    
    // Print memory layout details
    void print_layout_info(const MemoryLayout& layout) {
        printf("Memory Layout Configuration:\n");
        printf("  Pattern: ");
        switch (layout.pattern) {
            case LINEAR: printf("Linear\n"); break;
            case INTERLEAVED: printf("Interleaved (stride=%d)\n", layout.stride); break;
            case BLOCKED: printf("Blocked (block_size=%d)\n", layout.block_size); break;
            case STRIDED: printf("Strided (stride=%d, offset=%d)\n", layout.stride, layout.offset); break;
            case RANDOM: printf("Random\n"); break;
        }
        printf("  Total elements: %d\n", layout.total_elements);
    }
};

// Memory layout test utilities
class MemoryLayoutTester {
public:
    static bool test_linear_pattern(int size) {
        MemoryLayout layout(LINEAR, 1, 1024, 0, size);
        MemoryLayoutAnalyzer analyzer(layout);
        
        std::vector<float> test_data(size);
        analyzer.generate_pattern(test_data.data(), size, LINEAR);
        
        MemoryAnalysisResult result = analyzer.analyze_pattern(test_data.data(), size, layout);
        
        bool valid = true;
        for (int i = 0; i < size; i++) {
            if (fabsf(test_data[i] - i) > 1e-6f) {
                valid = false;
                break;
            }
        }
        
        return valid && result.valid;
    }
    
    static bool test_interleaved_pattern(int size, int stride) {
        MemoryLayout layout(INTERLEAVED, stride, 1024, 0, size);
        MemoryLayoutAnalyzer analyzer(layout);
        
        std::vector<float> test_data(size);
        analyzer.generate_pattern(test_data.data(), size, INTERLEAVED, stride);
        
        bool valid = true;
        for (int i = 0; i < size; i++) {
            if (fabsf(test_data[i] - (i * stride)) > 1e-6f) {
                valid = false;
                break;
            }
        }
        
        return valid;
    }
    
    static bool test_blocked_pattern(int size, int block_size) {
        MemoryLayout layout(BLOCKED, 1, block_size, 0, size);
        MemoryLayoutAnalyzer analyzer(layout);
        
        std::vector<float> test_data(size);
        analyzer.generate_pattern(test_data.data(), size, BLOCKED, 1, block_size);
        
        bool valid = true;
        for (int i = 0; i < size; i++) {
            int expected = (i / block_size) * block_size + (i % block_size);
            if (fabsf(test_data[i] - expected) > 1e-6f) {
                valid = false;
                break;
            }
        }
        
        return valid;
    }
};

// C-style interface functions
// C-style interface for FFI callers (ctypes/dlopen etc.).
extern "C" {
    // Create an analyzer for the given configuration. The caller owns the
    // returned pointer and must release it with destroy_memory_layout_analyzer.
    MemoryLayoutAnalyzer* create_memory_layout_analyzer(int pattern_type, int stride, 
                                                        int block_size, int offset, int total_elements) {
        MemoryPattern pattern = static_cast<MemoryPattern>(pattern_type);
        // Build the layout on the stack; the analyzer copies it in its
        // constructor. (The original heap-allocated a MemoryLayout here and
        // leaked it on every call.)
        MemoryLayout layout(pattern, stride, block_size, offset, total_elements);
        return new MemoryLayoutAnalyzer(layout);
    }
    
    // Destroy an analyzer created by create_memory_layout_analyzer.
    // Safe on null (delete of nullptr is a no-op).
    void destroy_memory_layout_analyzer(MemoryLayoutAnalyzer* analyzer) {
        delete analyzer;
    }
    
    // Run the built-in test for one pattern type.
    // Returns 1 on success, 0 on failure or unsupported pattern type.
    int test_memory_pattern(int size, int pattern_type, int stride, int block_size) {
        MemoryPattern pattern = static_cast<MemoryPattern>(pattern_type);
        
        switch (pattern) {
            case LINEAR:
                return MemoryLayoutTester::test_linear_pattern(size) ? 1 : 0;
            case INTERLEAVED:
                return MemoryLayoutTester::test_interleaved_pattern(size, stride) ? 1 : 0;
            case BLOCKED:
                return MemoryLayoutTester::test_blocked_pattern(size, block_size) ? 1 : 0;
            default:
                return 0;
        }
    }
    
    // Fill `array` (host memory, length `size`) with the requested pattern.
    void generate_memory_pattern(float* array, int size, int pattern_type,
                               int stride, int block_size, int offset) {
        MemoryPattern pattern = static_cast<MemoryPattern>(pattern_type);
        MemoryLayout layout(pattern, stride, block_size, offset, size);
        MemoryLayoutAnalyzer analyzer(layout);
        analyzer.generate_pattern(array, size, pattern, stride, block_size, offset);
    }
}