#include <iostream>
#include <vector>
#include <chrono>
#include <cuda_runtime.h>
#include "benchmark_utils.cuh"
#include "../linear_layouts_core/linear_layout.cuh"
#include "../layouts/blocked_layout.cuh"
#include "../layouts/mma_layout.cuh"
#include "../layouts/swizzled_layout.cuh"

using namespace linear_layouts;

// Baseline memory access patterns (non-vectorized)
__global__ void baseline_memory_access(const float* input, float* output, int num_elements) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < num_elements) {
        output[idx] = input[idx] * 2.0f; // Simple operation
    }
}

// Vectorized memory access using float4
__global__ void vectorized_memory_access_float4(const float4* input, float4* output, int num_elements) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < num_elements) {
        float4 data = input[idx];
        output[idx] = make_float4(data.x * 2.0f, data.y * 2.0f, data.z * 2.0f, data.w * 2.0f);
    }
}

// Layout-aware memory access using BlockedLayout
template<int Rank, int BlockSize>
__global__ void blocked_layout_access(const float* input, float* output, 
                                     BlockedLayout<Rank, BlockSize> layout, int num_elements) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < num_elements) {
        // Use layout to determine memory access pattern
        int physical_addr = layout(idx);
        if (physical_addr < num_elements) {
            output[idx] = input[physical_addr] * 2.0f;
        }
    }
}

// Coalesced memory access using thread cooperation
__global__ void coalesced_memory_access(const float* input, float* output, int num_elements) {
    int tid = threadIdx.x;
    int bid = blockIdx.x;
    int idx = bid * blockDim.x + tid;
    
    // Use shared memory to improve coalescing
    __shared__ float shared_data[256];
    
    if (idx < num_elements) {
        // Load with coalesced pattern
        shared_data[tid] = input[idx];
        __syncthreads();
        
        // Process data
        shared_data[tid] *= 2.0f;
        __syncthreads();
        
        // Store with coalesced pattern
        output[idx] = shared_data[tid];
    }
}

// Matrix transpose with layout optimization
template<typename Layout>
__global__ void layout_transpose_kernel(const float* input, float* output, 
                                       Layout layout, int rows, int cols) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    
    if (row < rows && col < cols) {
        int src_idx = layout(row * cols + col);
        int dst_idx = layout(col * rows + row);
        output[dst_idx] = input[src_idx];
    }
}

// Benchmark memory bandwidth with different access patterns
void benchmark_memory_bandwidth(const std::string& test_name, int num_elements, int num_iterations) {
    GPUTimer timer;
    
    // Allocate memory
    size_t bytes = num_elements * sizeof(float);
    float *d_input, *d_output;
    cudaMalloc(&d_input, bytes);
    cudaMalloc(&d_output, bytes);
    
    // Initialize input data
    std::vector<float> h_input(num_elements, 1.0f);
    cudaMemcpy(d_input, h_input.data(), bytes, cudaMemcpyHostToDevice);
    
    int block_size = 256;
    int grid_size = (num_elements + block_size - 1) / block_size;
    
    // Warm up
    for (int i = 0; i < 3; i++) {
        baseline_memory_access<<<grid_size, block_size>>>(d_input, d_output, num_elements);
    }
    cudaDeviceSynchronize();
    
    // Benchmark
    std::vector<double> times;
    for (int i = 0; i < num_iterations; i++) {
        timer.start();
        baseline_memory_access<<<grid_size, block_size>>>(d_input, d_output, num_elements);
        cudaDeviceSynchronize();
        timer.stop();
        times.push_back(timer.elapsed_ms());
    }
    
    BenchmarkStats stats(times);
    double avg_time = stats.mean;
    double bandwidth = (2 * bytes) / (avg_time * 1e-9 * 1e9); // GB/s (read + write)
    
    std::cout << test_name << ":" << std::endl;
    std::cout << "  Bandwidth: " << bandwidth << " GB/s" << std::endl;
    std::cout << "  Avg Time: " << avg_time << " ns" << std::endl;
    std::cout << std::endl;
    
    cudaFree(d_input);
    cudaFree(d_output);
}

// Benchmark vectorized memory access
void benchmark_vectorized_access(int num_elements, int num_iterations) {
    GPUTimer timer;
    
    // Ensure elements are multiple of 4 for float4
    int vec_elements = (num_elements / 4) * 4;
    size_t bytes = vec_elements * sizeof(float);
    
    float *d_input, *d_output;
    cudaMalloc(&d_input, bytes);
    cudaMalloc(&d_output, bytes);
    
    // Initialize data
    std::vector<float> h_input(vec_elements, 1.0f);
    cudaMemcpy(d_input, h_input.data(), bytes, cudaMemcpyHostToDevice);
    
    // Cast to float4 pointers
    float4 *d_input_vec = reinterpret_cast<float4*>(d_input);
    float4 *d_output_vec = reinterpret_cast<float4*>(d_output);
    
    int vec_count = vec_elements / 4;
    int block_size = 256;
    int grid_size = (vec_count + block_size - 1) / block_size;
    
    // Benchmark vectorized access
    std::vector<double> times;
    for (int i = 0; i < num_iterations; i++) {
        timer.start();
        vectorized_memory_access_float4<<<grid_size, block_size>>>(d_input_vec, d_output_vec, vec_count);
        cudaDeviceSynchronize();
        timer.stop();
        times.push_back(timer.elapsed_ms());
    }
    
    BenchmarkStats stats(times);
    double avg_time = stats.mean;
    double bandwidth = (2 * bytes) / (avg_time * 1e-9 * 1e9); // GB/s
    
    std::cout << "Vectorized float4 Access:" << std::endl;
    std::cout << "  Bandwidth: " << bandwidth << " GB/s" << std::endl;
    std::cout << "  Avg Time: " << avg_time << " ns" << std::endl;
    std::cout << std::endl;
    
    cudaFree(d_input);
    cudaFree(d_output);
}

// Benchmark layout-aware memory access
template<typename Layout>
void benchmark_layout_access(const std::string& test_name, Layout layout, 
                           int num_elements, int num_iterations) {
    GPUTimer timer;
    
    size_t bytes = num_elements * sizeof(float);
    float *d_input, *d_output;
    cudaMalloc(&d_input, bytes);
    cudaMalloc(&d_output, bytes);
    
    // Initialize data
    std::vector<float> h_input(num_elements, 1.0f);
    cudaMemcpy(d_input, h_input.data(), bytes, cudaMemcpyHostToDevice);
    
    int block_size = 256;
    int grid_size = (num_elements + block_size - 1) / block_size;
    
    // Benchmark layout-aware access
    std::vector<double> times;
    for (int i = 0; i < num_iterations; i++) {
        timer.start();
        blocked_layout_access<<<grid_size, block_size>>>(d_input, d_output, layout, num_elements);
        cudaDeviceSynchronize();
        timer.stop();
        times.push_back(timer.elapsed_ms());
    }
    
    BenchmarkStats stats(times);
    double avg_time = stats.mean;
    double bandwidth = (2 * bytes) / (avg_time * 1e-9 * 1e9); // GB/s
    
    std::cout << test_name << ":" << std::endl;
    std::cout << "  Bandwidth: " << bandwidth << " GB/s" << std::endl;
    std::cout << "  Avg Time: " << avg_time << " ns" << std::endl;
    std::cout << std::endl;
    
    cudaFree(d_input);
    cudaFree(d_output);
}

// Benchmark coalesced memory access
void benchmark_coalesced_access(int num_elements, int num_iterations) {
    GPUTimer timer;
    
    size_t bytes = num_elements * sizeof(float);
    float *d_input, *d_output;
    cudaMalloc(&d_input, bytes);
    cudaMalloc(&d_output, bytes);
    
    // Initialize data
    std::vector<float> h_input(num_elements, 1.0f);
    cudaMemcpy(d_input, h_input.data(), bytes, cudaMemcpyHostToDevice);
    
    int block_size = 256;
    int grid_size = (num_elements + block_size - 1) / block_size;
    
    // Benchmark coalesced access
    std::vector<double> times;
    for (int i = 0; i < num_iterations; i++) {
        timer.start();
        coalesced_memory_access<<<grid_size, block_size>>>(d_input, d_output, num_elements);
        cudaDeviceSynchronize();
        timer.stop();
        times.push_back(timer.elapsed_ms());
    }
    
    BenchmarkStats stats(times);
    double avg_time = stats.mean;
    double bandwidth = (2 * bytes) / (avg_time * 1e-9 * 1e9); // GB/s
    
    std::cout << "Coalesced Memory Access:" << std::endl;
    std::cout << "  Bandwidth: " << bandwidth << " GB/s" << std::endl;
    std::cout << "  Avg Time: " << avg_time << " ns" << std::endl;
    std::cout << std::endl;
    
    cudaFree(d_input);
    cudaFree(d_output);
}

// Benchmark matrix transpose with different layouts
void benchmark_transpose_layouts(int rows, int cols, int num_iterations) {
    std::cout << "=== Matrix Transpose Layout Benchmarks ===" << std::endl;
    
    GPUTimer timer;
    int num_elements = rows * cols;
    size_t bytes = num_elements * sizeof(float);
    
    float *d_input, *d_output;
    cudaMalloc(&d_input, bytes);
    cudaMalloc(&d_output, bytes);
    
    // Initialize matrix data
    std::vector<float> h_input(num_elements);
    for (int i = 0; i < num_elements; i++) {
        h_input[i] = static_cast<float>(i);
    }
    cudaMemcpy(d_input, h_input.data(), bytes, cudaMemcpyHostToDevice);
    
    dim3 block_size(16, 16);
    dim3 grid_size((cols + 15) / 16, (rows + 15) / 16);
    
    // Test different layouts for matrix transpose
    auto blocked_layout = BlockedLayout<2, 64>();
    auto mma_layout = MMALayout<MMAVariant::MMA_16x8x16, MMAOperand::C_MATRIX>();
    
    // Benchmark blocked layout transpose
    std::vector<double> blocked_times;
    for (int i = 0; i < num_iterations; i++) {
        timer.start();
        layout_transpose_kernel<<<grid_size, block_size>>>(d_input, d_output, blocked_layout, rows, cols);
        cudaDeviceSynchronize();
        timer.stop();
        blocked_times.push_back(timer.elapsed_ms());
    }
    
    // Benchmark MMA layout transpose
    std::vector<double> mma_times;
    for (int i = 0; i < num_iterations; i++) {
        timer.start();
        layout_transpose_kernel<<<grid_size, block_size>>>(d_input, d_output, mma_layout, rows, cols);
        cudaDeviceSynchronize();
        timer.stop();
        mma_times.push_back(timer.elapsed_ms());
    }
    
    BenchmarkStats blocked_stats(blocked_times);
    BenchmarkStats mma_stats(mma_times);
    double blocked_avg = blocked_stats.mean;
    double mma_avg = mma_stats.mean;
    double blocked_bandwidth = (2 * bytes) / (blocked_avg * 1e-9 * 1e9);
    double mma_bandwidth = (2 * bytes) / (mma_avg * 1e-9 * 1e9);
    
    std::cout << "Matrix Transpose (" << rows << "x" << cols << "):" << std::endl;
    std::cout << "  Blocked Layout: " << blocked_bandwidth << " GB/s" << std::endl;
    std::cout << "  MMA Layout: " << mma_bandwidth << " GB/s" << std::endl;
    std::cout << "  Speedup: " << mma_bandwidth / blocked_bandwidth << "x" << std::endl;
    std::cout << std::endl;
    
    cudaFree(d_input);
    cudaFree(d_output);
}

// Benchmark memory access patterns with different data sizes
void benchmark_data_size_scaling(int num_iterations) {
    std::cout << "=== Data Size Scaling Benchmarks ===" << std::endl;
    
    std::vector<int> data_sizes = {1024, 4096, 16384, 65536, 262144, 1048576, 4194304};
    
    for (int size : data_sizes) {
        std::cout << "Data Size: " << size << " elements (" << (size * sizeof(float)) / 1024 << " KB)" << std::endl;
        
        benchmark_memory_bandwidth("  Baseline", size, num_iterations);
        benchmark_vectorized_access(size, num_iterations);
        benchmark_coalesced_access(size, num_iterations);
        
        std::cout << std::endl;
    }
}

int main() {
    std::cout << "=== Linear Layout Vectorization Benchmarks ===" << std::endl;
    std::cout << std::endl;
    
    // Initialize CUDA
    cudaSetDevice(0);
    
    // Print GPU information
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    std::cout << "GPU: " << prop.name << std::endl;
    std::cout << "SM Count: " << prop.multiProcessorCount << std::endl;
    std::cout << "Memory Bandwidth: " << (prop.memoryBusWidth * prop.memoryClockRate * 2.0) / (8 * 1000) << " GB/s" << std::endl;
    std::cout << "Shared Memory per Block: " << prop.sharedMemPerBlock / 1024 << " KB" << std::endl;
    std::cout << std::endl;
    
    const int num_elements = 1024 * 1024; // 1M elements
    const int num_iterations = 100;
    
    std::cout << "=== Memory Access Pattern Benchmarks ===" << std::endl;
    
    // Benchmark different memory access patterns
    benchmark_memory_bandwidth("Baseline Memory Access", num_elements, num_iterations);
    benchmark_vectorized_access(num_elements, num_iterations);
    benchmark_coalesced_access(num_elements, num_iterations);
    
    // Benchmark layout-aware access patterns
    auto blocked_64 = BlockedLayout<2, 64>();
    auto blocked_128 = BlockedLayout<2, 128>();
    
    benchmark_layout_access("Blocked Layout (64)", blocked_64, num_elements, num_iterations);
    benchmark_layout_access("Blocked Layout (128)", blocked_128, num_elements, num_iterations);
    
    // Benchmark matrix operations with different layouts
    benchmark_transpose_layouts(1024, 1024, num_iterations);
    benchmark_transpose_layouts(2048, 2048, num_iterations);
    
    // Test scaling with different data sizes
    benchmark_data_size_scaling(50);
    
    std::cout << "=== Vectorization Analysis ===" << std::endl;
    std::cout << "1. Vectorized access (float4) should show improved bandwidth utilization" << std::endl;
    std::cout << "2. Coalesced access patterns reduce memory transaction overhead" << std::endl;
    std::cout << "3. Layout-aware access can improve cache efficiency" << std::endl;
    std::cout << "4. Matrix transpose benefits from layout optimization" << std::endl;
    std::cout << "5. Performance should scale with data size until memory bandwidth saturation" << std::endl;
    std::cout << std::endl;
    
    std::cout << "=== Expected Improvements ===" << std::endl;
    std::cout << "- Vectorized access: 2-4x bandwidth improvement over baseline" << std::endl;
    std::cout << "- Coalesced access: 1.5-3x improvement in memory efficiency" << std::endl;
    std::cout << "- Layout optimization: 1.2-2x improvement in cache utilization" << std::endl;
    std::cout << "- Combined optimizations: 3-8x overall improvement" << std::endl;
    
    return 0;
}