#include <algorithm>
#include <chrono>
#include <iostream>
#include <random>
#include <string>
#include <vector>

#include <cuda_runtime.h>

#include "benchmark_utils.cuh"
#include "../linear_layouts_core/linear_layout.cuh"
#include "../layouts/blocked_layout.cuh"
#include "../layouts/mma_layout.cuh"
#include "../layouts/swizzled_layout.cuh"

using namespace linear_layouts;
using namespace benchmark_utils;

// Maps each element's physical offset under the source layout to its physical
// offset under the destination layout, going through the shared logical
// coordinate space. One thread per element; 1D launch with a tail guard.
template<typename SrcLayout, typename DstLayout>
__global__ void conversion_kernel(const int* src_coords, int* dst_coords,
                                 SrcLayout src_layout, DstLayout dst_layout,
                                 int num_elements) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= num_elements) return;
    // physical(src) -> logical -> physical(dst)
    dst_coords[tid] = dst_layout(src_layout.inverse(src_coords[tid]));
}

// Applies a layout's forward map (logical -> physical coordinate) elementwise.
// One thread per element; 1D launch with a tail guard.
template<typename Layout>
__global__ void coordinate_transform_kernel(const int* logical_coords, int* physical_coords,
                                          Layout layout, int num_elements) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= num_elements) return;
    physical_coords[tid] = layout(logical_coords[tid]);
}

// Applies a layout's inverse map (physical -> logical coordinate) elementwise.
// One thread per element; 1D launch with a tail guard.
template<typename Layout>
__global__ void inverse_transform_kernel(const int* physical_coords, int* logical_coords,
                                       Layout layout, int num_elements) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= num_elements) return;
    logical_coords[tid] = layout.inverse(physical_coords[tid]);
}

// Benchmark layout conversion performance.
//
// Times the src->dst conversion kernel over `num_elements` random physical
// offsets for `num_iterations` timed runs (after 3 untimed warm-up launches),
// then prints avg/min/max time plus derived throughput and bandwidth.
//
// FIX: GPUTimer::elapsed_ms() reports milliseconds, but the previous code
// treated the value as nanoseconds (multiplying by 1e-9 and labeling output
// "ns"), inflating throughput/bandwidth by a factor of 1e6. All derived
// metrics now convert from ms.
template<typename SrcLayout, typename DstLayout>
void benchmark_layout_conversion(const std::string& test_name,
                               SrcLayout src_layout, DstLayout dst_layout,
                               int num_elements, int num_iterations) {

    GPUTimer timer;

    // Random source physical offsets (range assumed to cover both layouts'
    // physical spaces — TODO confirm against layout sizes).
    std::vector<int> h_src_coords(num_elements);
    std::random_device rd;
    std::mt19937 gen(rd());
    std::uniform_int_distribution<> dis(0, 65535);
    for (int i = 0; i < num_elements; i++) {
        h_src_coords[i] = dis(gen);
    }

    // Device buffers: input offsets and converted output.
    int *d_src_coords = nullptr, *d_dst_coords = nullptr;
    cudaMalloc(&d_src_coords, num_elements * sizeof(int));
    cudaMalloc(&d_dst_coords, num_elements * sizeof(int));

    cudaMemcpy(d_src_coords, h_src_coords.data(), num_elements * sizeof(int), cudaMemcpyHostToDevice);

    // Configure kernel launch parameters (ceil-div so the tail is covered).
    int block_size = 256;
    int grid_size = (num_elements + block_size - 1) / block_size;

    // Warm up (excluded from timing).
    for (int i = 0; i < 3; i++) {
        conversion_kernel<<<grid_size, block_size>>>(d_src_coords, d_dst_coords,
                                                   src_layout, dst_layout, num_elements);
    }
    // Kernel launches do not return errors directly; surface config errors here.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        std::cerr << test_name << ": kernel launch failed: "
                  << cudaGetErrorString(err) << std::endl;
        cudaFree(d_src_coords);
        cudaFree(d_dst_coords);
        return;
    }
    cudaDeviceSynchronize();

    // Timed runs (milliseconds per iteration).
    std::vector<double> times;
    for (int i = 0; i < num_iterations; i++) {
        timer.start();
        conversion_kernel<<<grid_size, block_size>>>(d_src_coords, d_dst_coords,
                                                   src_layout, dst_layout, num_elements);
        cudaDeviceSynchronize();
        timer.stop();
        times.push_back(timer.elapsed_ms());
    }

    // Derived metrics: ms -> seconds via 1e-3.
    BenchmarkStats stats(times);
    double avg_ms = stats.mean;
    double throughput = num_elements / (avg_ms * 1e-3); // ops/second
    // One 4-byte read plus one 4-byte write per element.
    double bandwidth = (2.0 * num_elements * sizeof(int)) / (avg_ms * 1e-3) / 1e9; // GB/s

    std::cout << test_name << ":" << std::endl;
    std::cout << "  Elements: " << num_elements << std::endl;
    std::cout << "  Avg Time: " << avg_ms << " ms" << std::endl;
    std::cout << "  Throughput: " << throughput / 1e6 << " M ops/s" << std::endl;
    std::cout << "  Bandwidth: " << bandwidth << " GB/s" << std::endl;
    std::cout << "  Min Time: " << *std::min_element(times.begin(), times.end()) << " ms" << std::endl;
    std::cout << "  Max Time: " << *std::max_element(times.begin(), times.end()) << " ms" << std::endl;
    std::cout << std::endl;

    cudaFree(d_src_coords);
    cudaFree(d_dst_coords);
}

// Benchmark forward (logical -> physical) and inverse (physical -> logical)
// coordinate transform performance for a single layout.
//
// Times each direction over `num_elements` sequential logical coordinates for
// `num_iterations` runs and prints average time and throughput.
//
// FIX: GPUTimer::elapsed_ms() reports milliseconds, but the previous code
// treated the value as nanoseconds (multiplying by 1e-9 and labeling output
// "ns"), inflating throughput by a factor of 1e6. Also adds warm-up launches
// for consistency with benchmark_layout_conversion.
template<typename Layout>
void benchmark_coordinate_transform(const std::string& test_name, Layout layout,
                                  int num_elements, int num_iterations) {

    GPUTimer timer;

    // Sequential logical coordinates 0..num_elements-1.
    std::vector<int> h_logical_coords(num_elements);
    for (int i = 0; i < num_elements; i++) {
        h_logical_coords[i] = i;
    }

    // Device buffers for both directions of the transform.
    int *d_logical_coords = nullptr, *d_physical_coords = nullptr;
    cudaMalloc(&d_logical_coords, num_elements * sizeof(int));
    cudaMalloc(&d_physical_coords, num_elements * sizeof(int));

    cudaMemcpy(d_logical_coords, h_logical_coords.data(), num_elements * sizeof(int), cudaMemcpyHostToDevice);

    // Configure kernel launch parameters (ceil-div so the tail is covered).
    int block_size = 256;
    int grid_size = (num_elements + block_size - 1) / block_size;

    // Warm up (excluded from timing; matches benchmark_layout_conversion).
    for (int i = 0; i < 3; i++) {
        coordinate_transform_kernel<<<grid_size, block_size>>>(d_logical_coords, d_physical_coords,
                                                             layout, num_elements);
    }
    // Kernel launches do not return errors directly; surface config errors here.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        std::cerr << test_name << ": kernel launch failed: "
                  << cudaGetErrorString(err) << std::endl;
        cudaFree(d_logical_coords);
        cudaFree(d_physical_coords);
        return;
    }
    cudaDeviceSynchronize();

    // Benchmark forward transformation (milliseconds per iteration).
    std::vector<double> forward_times;
    for (int i = 0; i < num_iterations; i++) {
        timer.start();
        coordinate_transform_kernel<<<grid_size, block_size>>>(d_logical_coords, d_physical_coords,
                                                             layout, num_elements);
        cudaDeviceSynchronize();
        timer.stop();
        forward_times.push_back(timer.elapsed_ms());
    }

    // Benchmark inverse transformation (reads the forward pass's output).
    std::vector<double> inverse_times;
    for (int i = 0; i < num_iterations; i++) {
        timer.start();
        inverse_transform_kernel<<<grid_size, block_size>>>(d_physical_coords, d_logical_coords,
                                                          layout, num_elements);
        cudaDeviceSynchronize();
        timer.stop();
        inverse_times.push_back(timer.elapsed_ms());
    }

    // Derived metrics: ms -> seconds via 1e-3.
    BenchmarkStats forward_stats(forward_times);
    BenchmarkStats inverse_stats(inverse_times);
    double avg_forward = forward_stats.mean;
    double avg_inverse = inverse_stats.mean;
    double forward_throughput = num_elements / (avg_forward * 1e-3);
    double inverse_throughput = num_elements / (avg_inverse * 1e-3);

    std::cout << test_name << " - Forward Transform:" << std::endl;
    std::cout << "  Avg Time: " << avg_forward << " ms" << std::endl;
    std::cout << "  Throughput: " << forward_throughput / 1e6 << " M ops/s" << std::endl;

    std::cout << test_name << " - Inverse Transform:" << std::endl;
    std::cout << "  Avg Time: " << avg_inverse << " ms" << std::endl;
    std::cout << "  Throughput: " << inverse_throughput / 1e6 << " M ops/s" << std::endl;
    std::cout << std::endl;

    cudaFree(d_logical_coords);
    cudaFree(d_physical_coords);
}

// Benchmark layout composition performance.
//
// Measures applying two layouts back-to-back (blocked, then swizzled) as two
// dependent kernel launches per iteration — the naive alternative to a fused
// composed layout.
//
// FIX: GPUTimer::elapsed_ms() reports milliseconds, but the previous code
// treated the value as nanoseconds, inflating throughput by 1e6. Also removes
// an MMALayout instance that was constructed but never used.
void benchmark_layout_composition(int num_elements, int num_iterations) {
    std::cout << "=== Layout Composition Benchmarks ===" << std::endl;

    GPUTimer timer;

    // Layouts under test.
    auto blocked = BlockedLayout<2, 64>();
    auto swizzled = SwizzledLayout<SwizzleType::MMA_SWIZZLE>();

    // Sequential input coordinates.
    std::vector<int> h_coords(num_elements);
    for (int i = 0; i < num_elements; i++) {
        h_coords[i] = i;
    }

    int *d_coords = nullptr, *d_result = nullptr;
    cudaMalloc(&d_coords, num_elements * sizeof(int));
    cudaMalloc(&d_result, num_elements * sizeof(int));
    cudaMemcpy(d_coords, h_coords.data(), num_elements * sizeof(int), cudaMemcpyHostToDevice);

    // Ceil-div launch config so the tail is covered.
    int block_size = 256;
    int grid_size = (num_elements + block_size - 1) / block_size;

    // Test blocked + swizzled composition (milliseconds per iteration).
    std::vector<double> times;
    for (int i = 0; i < num_iterations; i++) {
        timer.start();

        // First apply the blocked layout ...
        coordinate_transform_kernel<<<grid_size, block_size>>>(d_coords, d_result, blocked, num_elements);
        cudaDeviceSynchronize();

        // ... then apply the swizzled layout to its output (writes back into d_coords).
        coordinate_transform_kernel<<<grid_size, block_size>>>(d_result, d_coords, swizzled, num_elements);
        cudaDeviceSynchronize();

        timer.stop();
        times.push_back(timer.elapsed_ms());
    }
    // Kernel launches do not return errors directly; surface config errors here.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        std::cerr << "composition benchmark: kernel launch failed: "
                  << cudaGetErrorString(err) << std::endl;
    }

    // Derived metrics: ms -> seconds via 1e-3.
    BenchmarkStats stats(times);
    double avg_ms = stats.mean;
    double throughput = num_elements / (avg_ms * 1e-3); // ops/second

    std::cout << "Blocked + Swizzled Composition:" << std::endl;
    std::cout << "  Avg Time: " << avg_ms << " ms" << std::endl;
    std::cout << "  Throughput: " << throughput / 1e6 << " M ops/s" << std::endl;
    std::cout << std::endl;

    cudaFree(d_coords);
    cudaFree(d_result);
}

// Entry point: prints device information, then runs the single-transform,
// layout-conversion, and composition benchmark suites (1M elements, 100
// timed iterations each).
int main() {
    std::cout << "=== Linear Layout Conversion Benchmarks ===" << std::endl;
    std::cout << std::endl;

    // Initialize CUDA and fail fast if no usable device is present.
    cudaError_t err = cudaSetDevice(0);
    if (err != cudaSuccess) {
        std::cerr << "cudaSetDevice failed: " << cudaGetErrorString(err) << std::endl;
        return 1;
    }

    // Print GPU information.
    cudaDeviceProp prop;
    err = cudaGetDeviceProperties(&prop, 0);
    if (err != cudaSuccess) {
        std::cerr << "cudaGetDeviceProperties failed: " << cudaGetErrorString(err) << std::endl;
        return 1;
    }
    std::cout << "GPU: " << prop.name << std::endl;
    std::cout << "SM Count: " << prop.multiProcessorCount << std::endl;
    // Theoretical peak bandwidth. memoryClockRate is in kHz and memoryBusWidth
    // in bits, so GB/s = 2 (DDR) * clock_kHz * 1e3 * (bits/8) / 1e9
    //                  = 2 * clock_kHz * bits / 8e6.
    // FIX: the previous divisor (8 * 1000) was off by a factor of 1000.
    std::cout << "Memory Bandwidth: "
              << (2.0 * prop.memoryClockRate * prop.memoryBusWidth) / 8.0e6
              << " GB/s" << std::endl;
    std::cout << std::endl;

    const int num_elements = 1024 * 1024; // 1M elements
    const int num_iterations = 100;

    // Create layout instances for testing.
    auto blocked_64 = BlockedLayout<2, 64>();
    auto blocked_128 = BlockedLayout<2, 128>();
    auto mma_16x16 = MMALayout<MMAVariant::MMA_16x8x16, MMAOperand::C_MATRIX>();
    auto mma_32x8 = MMALayout<MMAVariant::MMA_16x8x8, MMAOperand::C_MATRIX>();
    auto swizzled_128 = SwizzledLayout<SwizzleType::MMA_SWIZZLE>();
    auto swizzled_256 = SwizzledLayout<SwizzleType::MMA_SWIZZLE>();

    std::cout << "=== Single Layout Transform Benchmarks ===" << std::endl;

    // Benchmark individual layout transformations.
    benchmark_coordinate_transform("BlockedLayout<2,64>", blocked_64, num_elements, num_iterations);
    benchmark_coordinate_transform("BlockedLayout<2,128>", blocked_128, num_elements, num_iterations);
    benchmark_coordinate_transform("MMALayout<16,16>", mma_16x16, num_elements, num_iterations);
    benchmark_coordinate_transform("MMALayout<32,8>", mma_32x8, num_elements, num_iterations);
    benchmark_coordinate_transform("SwizzledLayout<128,4>", swizzled_128, num_elements, num_iterations);
    benchmark_coordinate_transform("SwizzledLayout<256,8>", swizzled_256, num_elements, num_iterations);

    std::cout << "=== Layout Conversion Benchmarks ===" << std::endl;

    // Benchmark layout-to-layout conversions.
    benchmark_layout_conversion("Blocked64 -> Blocked128", blocked_64, blocked_128, num_elements, num_iterations);
    benchmark_layout_conversion("Blocked64 -> MMA16x16", blocked_64, mma_16x16, num_elements, num_iterations);
    benchmark_layout_conversion("Blocked64 -> Swizzled128", blocked_64, swizzled_128, num_elements, num_iterations);
    benchmark_layout_conversion("MMA16x16 -> Swizzled128", mma_16x16, swizzled_128, num_elements, num_iterations);
    benchmark_layout_conversion("Swizzled128 -> MMA32x8", swizzled_128, mma_32x8, num_elements, num_iterations);

    // Benchmark layout compositions.
    benchmark_layout_composition(num_elements, num_iterations);

    std::cout << "=== Performance Analysis ===" << std::endl;
    std::cout << "1. Coordinate transformation throughput indicates layout computation efficiency" << std::endl;
    std::cout << "2. Layout conversion performance shows the cost of switching between representations" << std::endl;
    std::cout << "3. Composition benchmarks measure the overhead of combining multiple layouts" << std::endl;
    std::cout << "4. Compare against paper claims of 1.5-14x speedups in micro-benchmarks" << std::endl;

    return 0;
}