#include <iostream>
#include <cuda_runtime.h>
#include <vector>
#include <chrono>

// Include our Linear Layouts implementation
#include "../linear_layouts_core/binary_matrix.cuh"
#include "../linear_layouts_core/linear_layout.cuh"
#include "../linear_layouts_core/layout_operations.cuh"
#include "../layouts/blocked_layout.cuh"
#include "../layouts/mma_layout.cuh"
#include "../layouts/swizzled_layout.cuh"

using namespace linear_layouts;

/**
 * Demonstration of Linear Layouts functionality
 * 
 * This example reproduces key concepts from the Linear Layouts paper:
 * 1. Creating and manipulating blocked layouts
 * 2. MMA layouts for Tensor Core operations
 * 3. Swizzled layouts for bank conflict reduction
 * 4. Layout conversions and optimizations
 * 5. Performance comparisons
 */

// Test kernel to demonstrate layout application
__global__ void test_blocked_layout_kernel(float* input, float* output, int width, int height) {
    // Create a blocked layout similar to the paper's example
    // 16x16 tensor with 2x2 registers, 4x8 threads, 2x1 warps
    auto blocked_layout = BlockedLayout<16>::create_row_major(2, 2, 4, 8, 2, 1);
    
    int thread_id = threadIdx.x;
    int warp_id = thread_id / 32;
    int lane_id = thread_id % 32;
    
    // Apply layout to determine which tensor elements this thread handles
    for (int reg = 0; reg < 4; reg++) { // 2x2 = 4 registers per thread
        HardwareCoord coord(reg, lane_id, warp_id);
        auto tensor_coord = blocked_layout.apply(coord);
        
        if (tensor_coord.x < width && tensor_coord.y < height) {
            int input_idx = tensor_coord.y * width + tensor_coord.x;
            int output_idx = input_idx; // For this demo, just copy
            
            output[output_idx] = input[input_idx] * 2.0f; // Simple operation
        }
    }
}

// Test kernel for MMA layout
__global__ void test_mma_layout_kernel(float* a_matrix, float* b_matrix, float* c_matrix,
                                      int m, int n, int k) {
    // Create MMA layouts for 16x8x16 operation
    auto mma_a = MMA16x8x16_A(16); // FP16 elements
    auto mma_b = MMA16x8x16_B(16);
    auto mma_c = MMA16x8x16_C(16);
    
    int thread_id = threadIdx.x;
    int warp_id = blockIdx.x;
    
    // For simplicity, just demonstrate layout application
    // Real implementation would use mma.sync instructions
    
    for (int reg = 0; reg < mma_c.registers_per_thread(); reg++) {
        HardwareCoord coord(reg, thread_id, warp_id);
        auto tensor_coord = mma_c.apply(coord);
        
        if (tensor_coord.x < n && tensor_coord.y < m) {
            int c_idx = tensor_coord.y * n + tensor_coord.x;
            
            // Simplified matrix multiplication (not using actual Tensor Cores)
            float sum = 0.0f;
            for (int i = 0; i < k; i++) {
                sum += a_matrix[tensor_coord.y * k + i] * b_matrix[i * n + tensor_coord.x];
            }
            c_matrix[c_idx] = sum;
        }
    }
}

// Test kernel for swizzled memory access
__global__ void test_swizzled_layout_kernel(float* input, float* output, int width, int height) {
    // Create swizzled layout to reduce bank conflicts
    auto swizzled = MMASwizzle16x16(width, height, 32); // FP32 elements
    
    int thread_id = threadIdx.x;
    int block_id = blockIdx.x;
    
    // Shared memory with swizzling
    __shared__ float shared_mem[16 * 16];
    
    // Load data with swizzling pattern
    if (thread_id < width && block_id < height) {
        auto coord = TensorCoord(thread_id, block_id);
        auto swizzled_coord = swizzled.apply_swizzle(coord);
        
        int input_idx = block_id * width + thread_id;
        int shared_idx = swizzled_coord.y * width + swizzled_coord.x;
        
        shared_mem[shared_idx] = input[input_idx];
    }
    
    __syncthreads();
    
    // Process data and write back
    if (thread_id < width && block_id < height) {
        auto coord = TensorCoord(thread_id, block_id);
        auto swizzled_coord = swizzled.apply_swizzle(coord);
        
        int shared_idx = swizzled_coord.y * width + swizzled_coord.x;
        int output_idx = block_id * width + thread_id;
        
        output[output_idx] = shared_mem[shared_idx] * 3.0f;
    }
}

// Host functions for testing and benchmarking
void test_binary_matrix_operations() {
    std::cout << "\n=== Testing Binary Matrix Operations ===\n";
    
    // Test basic matrix operations over F₂
    BinaryMatrix<4, 4> mat1;
    mat1.set_bit(0, 0, true);
    mat1.set_bit(1, 1, true);
    mat1.set_bit(2, 2, true);
    mat1.set_bit(3, 3, true);
    
    std::cout << "Identity matrix:\n";
    mat1.print();
    
    BinaryMatrix<4, 4> mat2;
    mat2.set_bit(0, 1, true);
    mat2.set_bit(1, 0, true);
    mat2.set_bit(2, 3, true);
    mat2.set_bit(3, 2, true);
    
    std::cout << "\nPermutation matrix:\n";
    mat2.print();
    
    auto product = mat1 * mat2;
    std::cout << "\nMatrix product:\n";
    product.print();
    
    auto inverse = mat2.inverse();
    std::cout << "\nInverse matrix:\n";
    inverse.print();
    
    std::cout << "Matrix rank: " << mat2.rank() << "\n";
}

void test_layout_operations() {
    std::cout << "\n=== Testing Layout Operations ===\n";
    
    // Test blocked layout creation and properties
    auto blocked = BlockedLayout<16>::create_row_major(2, 2, 4, 8, 2, 1);
    
    std::cout << "Blocked Layout Properties:\n";
    std::cout << "  Elements per thread: " << blocked.elements_per_thread() << "\n";
    std::cout << "  Vectorization factor: " << blocked.vectorization_factor() << "\n";
    std::cout << "  Coalesced access: " << (blocked.is_coalesced() ? "Yes" : "No") << "\n";
    std::cout << "  Supports 4-element vectors: " << (blocked.supports_vectorized_load(4) ? "Yes" : "No") << "\n";
    
    // Test MMA layout
    auto mma_layout = MMA16x8x16_C(16);
    std::cout << "\nMMA Layout Properties:\n";
    std::cout << "  Registers per thread: " << mma_layout.registers_per_thread() << "\n";
    std::cout << "  Memory utilization: " << mma_layout.memory_utilization() << "\n";
    
    // Test layout conversion
    auto conversion = blocked.convert_to(blocked); // Self-conversion (should be identity)
    std::cout << "  Self-conversion is identity: " << (conversion.matrix().is_zero() ? "No" : "Yes") << "\n";
    
    // Test swizzled layout
    auto swizzled = MMASwizzle16x16(16, 16, 16);
    std::cout << "\nSwizzled Layout Properties:\n";
    std::cout << "  Supports ldmatrix: " << (swizzled.supports_ldmatrix() ? "Yes" : "No") << "\n";
    std::cout << "  Estimated bandwidth: " << swizzled.estimated_bandwidth_utilization() << "\n";
}

void benchmark_layout_conversions() {
    std::cout << "\n=== Benchmarking Layout Conversions ===\n";
    
    const int NUM_ITERATIONS = 1000;
    const int TENSOR_SIZE = 16 * 16;
    
    // Setup test data
    std::vector<float> input(TENSOR_SIZE, 1.0f);
    std::vector<float> output(TENSOR_SIZE, 0.0f);
    
    float *d_input, *d_output;
    cudaMalloc(&d_input, TENSOR_SIZE * sizeof(float));
    cudaMalloc(&d_output, TENSOR_SIZE * sizeof(float));
    cudaMemcpy(d_input, input.data(), TENSOR_SIZE * sizeof(float), cudaMemcpyHostToDevice);
    
    // Benchmark blocked layout kernel
    auto start = std::chrono::high_resolution_clock::now();
    
    for (int i = 0; i < NUM_ITERATIONS; i++) {
        test_blocked_layout_kernel<<<1, 256>>>(d_input, d_output, 16, 16);
    }
    cudaDeviceSynchronize();
    
    auto end = std::chrono::high_resolution_clock::now();
    auto blocked_time = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
    
    std::cout << "Blocked layout kernel time: " << blocked_time / NUM_ITERATIONS << " μs/iteration\n";
    
    // Benchmark swizzled layout kernel
    start = std::chrono::high_resolution_clock::now();
    
    for (int i = 0; i < NUM_ITERATIONS; i++) {
        test_swizzled_layout_kernel<<<16, 16>>>(d_input, d_output, 16, 16);
    }
    cudaDeviceSynchronize();
    
    end = std::chrono::high_resolution_clock::now();
    auto swizzled_time = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
    
    std::cout << "Swizzled layout kernel time: " << swizzled_time / NUM_ITERATIONS << " μs/iteration\n";
    
    // Compare results
    float speedup = static_cast<float>(blocked_time) / swizzled_time;
    std::cout << "Swizzled vs Blocked speedup: " << speedup << "x\n";
    
    // Cleanup
    cudaFree(d_input);
    cudaFree(d_output);
}

void demonstrate_paper_example() {
    std::cout << "\n=== Reproducing Paper's Motivating Example ===\n";
    
    // Reproduce the 16x16 tensor example from the paper
    // Layout A: 2x2 registers, 4x8 threads, 2x1 warps
    auto layout_a = BlockedLayout<16>::create_row_major(2, 2, 4, 8, 2, 1);
    
    // Layout B: Same configuration but column-major (less efficient for row-major data)
    auto layout_b = BlockedLayout<16>::create_column_major(2, 2, 4, 8, 2, 1);
    
    std::cout << "Layout A (row-major friendly):\n";
    std::cout << "  Coalesced: " << (layout_a.is_coalesced() ? "Yes" : "No") << "\n";
    std::cout << "  Vectorization: " << layout_a.vectorization_factor() << " elements\n";
    
    std::cout << "Layout B (column-major):\n";
    std::cout << "  Coalesced: " << (layout_b.is_coalesced() ? "Yes" : "No") << "\n";
    std::cout << "  Vectorization: " << layout_b.vectorization_factor() << " elements\n";
    
    // Show how different threads access memory
    std::cout << "\nMemory access pattern for first few threads:\n";
    for (int thread = 0; thread < 4; thread++) {
        for (int reg = 0; reg < 4; reg++) {
            HardwareCoord coord(reg, thread, 0);
            auto coord_a = layout_a.apply(coord);
            auto coord_b = layout_b.apply(coord);
            
            std::cout << "Thread " << thread << ", Reg " << reg << ": ";
            std::cout << "A(" << coord_a.x << "," << coord_a.y << ") ";
            std::cout << "B(" << coord_b.x << "," << coord_b.y << ")\n";
        }
    }
}

void test_hardware_compatibility() {
    std::cout << "\n=== Testing Hardware Compatibility ===\n";
    
    // Check compute capability
    int device;
    cudaGetDevice(&device);
    
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, device);
    
    std::cout << "GPU: " << prop.name << "\n";
    std::cout << "Compute Capability: " << prop.major << "." << prop.minor << "\n";
    std::cout << "SM Count: " << prop.multiProcessorCount << "\n";
    std::cout << "Max Threads per Block: " << prop.maxThreadsPerBlock << "\n";
    std::cout << "Shared Memory per Block: " << prop.sharedMemPerBlock / 1024 << " KB\n";
    
    // Test MMA support
    bool supports_mma = MMA16x8x16_C::is_supported_on_sm(prop.major * 10 + prop.minor);
    bool supports_wgmma = WGMMA64x256x8_C::is_supported_on_sm(prop.major * 10 + prop.minor);
    
    std::cout << "Supports MMA (Tensor Cores): " << (supports_mma ? "Yes" : "No") << "\n";
    std::cout << "Supports WGMMA: " << (supports_wgmma ? "Yes" : "No") << "\n";
    
    // Test layout properties
    auto blocked = BlockedLayout<16>::create_for_shape(prop.maxThreadsPerBlock / 32, 16);
    std::cout << "Optimal blocked layout vectorization: " << blocked.vectorization_factor() << "\n";
}

int main() {
    std::cout << "Linear Layouts CUDA Implementation Demo\n";
    std::cout << "========================================\n";
    
    // Initialize CUDA
    int device_count;
    cudaGetDeviceCount(&device_count);
    
    if (device_count == 0) {
        std::cerr << "No CUDA devices found!\n";
        return -1;
    }
    
    std::cout << "Found " << device_count << " CUDA device(s)\n";
    
    // Run tests
    try {
        test_binary_matrix_operations();
        test_layout_operations();
        test_hardware_compatibility();
        demonstrate_paper_example();
        benchmark_layout_conversions();
        
        std::cout << "\n=== All Tests Completed Successfully ===\n";
        
    } catch (const std::exception& e) {
        std::cerr << "Error: " << e.what() << "\n";
        return -1;
    }
    
    return 0;
}