#pragma once

#include "../linear_layouts_core/linear_layout.cuh"
#include "../linear_layouts_core/layout_operations.cuh"

namespace linear_layouts {

/**
 * MMA Layout Implementation
 * 
 * MMA layouts are specialized for matrix multiplication operations
 * using Tensor Cores (mma, wgmma instructions) on NVIDIA GPUs.
 * These layouts optimize data arrangement for maximum throughput
 * on hardware matrix multiplication units.
 * 
 * Supports:
 * - mma.sync (Ampere/Ada): 16x8x16, 16x8x8 operations
 * - wgmma (Hopper+): 64x256x8, 64x128x16 operations
 * - Mixed precision: FP16, BF16, INT8, FP8, MXFP4
 */

// Supported Tensor Core instruction tile shapes, named M x N x K.
// Availability depends on compute capability (see is_supported_on_sm()).
enum class MMAVariant {
    MMA_16x8x16,   // Standard mma.sync on Ampere/Ada (SM_80+)
    MMA_16x8x8,    // Half-precision mma.sync variant
    WGMMA_64x256x8, // wgmma on Hopper (SM_90+), warp-group scope
    WGMMA_64x128x16, // wgmma variant (narrower N, deeper K)
    WGMMA_64x64x32   // Small wgmma variant
};

// Which operand of the MMA operation D = A * B + C a layout describes;
// each operand has a distinct register/thread distribution.
enum class MMAOperand {
    A_MATRIX,    // Left operand (M x K)
    B_MATRIX,    // Right operand (K x N)  
    C_MATRIX     // Output/accumulator (M x N)
};

template<MMAVariant VARIANT = MMAVariant::MMA_16x8x16, 
         MMAOperand OPERAND = MMAOperand::C_MATRIX,
         int OUTPUT_DIMS = 16, int REG_BITS = 8, int THREAD_BITS = 10, int WARP_BITS = 5>
class MMALayout {
public:
    // Total input bit-width: register + thread + warp index bits.
    static constexpr int INPUT_DIMS = REG_BITS + THREAD_BITS + WARP_BITS;
    using LayoutType = LinearLayout<INPUT_DIMS, OUTPUT_DIMS, REG_BITS, THREAD_BITS, WARP_BITS>;

private:
    LayoutType layout_;

    // Per-variant/operand description of the hardware tile.
    // Fields carry default initializers so that variants not yet handled in
    // initialize_config() (see the "Add other variants" note there) leave
    // well-defined zero/neutral values instead of indeterminate garbage,
    // which apply() and memory_utilization() would otherwise consume.
    struct MMAConfig {
        int m_dim = 0;                  // Tile rows (M)
        int n_dim = 0;                  // Tile columns (N)
        int k_dim = 0;                  // Reduction depth (K)
        int regs_per_thread = 0;        // Registers required per thread (0 = operand not register-resident)
        int threads_per_warp = 32;      // Threads participating (usually 32)
        int warps_per_warpgroup = 1;    // Warps in warp group (1 for mma, 4 for wgmma)
        int element_bitwidth = 16;      // Bits per element
    };

    MMAConfig config_;

    // Device-safe prefix test. strncmp is a host C-library function and is
    // not callable from device code, so this class rolls its own minimal
    // __host__ __device__ replacement for is_compatible_with_instruction().
    __host__ __device__ static bool has_prefix(const char* s, const char* prefix) {
        if (s == nullptr || prefix == nullptr) {
            return false;
        }
        while (*prefix != '\0') {
            if (*s != *prefix) {
                return false;
            }
            ++s;
            ++prefix;
        }
        return true;
    }

public:
    /**
     * Build an MMA layout for the compile-time (VARIANT, OPERAND) pair.
     *
     * @param element_bitwidth  Bits per matrix element (default 16, i.e.
     *                          FP16/BF16). Assumed <= 32; values > 32 would
     *                          make the 32/element_bitwidth register math
     *                          degenerate to 0 — TODO confirm intended range.
     */
    __host__ __device__ MMALayout(int element_bitwidth = 16) {
        // Initialize configuration based on variant and operand
        initialize_config(element_bitwidth);

        // Build the layout matrix
        build_layout_matrix();
    }

private:
    // Fill config_ with the tile shape and register budget implied by the
    // compile-time variant/operand. Unhandled variants keep the struct's
    // zeroed defaults.
    __host__ __device__ void initialize_config(int element_bitwidth) {
        config_.element_bitwidth = element_bitwidth;
        config_.threads_per_warp = 32;

        if constexpr (VARIANT == MMAVariant::MMA_16x8x16) {
            config_.m_dim = 16;
            config_.n_dim = 8;
            config_.k_dim = 16;
            config_.warps_per_warpgroup = 1;

            // Register counts scale with element width: (32 / bitwidth)
            // elements fit per 32-bit register.
            if constexpr (OPERAND == MMAOperand::A_MATRIX) {
                config_.regs_per_thread = (32 / element_bitwidth) * 2; // 16x16 / 32 threads
            } else if constexpr (OPERAND == MMAOperand::B_MATRIX) {
                config_.regs_per_thread = (32 / element_bitwidth) * 1; // 8x16 / 32 threads
            } else { // C_MATRIX
                config_.regs_per_thread = (32 / element_bitwidth) * 4; // 16x8 / 32 threads
            }
        }
        else if constexpr (VARIANT == MMAVariant::WGMMA_64x256x8) {
            config_.m_dim = 64;
            config_.n_dim = 256;
            config_.k_dim = 8;
            config_.warps_per_warpgroup = 4; // warp group = 128 threads

            if constexpr (OPERAND == MMAOperand::A_MATRIX) {
                config_.regs_per_thread = (32 / element_bitwidth) * 16; // 64x8 / 128 threads
            } else if constexpr (OPERAND == MMAOperand::B_MATRIX) {
                // wgmma B operands are sourced from shared memory, not
                // registers — regs_per_thread intentionally 0.
                config_.regs_per_thread = 0;
            } else { // C_MATRIX
                config_.regs_per_thread = (32 / element_bitwidth) * 64; // 64x256 / 128 threads
            }
        }
        // Add other variants as needed...
    }

    // Dispatch to the variant-specific layout constructor. Unhandled
    // variants leave layout_ default-constructed.
    __host__ __device__ void build_layout_matrix() {
        layout_ = LayoutType();

        if constexpr (VARIANT == MMAVariant::MMA_16x8x16) {
            build_mma_16x8x16_layout();
        } else if constexpr (VARIANT == MMAVariant::WGMMA_64x256x8) {
            build_wgmma_64x256x8_layout();
        }
        // Add other variants...
    }

    // Populate layout_ for the mma.sync.aligned m16n8k16 shape.
    // Bit mappings follow the instruction's fragment layout as described in
    // the Linear Layouts paper appendix. NOTE(review): the exact
    // (output-bit, input-bit) argument order of set_dimension_bits is
    // defined in layout_operations.cuh — the comments below assume the
    // first argument is the output coordinate bit; confirm there.
    __host__ __device__ void build_mma_16x8x16_layout() {
        if constexpr (OPERAND == MMAOperand::C_MATRIX) {
            // Output matrix C: 16x8 elements distributed across 32 threads.
            // Each thread gets 4 elements (2x2 sub-tile).
            // Bit budget: 16x8 = 128 elements = 7 bits.

            int bit_idx = 0;

            // Register bits: each thread handles 2x2 = 4 elements
            layout_.template set_dimension_bits<Dimension::REG>(0, bit_idx++); // reg[0] -> j[0]
            layout_.template set_dimension_bits<Dimension::REG>(4, bit_idx++); // reg[1] -> i[0]

            // Thread bits: 32 threads in the mma.sync lane pattern
            layout_.template set_dimension_bits<Dimension::THREAD>(1, bit_idx++); // thread[0] -> j[1]
            layout_.template set_dimension_bits<Dimension::THREAD>(2, bit_idx++); // thread[1] -> j[2]
            layout_.template set_dimension_bits<Dimension::THREAD>(5, bit_idx++); // thread[2] -> i[1]
            layout_.template set_dimension_bits<Dimension::THREAD>(6, bit_idx++); // thread[3] -> i[2]
            layout_.template set_dimension_bits<Dimension::THREAD>(7, bit_idx++); // thread[4] -> i[3]

            // Third register bit, completing the 4 elements per thread.
            // (Original comment labeled this reg[1]; it is the third REG bit
            // placed — presumably reg[2] -> j[3]. TODO confirm vs. spec.)
            layout_.template set_dimension_bits<Dimension::REG>(3, bit_idx++);
        }
        else if constexpr (OPERAND == MMAOperand::A_MATRIX) {
            // Input matrix A: 16x16 elements over the reduction dimension,
            // distributed across threads for data reuse.

            int bit_idx = 0;

            // Register bits for the k-dimension: log2(elements per 32-bit
            // register). element_bitwidth > 32 would make this ctz(0) (UB) —
            // see the constructor precondition.
            int regs_for_k = __builtin_ctz(32 / config_.element_bitwidth);
            for (int i = 0; i < regs_for_k; i++) {
                layout_.template set_dimension_bits<Dimension::REG>(i, bit_idx++);
            }

            // Thread bits for m-dimension distribution
            layout_.template set_dimension_bits<Dimension::THREAD>(regs_for_k, bit_idx++);
            layout_.template set_dimension_bits<Dimension::THREAD>(regs_for_k + 1, bit_idx++);
            layout_.template set_dimension_bits<Dimension::THREAD>(regs_for_k + 2, bit_idx++);

            // Additional register bit for m-dimension
            layout_.template set_dimension_bits<Dimension::REG>(regs_for_k, bit_idx++);
        }
        else if constexpr (OPERAND == MMAOperand::B_MATRIX) {
            // Input matrix B: 16x8 elements. Similar scheme to A, but the
            // register bits land in the upper half of the output coordinate
            // space (the n-side), hence the OUTPUT_DIMS/2 offset.

            int bit_idx = 0;

            // Register bits for k-dimension
            int regs_for_k = __builtin_ctz(32 / config_.element_bitwidth);
            for (int i = 0; i < regs_for_k; i++) {
                layout_.template set_dimension_bits<Dimension::REG>(OUTPUT_DIMS/2 + i, bit_idx++);
            }

            // Thread bits for n-dimension distribution
            layout_.template set_dimension_bits<Dimension::THREAD>(regs_for_k, bit_idx++);
            layout_.template set_dimension_bits<Dimension::THREAD>(regs_for_k + 1, bit_idx++);
            layout_.template set_dimension_bits<Dimension::THREAD>(regs_for_k + 2, bit_idx++);
        }
    }

    // Populate layout_ for the wgmma m64n256k8 shape. WGMMA operates on a
    // warp group (4 warps = 128 threads); larger tiles enable higher
    // throughput.
    __host__ __device__ void build_wgmma_64x256x8_layout() {
        if constexpr (OPERAND == MMAOperand::C_MATRIX) {
            // 64x256 output distributed across 128 threads; each thread
            // handles multiple elements.

            int bit_idx = 0;

            // Register distribution: 2^6 = 64 elements per thread
            for (int i = 0; i < 6; i++) {
                layout_.template set_dimension_bits<Dimension::REG>(i, bit_idx++);
            }

            // Thread distribution: 2^7 = 128 threads across the warp group
            for (int i = 0; i < 7; i++) {
                layout_.template set_dimension_bits<Dimension::THREAD>(6 + i, bit_idx++);
            }

            // Warp bits covering the 4 warps of the warp group
            layout_.template set_dimension_bits<Dimension::WARP>(13, bit_idx++);
            layout_.template set_dimension_bits<Dimension::WARP>(14, bit_idx++);
        }
        // A and B matrices for WGMMA typically use TMA (Tensor Memory
        // Accelerator) and have different layout requirements.
    }

public:
    // Read-only access to the underlying linear layout.
    __host__ __device__ const LayoutType& get_layout() const {
        return layout_;
    }

    // Mutable access to the underlying linear layout.
    __host__ __device__ LayoutType& get_layout() {
        return layout_;
    }

    /**
     * Map a hardware coordinate (register/thread/warp indices) to the
     * tensor coordinate it owns.
     *
     * Requires m_dim/n_dim to be powers of two (bit decode via ctz/masks).
     * Returns TensorCoord(0, 0) when the variant's config was never
     * initialized (m_dim/n_dim still 0) — previously this fed 0 into
     * __builtin_ctz, which is undefined behavior.
     */
    __host__ __device__ TensorCoord apply(const HardwareCoord& coord) const {
        uint64_t result = layout_.apply(coord);

        if (config_.m_dim <= 0 || config_.n_dim <= 0) {
            return TensorCoord(0, 0);
        }

        // Decode based on matrix dimensions
        int m_bits = __builtin_ctz(config_.m_dim);
        int n_bits = __builtin_ctz(config_.n_dim);

        uint32_t m_mask = (1U << m_bits) - 1;
        uint32_t n_mask = (1U << n_bits) - 1;

        int m = static_cast<int>(result & m_mask);
        int n = static_cast<int>((result >> m_bits) & n_mask);

        return TensorCoord(n, m); // Note: may need transpose depending on layout
    }

    // Get the resolved tile/register configuration.
    __host__ __device__ const MMAConfig& get_config() const {
        return config_;
    }

    // Check whether this layout matches the given PTX instruction family
    // by name prefix ("mma.sync..." / "wgmma..."). Device-safe: avoids
    // strncmp, which is not available in device code.
    __host__ __device__ bool is_compatible_with_instruction(const char* instruction_name) const {
        if constexpr (VARIANT == MMAVariant::MMA_16x8x16) {
            return has_prefix(instruction_name, "mma.sync");
        } else if constexpr (VARIANT == MMAVariant::WGMMA_64x256x8) {
            return has_prefix(instruction_name, "wgmma");
        }
        return false;
    }

    // Get the shared-memory layout to stage this operand through, using an
    // MMA-style swizzle to reduce bank conflicts on load. The C operand
    // does not stage through shared memory and gets the identity layout.
    __host__ __device__ auto get_shared_memory_layout() const {
        if constexpr (OPERAND == MMAOperand::A_MATRIX) {
            // A matrix: row-major with swizzling
            return create_mma_swizzled_layout(config_.m_dim, config_.k_dim);
        } else if constexpr (OPERAND == MMAOperand::B_MATRIX) {
            // B matrix: column-major with swizzling
            return create_mma_swizzled_layout(config_.k_dim, config_.n_dim);
        } else {
            return LayoutType::identity();
        }
    }

private:
    // Build an MMA-compatible swizzled layout per the paper's definition:
    //   offset = (((i / per_phase) % max_phase) ^ (j / vec)) * vec + (j % vec)
    // NOTE(review): the per-bit set_dimension_bits mapping below looks like
    // a placeholder (it sets bit -> bit regardless of the computed
    // swizzled_offset) — confirm against the intended swizzle encoding.
    __host__ __device__ LayoutType create_mma_swizzled_layout(int rows, int cols) const {
        LayoutType swizzled_layout;

        // Swizzle parameters (from paper)
        int vec = 4;           // Vectorization factor
        int per_phase = 8;     // Elements per phase
        int max_phase = 4;     // Maximum phases

        for (int i = 0; i < rows; i++) {
            for (int j = 0; j < cols; j++) {
                int phase = (i / per_phase) % max_phase;
                int vec_group = j / vec;
                int vec_offset = j % vec;

                int swizzled_offset = (phase ^ vec_group) * vec + vec_offset;
                int linear_idx = i * cols + swizzled_offset;

                // Route each set bit of the swizzled linear index to the
                // input dimension that owns that bit position.
                for (int bit = 0; bit < OUTPUT_DIMS; bit++) {
                    if (linear_idx & (1 << bit)) {
                        if (bit < REG_BITS) {
                            swizzled_layout.template set_dimension_bits<Dimension::REG>(bit, bit);
                        } else if (bit < REG_BITS + THREAD_BITS) {
                            swizzled_layout.template set_dimension_bits<Dimension::THREAD>(bit, bit);
                        } else {
                            swizzled_layout.template set_dimension_bits<Dimension::WARP>(bit, bit);
                        }
                    }
                }
            }
        }

        return swizzled_layout;
    }

public:
    // Conversion layout between operand layouts of the same variant
    // (e.g. A -> C): compose(target^-1, source).
    template<MMAOperand TARGET_OPERAND>
    __host__ __device__ auto convert_to() const {
        MMALayout<VARIANT, TARGET_OPERAND, OUTPUT_DIMS, REG_BITS, THREAD_BITS, WARP_BITS> target;
        return compose(target.get_layout().inverse(), layout_);
    }

    // Whether conversion to TARGET_OPERAND's layout can be done with warp
    // shuffles alone (no shared-memory round trip).
    template<MMAOperand TARGET_OPERAND>
    __host__ __device__ bool can_shuffle_to() const {
        MMALayout<VARIANT, TARGET_OPERAND, OUTPUT_DIMS, REG_BITS, THREAD_BITS, WARP_BITS> target;
        return can_convert_via_warp_shuffle(layout_, target.get_layout());
    }

    // Register usage per thread for this operand (0 when the operand is
    // not register-resident, e.g. WGMMA B).
    __host__ __device__ int registers_per_thread() const {
        return config_.regs_per_thread;
    }

    // Ratio of tile elements to total allocated registers across the warp
    // group. Returns 0.0f when the operand holds no registers
    // (regs_per_thread == 0) — previously this divided by zero for the
    // WGMMA B operand.
    __host__ __device__ float memory_utilization() const {
        int total_elements = config_.m_dim * config_.n_dim;
        int total_threads = config_.threads_per_warp * config_.warps_per_warpgroup;
        int denominator = total_threads * config_.regs_per_thread;

        if (denominator <= 0) {
            return 0.0f;
        }
        return static_cast<float>(total_elements) / static_cast<float>(denominator);
    }

    // Factory helpers for common variants. The return types name the
    // concrete instantiations being constructed: the previous declared
    // return type (the injected class name, i.e. the *current*
    // instantiation) only compiled when VARIANT already matched the
    // factory's variant, making the factories unusable from any other
    // instantiation.
    __host__ __device__ static MMALayout<MMAVariant::MMA_16x8x16, OPERAND>
    create_ampere_mma16x8x16() {
        return MMALayout<MMAVariant::MMA_16x8x16, OPERAND>();
    }

    __host__ __device__ static MMALayout<MMAVariant::WGMMA_64x256x8, OPERAND>
    create_hopper_wgmma64x256x8() {
        return MMALayout<MMAVariant::WGMMA_64x256x8, OPERAND>();
    }

    // Whether the selected variant is supported on the given compute
    // capability (e.g. 80 = Ampere, 90 = Hopper).
    __host__ __device__ static bool is_supported_on_sm(int compute_capability) {
        if constexpr (VARIANT == MMAVariant::MMA_16x8x16) {
            return compute_capability >= 80; // Ampere+
        } else if constexpr (VARIANT == MMAVariant::WGMMA_64x256x8) {
            return compute_capability >= 90; // Hopper+
        }
        return false;
    }

    // Dump the configuration and layout matrix to stdout (host only).
    __host__ void print_info() const {
        printf("MMA Layout Configuration:\n");
        printf("  Variant: ");
        if constexpr (VARIANT == MMAVariant::MMA_16x8x16) printf("MMA_16x8x16\n");
        else if constexpr (VARIANT == MMAVariant::WGMMA_64x256x8) printf("WGMMA_64x256x8\n");

        printf("  Operand: ");
        if constexpr (OPERAND == MMAOperand::A_MATRIX) printf("A_MATRIX\n");
        else if constexpr (OPERAND == MMAOperand::B_MATRIX) printf("B_MATRIX\n");
        else printf("C_MATRIX\n");

        printf("  Matrix dimensions: %dx%dx%d\n", config_.m_dim, config_.n_dim, config_.k_dim);
        printf("  Registers per thread: %d\n", config_.regs_per_thread);
        printf("  Threads per warp: %d\n", config_.threads_per_warp);
        printf("  Warps per warpgroup: %d\n", config_.warps_per_warpgroup);
        printf("  Element bitwidth: %d\n", config_.element_bitwidth);
        printf("  Memory utilization: %.2f\n", memory_utilization());
        printf("  Matrix representation:\n");
        layout_.print();
    }
};

// Common MMA layout type aliases (default OUTPUT_DIMS/REG/THREAD/WARP bit
// widths). Named <variant>_<operand>.
using MMA16x8x16_C = MMALayout<MMAVariant::MMA_16x8x16, MMAOperand::C_MATRIX>;
using MMA16x8x16_A = MMALayout<MMAVariant::MMA_16x8x16, MMAOperand::A_MATRIX>;
using MMA16x8x16_B = MMALayout<MMAVariant::MMA_16x8x16, MMAOperand::B_MATRIX>;

// WGMMA B is fed from shared memory, so no register-layout alias exists for it.
using WGMMA64x256x8_C = MMALayout<MMAVariant::WGMMA_64x256x8, MMAOperand::C_MATRIX>;
using WGMMA64x256x8_A = MMALayout<MMAVariant::WGMMA_64x256x8, MMAOperand::A_MATRIX>;

} // namespace linear_layouts