#pragma once

#include "../linear_layouts_core/linear_layout.cuh"
#include "../linear_layouts_core/layout_operations.cuh"

namespace linear_layouts {

/**
 * Blocked Layout Implementation
 * 
 * Blocked layouts tile tensors across registers, threads, and warps
 * in a hierarchical pattern. This is the most common layout type
 * for contiguous memory access patterns.
 * 
 * Based on the motivating example from the Linear Layouts paper:
 * A 16×16 tensor with 2×2 registers, 4×8 threads, and 2×1 warps
 */
template<int OUTPUT_DIMS = 16, int REG_BITS = 8, int THREAD_BITS = 10, int WARP_BITS = 5>
class BlockedLayout {
public:
    static constexpr int INPUT_DIMS = REG_BITS + THREAD_BITS + WARP_BITS;
    using LayoutType = LinearLayout<INPUT_DIMS, OUTPUT_DIMS, REG_BITS, THREAD_BITS, WARP_BITS>;

private:
    LayoutType layout_;

    // Tile configuration describing how the tensor is partitioned across
    // the register / thread / warp hierarchy.
    struct TileConfig {
        int reg_tile_x, reg_tile_y;       // Register tile dimensions
        int thread_tile_x, thread_tile_y; // Thread tile dimensions
        int warp_tile_x, warp_tile_y;     // Warp tile dimensions
        int order[3];                     // Dimension order (0=x, 1=y, 2=z)
    };

    TileConfig config_;

    // ---- Device-portable bit helpers ----------------------------------
    // The previous implementation used __builtin_ctz/__builtin_clz and
    // std::min inside __host__ __device__ functions; neither is guaranteed
    // to be usable in device compilation under nvcc, so small portable
    // equivalents are provided here instead.

    // Minimum of two ints (std::min is not device-callable here).
    __host__ __device__ static int min_int(int a, int b) {
        return a < b ? a : b;
    }

    // True iff v is a strictly positive power of two. Note the old check
    // `(v & (v - 1)) == 0` accepted v == 0, which then produced undefined
    // behavior in __builtin_ctz(0).
    __host__ __device__ static bool is_pow2(int v) {
        return v > 0 && (v & (v - 1)) == 0;
    }

    // floor(log2(v)) for v >= 1; equals the trailing-zero count when v is
    // a power of two (replaces __builtin_ctz on validated inputs).
    __host__ __device__ static int log2_floor(int v) {
        int bits = 0;
        while (v > 1) {
            v >>= 1;
            ++bits;
        }
        return bits;
    }

    // Largest power of two <= v (v >= 1). Replaces the opaque
    // `1 << (__builtin_clz(v) ^ 31)` idiom.
    __host__ __device__ static int floor_pow2(int v) {
        return 1 << log2_floor(v);
    }

public:
    /**
     * Constructor for blocked layout.
     *
     * @param reg_tile_x: Number of elements per thread in X dimension (power of 2)
     * @param reg_tile_y: Number of elements per thread in Y dimension (power of 2)
     * @param thread_tile_x: Number of threads per warp in X dimension (power of 2)
     * @param thread_tile_y: Number of threads per warp in Y dimension (power of 2)
     * @param warp_tile_x: Number of warps per block in X dimension (power of 2)
     * @param warp_tile_y: Number of warps per block in Y dimension (power of 2)
     * @param order: Array of 3 ints giving dimension ordering, fastest to
     *               slowest (0=x, 1=y, 2=z); nullptr selects {0, 1, 2}.
     */
    __host__ __device__ BlockedLayout(
        int reg_tile_x = 2, int reg_tile_y = 2,
        int thread_tile_x = 4, int thread_tile_y = 8,
        int warp_tile_x = 2, int warp_tile_y = 1,
        const int* order = nullptr) {
        
        // Store configuration
        config_.reg_tile_x = reg_tile_x;
        config_.reg_tile_y = reg_tile_y;
        config_.thread_tile_x = thread_tile_x;
        config_.thread_tile_y = thread_tile_y;
        config_.warp_tile_x = warp_tile_x;
        config_.warp_tile_y = warp_tile_y;
        
        // Default order: X fastest, then Y, then Z
        if (order == nullptr) {
            config_.order[0] = 0; // X
            config_.order[1] = 1; // Y
            config_.order[2] = 2; // Z
        } else {
            config_.order[0] = order[0];
            config_.order[1] = order[1];
            config_.order[2] = order[2];
        }
        
        // Every tile extent must be a strictly positive power of two.
        // (The previous check allowed 0, which made the bit-count below UB.)
        assert(is_pow2(reg_tile_x));
        assert(is_pow2(reg_tile_y));
        assert(is_pow2(thread_tile_x));
        assert(is_pow2(thread_tile_y));
        assert(is_pow2(warp_tile_x));
        assert(is_pow2(warp_tile_y));
        
        // Build the layout matrix
        build_layout_matrix();
    }

private:
    // Populate layout_'s binary matrix from config_. Input bits are
    // assigned consecutively per hardware dimension (reg/thread/warp),
    // walking the tensor axes in config_.order so that the first-listed
    // axis owns the fastest-varying (lowest) input bits.
    __host__ __device__ void build_layout_matrix() {
        // Clear the layout matrix
        layout_ = LayoutType();
        
        // Number of bits contributed by each tile extent
        int reg_bits_x = log2_floor(config_.reg_tile_x);
        int reg_bits_y = log2_floor(config_.reg_tile_y);
        int thread_bits_x = log2_floor(config_.thread_tile_x);
        int thread_bits_y = log2_floor(config_.thread_tile_y);
        int warp_bits_x = log2_floor(config_.warp_tile_x);
        int warp_bits_y = log2_floor(config_.warp_tile_y);
        
        // Output dimension split (assuming 2D tensor for simplicity):
        // the low output_dim_x bits encode x, the rest encode y.
        int output_dim_x = OUTPUT_DIMS / 2;
        int output_dim_y = OUTPUT_DIMS - output_dim_x;
        
        // The per-axis bits must fit inside their output sub-dimension;
        // previously an oversized configuration silently wrote out of range.
        assert(reg_bits_x + thread_bits_x + warp_bits_x <= output_dim_x);
        assert(reg_bits_y + thread_bits_y + warp_bits_y <= output_dim_y);
        
        // Input-bit cursors are shared across axes so that the order array
        // controls which axis receives the low input bits.
        int reg_input_bit = 0;
        int thread_input_bit = 0;
        int warp_input_bit = 0;
        
        for (int order_idx = 0; order_idx < 3; order_idx++) {
            int dim = config_.order[order_idx];
            
            if (dim == 0) { // X dimension
                // Register bits for X
                for (int i = 0; i < reg_bits_x; i++) {
                    layout_.template set_dimension_bits<Dimension::REG>(
                        i, reg_input_bit++);
                }
                
                // Thread bits for X
                for (int i = 0; i < thread_bits_x; i++) {
                    layout_.template set_dimension_bits<Dimension::THREAD>(
                        reg_bits_x + i, thread_input_bit++);
                }
                
                // Warp bits for X
                for (int i = 0; i < warp_bits_x; i++) {
                    layout_.template set_dimension_bits<Dimension::WARP>(
                        reg_bits_x + thread_bits_x + i, warp_input_bit++);
                }
            }
            else if (dim == 1) { // Y dimension
                // Register bits for Y (offset past the x sub-dimension)
                for (int i = 0; i < reg_bits_y; i++) {
                    layout_.template set_dimension_bits<Dimension::REG>(
                        output_dim_x + i, reg_input_bit++);
                }
                
                // Thread bits for Y
                for (int i = 0; i < thread_bits_y; i++) {
                    layout_.template set_dimension_bits<Dimension::THREAD>(
                        output_dim_x + reg_bits_y + i, thread_input_bit++);
                }
                
                // Warp bits for Y
                for (int i = 0; i < warp_bits_y; i++) {
                    layout_.template set_dimension_bits<Dimension::WARP>(
                        output_dim_x + reg_bits_y + thread_bits_y + i, warp_input_bit++);
                }
            }
            // Z dimension can be added for 3D tensors
        }
    }

public:
    // Get the underlying linear layout (const and mutable overloads)
    __host__ __device__ const LayoutType& get_layout() const {
        return layout_;
    }
    
    __host__ __device__ LayoutType& get_layout() {
        return layout_;
    }

    /**
     * Apply the layout to a hardware coordinate, producing the (x, y)
     * tensor coordinate it maps to. The low OUTPUT_DIMS/2 bits of the
     * linear result encode x; the remaining bits encode y.
     */
    __host__ __device__ TensorCoord apply(const HardwareCoord& coord) const {
        uint64_t result = layout_.apply(coord);
        
        // Extract X and Y coordinates from the packed linear result
        int output_dim_x = OUTPUT_DIMS / 2;
        uint32_t x_mask = (1U << output_dim_x) - 1;
        uint32_t y_mask = (1U << (OUTPUT_DIMS - output_dim_x)) - 1;
        
        int x = static_cast<int>(result & x_mask);
        int y = static_cast<int>((result >> output_dim_x) & y_mask);
        
        return TensorCoord(x, y);
    }

    /**
     * Compute the row-major linear address of the tensor element accessed
     * by (reg, thread, warp) for a tensor of the given width.
     *
     * @note tensor_height is currently unused (row-major addressing only
     *       needs the width); it is kept for interface stability.
     */
    __host__ __device__ int tensor_address(uint8_t reg, uint16_t thread, uint8_t warp,
                                          int tensor_width, int tensor_height) const {
        (void)tensor_height; // see note above
        auto coord = apply(HardwareCoord(reg, thread, warp));
        return coord.y * tensor_width + coord.x;
    }

    // Heuristic check for coalesced memory access: the layout should keep
    // at least one full register-tile extent of elements contiguous.
    // (The old code also extracted the REG sublayout and never used it;
    // that dead call has been removed.)
    __host__ __device__ bool is_coalesced() const {
        int contiguous = layout_.contiguous_elements();
        return contiguous >= config_.reg_tile_x || contiguous >= config_.reg_tile_y;
    }

    // Number of contiguous elements available for vectorized memory ops
    __host__ __device__ int vectorization_factor() const {
        return layout_.contiguous_elements();
    }

    // Number of tensor elements owned by each thread
    __host__ __device__ int elements_per_thread() const {
        return config_.reg_tile_x * config_.reg_tile_y;
    }

    // Get tile configuration
    __host__ __device__ const TileConfig& get_config() const {
        return config_;
    }

    // Factory: row-major ordering (X fastest)
    __host__ __device__ static BlockedLayout create_row_major(
        int reg_x = 2, int reg_y = 2,
        int thread_x = 4, int thread_y = 8,
        int warp_x = 2, int warp_y = 1) {
        
        int order[3] = {0, 1, 2}; // X fastest (row-major)
        return BlockedLayout(reg_x, reg_y, thread_x, thread_y, warp_x, warp_y, order);
    }

    // Factory: column-major ordering (Y fastest)
    __host__ __device__ static BlockedLayout create_column_major(
        int reg_x = 2, int reg_y = 2,
        int thread_x = 4, int thread_y = 8,
        int warp_x = 2, int warp_y = 1) {
        
        int order[3] = {1, 0, 2}; // Y fastest (column-major)
        return BlockedLayout(reg_x, reg_y, thread_x, thread_y, warp_x, warp_y, order);
    }

    /**
     * Factory: layout tuned for a specific 2D tensor shape.
     *
     * Tile extents are clamped to the tensor extents and rounded DOWN to
     * the nearest power of two, so the constructor's power-of-two
     * precondition always holds. Uses a single warp (1x1).
     */
    __host__ __device__ static BlockedLayout create_for_shape(
        int tensor_width, int tensor_height,
        int preferred_vector_size = 4) {
        
        // Clamp to at least 1 so floor_pow2 and the divisions below are
        // well-defined even for degenerate shapes (the old clz-based code
        // had undefined behavior for zero extents).
        if (tensor_width < 1) tensor_width = 1;
        if (tensor_height < 1) tensor_height = 1;
        if (preferred_vector_size < 1) preferred_vector_size = 1;
        
        // Register tile: up to preferred_vector_size elements along X,
        // up to 2 along Y, each rounded down to a power of two.
        int reg_x = floor_pow2(min_int(preferred_vector_size, tensor_width));
        int reg_y = floor_pow2(min_int(2, tensor_height));
        
        // Threads cover the remaining extent, capped at 32 per axis.
        int thread_x = floor_pow2(min_int(32, tensor_width / reg_x));
        int thread_y = floor_pow2(min_int(32, tensor_height / reg_y));
        
        return create_row_major(reg_x, reg_y, thread_x, thread_y, 1, 1);
    }

    /**
     * Convert to another blocked layout.
     *
     * @todo Implement real conversion; currently always returns the
     *       identity transformation regardless of `target`.
     */
    __host__ __device__ auto convert_to(const BlockedLayout& target) const {
        (void)target; // unused until real conversion is implemented
        auto identity_matrix = BinaryMatrix<INPUT_DIMS, INPUT_DIMS>::identity();
        return LinearLayout<INPUT_DIMS, INPUT_DIMS, REG_BITS, THREAD_BITS, WARP_BITS>(identity_matrix);
    }

    // True when the layout keeps at least vector_size elements contiguous
    __host__ __device__ bool supports_vectorized_load(int vector_size) const {
        return vectorization_factor() >= vector_size;
    }

    // Heuristic ldmatrix compatibility check: ldmatrix wants an 8x8
    // fragment structure, approximated here as >= 4 elements per thread
    // with a thread tile of at least 4x8.
    __host__ __device__ bool supports_ldmatrix() const {
        return elements_per_thread() >= 4 && 
               config_.thread_tile_x >= 4 && config_.thread_tile_y >= 8;
    }

    // Print a human-readable summary of the layout (host only)
    __host__ void print_info() const {
        printf("BlockedLayout Configuration:\n");
        printf("  Register tile: %dx%d\n", config_.reg_tile_x, config_.reg_tile_y);
        printf("  Thread tile: %dx%d\n", config_.thread_tile_x, config_.thread_tile_y);
        printf("  Warp tile: %dx%d\n", config_.warp_tile_x, config_.warp_tile_y);
        printf("  Dimension order: [%d, %d, %d]\n", 
               config_.order[0], config_.order[1], config_.order[2]);
        printf("  Elements per thread: %d\n", elements_per_thread());
        printf("  Vectorization factor: %d\n", vectorization_factor());
        printf("  Coalesced access: %s\n", is_coalesced() ? "Yes" : "No");
        printf("  Matrix representation:\n");
        layout_.print();
    }
};

// Common blocked-layout aliases. The numeric suffix appears to denote the
// total number of output coordinate bits, which BlockedLayout splits evenly
// between x and y (e.g. BlockedLayout16x16 -> 8 bits per axis) —
// TODO(review): confirm the intended naming convention.
using BlockedLayout8x8 = BlockedLayout<8>;
using BlockedLayout16x16 = BlockedLayout<16>;
using BlockedLayout32x32 = BlockedLayout<32>;

// Standard GPU configuration: 2^8 = 256 regs, 2^10 = 1024 threads,
// 2^5 = 32 warps (same hardware bit widths as the template defaults)
using StandardBlockedLayout = BlockedLayout<16, 8, 10, 5>;

} // namespace linear_layouts