#pragma once

#include "binary_matrix.cuh"

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <tuple>
#include <type_traits>

namespace linear_layouts {

/**
 * Labels for the vector-space dimensions used in the GPU layout algebra.
 * REG/THREAD/WARP mirror the hardware execution hierarchy; BLOCK is
 * reserved for future extension and TENSOR names the logical element space.
 */
enum class Dimension {
    REG = 0,     // register index within a thread
    THREAD = 1,  // thread index
    WARP = 2,    // warp index
    BLOCK = 3,   // block dimension (for future extension)
    TENSOR = 4   // logical tensor dimension
};

/**
 * Compile-time descriptor of a labeled vector space.
 *
 * SIZE is the actual extent of the space (e.g. 256 registers), not its bit
 * count; log2_size derives the number of F2 index bits from it. SIZE must
 * be a strictly positive power of two.
 */
template<Dimension DIM, int SIZE>
struct LabeledSpace {
    // The old check `(SIZE & (SIZE - 1)) == 0` alone accepted SIZE == 0
    // (0 & -1 == 0); require a strictly positive power of two.
    static_assert(SIZE > 0 && (SIZE & (SIZE - 1)) == 0,
                  "Size must be a positive power of 2");

    static constexpr Dimension dimension = DIM;
    static constexpr int size = SIZE;
    // ctz of a power of two is its log2 (GCC/Clang/NVCC builtin).
    static constexpr int log2_size = __builtin_ctz(SIZE);
};

// Common labeled spaces. LabeledSpace takes the actual (power-of-two) size;
// the bit count is derived as log2_size. The previous aliases passed the
// exponent instead of the size (e.g. 10 for 2^10 threads), which is not a
// power of two for THREAD/WARP and fails LabeledSpace's static_assert on
// instantiation.
using RegSpace = LabeledSpace<Dimension::REG, 256>;         // 2^8 = 256 registers
using ThreadSpace = LabeledSpace<Dimension::THREAD, 1024>;  // 2^10 = 1024 threads
using WarpSpace = LabeledSpace<Dimension::WARP, 32>;        // 2^5 = 32 warps
using TensorSpace = LabeledSpace<Dimension::TENSOR, 65536>; // 2^16 logical elements

/**
 * Hardware coordinate structure
 * Represents a point in the (reg, thread, warp) space, packed as bit-fields:
 * 8 register bits, 10 thread bits, 5 warp bits, 9 reserved bits.
 */
struct HardwareCoord {
    uint32_t reg_idx : 8;
    uint32_t thread_idx : 10;
    uint32_t warp_idx : 5;
    uint32_t reserved : 9;

    __host__ __device__ HardwareCoord(uint8_t r = 0, uint16_t t = 0, uint8_t w = 0)
        : reg_idx(r), thread_idx(t), warp_idx(w), reserved(0) {}

    // Packed 32-bit value: reg in bits [0,8), thread in [8,18), warp in [18,23).
    // Composed explicitly from the fields; the previous
    // `*reinterpret_cast<const uint32_t*>(this)` violated strict aliasing and
    // relied on implementation-defined bit-field layout (and leaked the
    // reserved bits into the result).
    __host__ __device__ uint32_t as_uint32() const {
        return static_cast<uint32_t>(as_vector());
    }

    // Same packing widened to 64 bits; used as the F2 input vector for
    // LinearLayout::apply.
    __host__ __device__ uint64_t as_vector() const {
        uint64_t result = 0;
        result |= static_cast<uint64_t>(reg_idx);
        result |= static_cast<uint64_t>(thread_idx) << 8;
        result |= static_cast<uint64_t>(warp_idx) << 18;
        return result;
    }
};

/**
 * Logical tensor coordinate
 * Position in the logical tensor space, supporting up to 4 dimensions.
 */
struct TensorCoord {
    int x, y, z, w; // Up to 4D tensors

    __host__ __device__ TensorCoord(int x_ = 0, int y_ = 0, int z_ = 0, int w_ = 0)
        : x(x_), y(y_), z(z_), w(w_) {}

    // Row-major linearization with x fastest-varying:
    // x + y*dx + z*dx*dy + w*dx*dy*dz, evaluated in Horner form.
    __host__ __device__ uint64_t linearize(int dim_x, int dim_y = 1, int dim_z = 1, int dim_w = 1) const {
        (void)dim_w; // the slowest dimension's extent never enters the index
        uint64_t index = static_cast<uint64_t>(w);
        index = index * dim_z + static_cast<uint64_t>(z);
        index = index * dim_y + static_cast<uint64_t>(y);
        index = index * dim_x + static_cast<uint64_t>(x);
        return index;
    }
};

/**
 * Linear Layout class
 * Models tensor layouts as linear maps between labeled vector spaces over F2.
 *
 * The input vector concatenates the hardware index bits in (reg, thread,
 * warp) order; the output vector indexes the logical tensor. Applying the
 * layout is a matrix-vector product over F2, delegated to BinaryMatrix.
 *
 * Template parameters:
 * - INPUT_DIMS: Total input dimension (must hold reg + thread + warp bits)
 * - OUTPUT_DIMS: Total output dimension (logical tensor bits)
 * - REG_BITS: Number of register bits
 * - THREAD_BITS: Number of thread bits
 * - WARP_BITS: Number of warp bits
 */
template<int INPUT_DIMS, int OUTPUT_DIMS, int REG_BITS = 8, int THREAD_BITS = 10, int WARP_BITS = 5>
class LinearLayout {
public:
    static constexpr int TOTAL_INPUT_BITS = REG_BITS + THREAD_BITS + WARP_BITS;
    static_assert(INPUT_DIMS >= TOTAL_INPUT_BITS, "Input dimension too small");
    static_assert(REG_BITS > 0 && THREAD_BITS > 0 && WARP_BITS > 0, "All dimensions must be positive");

private:
    BinaryMatrix<OUTPUT_DIMS, INPUT_DIMS> matrix_;

    // Input-column ranges of the labeled dimensions:
    // [REG_OFFSET, THREAD_OFFSET) register bits,
    // [THREAD_OFFSET, WARP_OFFSET) thread bits,
    // [WARP_OFFSET, WARP_OFFSET + WARP_BITS) warp bits.
    static constexpr int REG_OFFSET = 0;
    static constexpr int THREAD_OFFSET = REG_BITS;
    static constexpr int WARP_OFFSET = REG_BITS + THREAD_BITS;

    // std::min is not annotated __device__ (not device-callable without
    // --expt-relaxed-constexpr), so provide a host/device min.
    __host__ __device__ static constexpr int min_int(int a, int b) {
        return a < b ? a : b;
    }

    // True when DIM is one of the three hardware dimensions this layout
    // partitions its input columns into. Used to reject BLOCK/TENSOR at
    // compile time instead of returning void or reading uninitialized locals.
    template<Dimension DIM>
    __host__ __device__ static constexpr bool is_hw_dim() {
        return DIM == Dimension::REG || DIM == Dimension::THREAD ||
               DIM == Dimension::WARP;
    }

    // Copy N_COLS consecutive matrix columns starting at `col_offset` into a
    // standalone sub-matrix (shared implementation of extract_sublayout).
    template<int N_COLS>
    __host__ __device__ BinaryMatrix<OUTPUT_DIMS, N_COLS> copy_columns(int col_offset) const {
        BinaryMatrix<OUTPUT_DIMS, N_COLS> sub_matrix;
        for (int i = 0; i < OUTPUT_DIMS; i++) {
            for (int j = 0; j < N_COLS; j++) {
                sub_matrix.set_bit(i, j, matrix_.get_bit(i, col_offset + j));
            }
        }
        return sub_matrix;
    }

public:
    __host__ __device__ LinearLayout() = default;

    __host__ __device__ LinearLayout(const BinaryMatrix<OUTPUT_DIMS, INPUT_DIMS>& matrix)
        : matrix_(matrix) {}

    // Apply the linear layout to a hardware coordinate; returns the linear
    // index in the logical tensor space.
    __host__ __device__ uint64_t apply(const HardwareCoord& coord) const {
        uint64_t input_vector = coord.as_vector();
        return matrix_.multiply_vector(input_vector);
    }

    // Convenience overload taking explicit register, thread, warp indices.
    __host__ __device__ uint64_t apply(uint8_t reg, uint16_t thread, uint8_t warp) const {
        return apply(HardwareCoord(reg, thread, warp));
    }

    // Get the underlying matrix (const access).
    __host__ __device__ const BinaryMatrix<OUTPUT_DIMS, INPUT_DIMS>& matrix() const {
        return matrix_;
    }

    // Get the underlying matrix (mutable access).
    __host__ __device__ BinaryMatrix<OUTPUT_DIMS, INPUT_DIMS>& matrix() {
        return matrix_;
    }

    // Extract the OUTPUT_DIMS x <dim-bits> sub-matrix formed by the columns
    // of one hardware dimension. Only REG/THREAD/WARP are valid; other
    // dimensions are rejected at compile time (previously the if-chain fell
    // off the end and deduced a void return).
    template<Dimension DIM>
    __host__ __device__ auto extract_sublayout() const {
        static_assert(is_hw_dim<DIM>(),
                      "extract_sublayout is only defined for REG/THREAD/WARP");
        if constexpr (DIM == Dimension::REG) {
            return copy_columns<REG_BITS>(REG_OFFSET);
        } else if constexpr (DIM == Dimension::THREAD) {
            return copy_columns<THREAD_BITS>(THREAD_OFFSET);
        } else {
            return copy_columns<WARP_BITS>(WARP_OFFSET);
        }
    }

    // Set (or clear) one matrix bit. output_bit indexes the tensor-space
    // row; input_bit is local to dimension DIM (0 is that dimension's first
    // input bit).
    template<Dimension DIM>
    __host__ __device__ void set_dimension_bits(int output_bit, int input_bit, bool value = true) {
        static_assert(is_hw_dim<DIM>(),
                      "set_dimension_bits is only defined for REG/THREAD/WARP");
        int offset = 0;
        int max_bits = 0;

        if constexpr (DIM == Dimension::REG) {
            offset = REG_OFFSET;
            max_bits = REG_BITS;
        } else if constexpr (DIM == Dimension::THREAD) {
            offset = THREAD_OFFSET;
            max_bits = THREAD_BITS;
        } else {
            offset = WARP_OFFSET;
            max_bits = WARP_BITS;
        }

        // Bounds-check both coordinates (previously only input_bit was
        // checked, so an out-of-range output row went straight into the
        // matrix).
        assert(output_bit >= 0 && output_bit < OUTPUT_DIMS);
        assert(input_bit >= 0 && input_bit < max_bits);
        matrix_.set_bit(output_bit, offset + input_bit, value);
    }

    // Check if the layout has an identity mapping for a dimension.
    // NOTE(review): only implemented for REG; other dimensions conservatively
    // report false.
    template<Dimension DIM>
    __host__ __device__ bool is_identity_for_dimension() const {
        auto sub_matrix = extract_sublayout<DIM>();

        if constexpr (DIM == Dimension::REG) {
            auto identity = BinaryMatrix<OUTPUT_DIMS, REG_BITS>::identity();
            // Compare the leading rows of the REG columns against identity.
            for (int i = 0; i < min_int(OUTPUT_DIMS, REG_BITS); i++) {
                for (int j = 0; j < REG_BITS; j++) {
                    if (sub_matrix.get_bit(i, j) != identity.get_bit(i, j)) {
                        return false;
                    }
                }
            }
            return true;
        }
        return false; // Simplified for other dimensions
    }

    // Number of contiguous tensor elements addressable through the register
    // bits (useful for choosing a vectorized load/store width).
    __host__ __device__ int contiguous_elements() const {
        auto reg_matrix = extract_sublayout<Dimension::REG>();

        // Find the largest u such that register bit i maps to output bit i
        // for all i < u; 2^u consecutive elements are register-contiguous.
        int contiguous = 0;
        for (int i = 0; i < min_int(OUTPUT_DIMS, REG_BITS); i++) {
            bool is_identity_row = true;
            for (int j = 0; j < REG_BITS; j++) {
                bool expected = (j == i);
                if (reg_matrix.get_bit(i, j) != expected) {
                    is_identity_row = false;
                    break;
                }
            }
            if (is_identity_row) {
                contiguous++;
            } else {
                break;
            }
        }

        return contiguous > 0 ? (1 << contiguous) : 1;
    }

    // Check if this is a valid distributed layout: every column has at most
    // one non-zero bit.
    // NOTE(review): distinctness of non-zero columns is part of the stated
    // definition but is not verified here (kept as-is to preserve behavior).
    __host__ __device__ bool is_distributed_layout() const {
        for (int j = 0; j < INPUT_DIMS; j++) {
            int non_zero_count = 0;
            for (int i = 0; i < OUTPUT_DIMS; i++) {
                if (matrix_.get_bit(i, j)) {
                    non_zero_count++;
                }
            }
            if (non_zero_count > 1) {
                return false; // More than one bit set in column
            }
        }
        return true;
    }

    // Check if this is a valid memory layout: every column has exactly one
    // or two non-zero bits.
    __host__ __device__ bool is_memory_layout() const {
        for (int j = 0; j < INPUT_DIMS; j++) {
            int non_zero_count = 0;
            for (int i = 0; i < OUTPUT_DIMS; i++) {
                if (matrix_.get_bit(i, j)) {
                    non_zero_count++;
                }
            }
            if (non_zero_count == 0 || non_zero_count > 2) {
                return false;
            }
        }
        return true;
    }

    // Compute a (right-)inverse layout for surjective layouts. The labeled
    // bit widths are carried over verbatim; they only retain their reg/
    // thread/warp meaning when the inverse maps back into a hardware-shaped
    // space — TODO confirm against callers.
    __host__ __device__ LinearLayout<OUTPUT_DIMS, INPUT_DIMS, REG_BITS, THREAD_BITS, WARP_BITS> inverse() const {
        auto inv_matrix = matrix_.right_inverse();
        return LinearLayout<OUTPUT_DIMS, INPUT_DIMS, REG_BITS, THREAD_BITS, WARP_BITS>(inv_matrix);
    }

    // True when all warp columns are zero, i.e. the output never depends on
    // the warp index, so no inter-warp data movement is needed.
    __host__ __device__ bool can_use_warp_shuffles() const {
        auto warp_matrix = extract_sublayout<Dimension::WARP>();
        return warp_matrix.is_zero();
    }

    // Bitmask of input bits whose matrix column is all-zero; those bits do
    // not influence the output (broadcast pattern). Only the first 64 input
    // bits can be reported in the mask.
    __host__ __device__ uint64_t find_broadcast_mask() const {
        uint64_t broadcast_mask = 0;

        for (int j = 0; j < INPUT_DIMS && j < 64; j++) {
            bool is_zero_column = true;
            for (int i = 0; i < OUTPUT_DIMS; i++) {
                if (matrix_.get_bit(i, j)) {
                    is_zero_column = false;
                    break;
                }
            }
            if (is_zero_column) {
                broadcast_mask |= (1ULL << j);
            }
        }

        return broadcast_mask;
    }

    // Create the identity layout.
    __host__ __device__ static LinearLayout identity() {
        auto id_matrix = BinaryMatrix<OUTPUT_DIMS, INPUT_DIMS>::identity();
        return LinearLayout(id_matrix);
    }

    // Create a blocked layout with the given per-dimension tile sizes.
    // y tiles fill the low output bits and x tiles the bits starting at
    // OUTPUT_DIMS/2; registers change fastest, warps slowest. All tile
    // sizes must be positive powers of two (__builtin_ctz(0) is undefined).
    //
    // Fix: the original threaded one global bit counter through all three
    // dimensions, but set_dimension_bits expects a *dimension-local* input
    // bit — thread/warp input bits were shifted by the bits already consumed
    // by earlier dimensions. Each dimension now uses its own counter.
    __host__ __device__ static LinearLayout blocked_layout(
        int reg_tile_x, int reg_tile_y,
        int thread_tile_x, int thread_tile_y,
        int warp_tile_x, int warp_tile_y) {

        assert(reg_tile_x > 0 && reg_tile_y > 0 &&
               thread_tile_x > 0 && thread_tile_y > 0 &&
               warp_tile_x > 0 && warp_tile_y > 0);

        LinearLayout layout;

        // Register bits (fastest changing)
        int reg_bit = 0;
        for (int i = 0; i < __builtin_ctz(reg_tile_y); i++) {
            layout.template set_dimension_bits<Dimension::REG>(i, reg_bit++);
        }
        for (int i = 0; i < __builtin_ctz(reg_tile_x); i++) {
            layout.template set_dimension_bits<Dimension::REG>(OUTPUT_DIMS/2 + i, reg_bit++);
        }

        // Thread bits
        int thread_bit = 0;
        for (int i = 0; i < __builtin_ctz(thread_tile_y); i++) {
            layout.template set_dimension_bits<Dimension::THREAD>(
                __builtin_ctz(reg_tile_y) + i, thread_bit++);
        }
        for (int i = 0; i < __builtin_ctz(thread_tile_x); i++) {
            layout.template set_dimension_bits<Dimension::THREAD>(
                OUTPUT_DIMS/2 + __builtin_ctz(reg_tile_x) + i, thread_bit++);
        }

        // Warp bits (slowest changing)
        int warp_bit = 0;
        for (int i = 0; i < __builtin_ctz(warp_tile_y); i++) {
            layout.template set_dimension_bits<Dimension::WARP>(
                __builtin_ctz(reg_tile_y) + __builtin_ctz(thread_tile_y) + i, warp_bit++);
        }
        for (int i = 0; i < __builtin_ctz(warp_tile_x); i++) {
            layout.template set_dimension_bits<Dimension::WARP>(
                OUTPUT_DIMS/2 + __builtin_ctz(reg_tile_x) + __builtin_ctz(thread_tile_x) + i, warp_bit++);
        }

        return layout;
    }

    // Debug print (host only; uses printf).
    __host__ void print() const {
        printf("Linear Layout %dx%d:\n", OUTPUT_DIMS, INPUT_DIMS);
        printf("REG[%d:%d] THREAD[%d:%d] WARP[%d:%d]\n", 
               REG_OFFSET, REG_OFFSET + REG_BITS - 1,
               THREAD_OFFSET, THREAD_OFFSET + THREAD_BITS - 1, 
               WARP_OFFSET, WARP_OFFSET + WARP_BITS - 1);
        matrix_.print();
    }
};

// Common layout type aliases for typical GPU configurations.
// LinearLayout requires INPUT_DIMS >= REG_BITS + THREAD_BITS + WARP_BITS, so
// the small layouts must shrink the per-dimension bit budgets explicitly —
// with the defaults (8+10+5 = 23 input bits) Layout8x8/Layout16x16 failed
// the static_assert on instantiation. The chosen splits sum to INPUT_DIMS;
// TODO confirm the intended per-dimension budgets against callers.
using Layout8x8 = LinearLayout<8, 8, 3, 3, 2>;     // 3+3+2 = 8 input bits
using Layout16x16 = LinearLayout<16, 16, 4, 8, 4>; // 4+8+4 = 16 input bits
using Layout32x32 = LinearLayout<32, 32>;          // defaults: 8+10+5 = 23 <= 32

// Typical hardware configuration: 2^8 regs x 2^10 threads x 2^5 warps
// (23 input bits) mapped onto a 2^16-element logical tensor. Template order
// is <INPUT_DIMS, OUTPUT_DIMS, ...>, so the 23-bit hardware space is the
// input — the original <16, 23, ...> had the two swapped and failed the
// INPUT_DIMS >= TOTAL_INPUT_BITS static_assert on instantiation.
using StandardLayout = LinearLayout<23, 16, 8, 10, 5>;

} // namespace linear_layouts