#pragma once

#include "linear_layout.cuh"

#include <type_traits>
#include <utility>  // std::pair / std::make_pair used by left_divide

namespace linear_layouts {

/**
 * Layout Operations
 * Implements composition, product, left division, and other operations
 * from the Linear Layouts mathematical framework
 */

/**
 * Composition of two linear layouts: L2 ∘ L1
 *
 * Given L1: U -> V and L2: V -> W, produces the layout U -> W with
 * (L2 ∘ L1)(u) = L2(L1(u)).  In matrix form this is the product M2 * M1.
 */
template<int U_DIMS, int V_DIMS, int W_DIMS,
         int REG_BITS = 8, int THREAD_BITS = 10, int WARP_BITS = 5>
__host__ __device__ auto compose(
    const LinearLayout<V_DIMS, W_DIMS, REG_BITS, THREAD_BITS, WARP_BITS>& L2,
    const LinearLayout<U_DIMS, V_DIMS, REG_BITS, THREAD_BITS, WARP_BITS>& L1) {
    // Function composition is matrix multiplication of the layout matrices.
    return LinearLayout<U_DIMS, W_DIMS, REG_BITS, THREAD_BITS, WARP_BITS>(
        L2.matrix() * L1.matrix());
}

/**
 * Product (direct sum) of two linear layouts: L1 × L2
 *
 * Given L1: U1 -> V1 and L2: U2 -> V2, produces (U1 × U2) -> (V1 × V2)
 * with (L1 × L2)(u1, u2) = (L1(u1), L2(u2)).  The matrix is block
 * diagonal:  [M1  0 ]
 *            [0   M2]
 */
template<int U1_DIMS, int U2_DIMS, int V1_DIMS, int V2_DIMS,
         int REG_BITS = 8, int THREAD_BITS = 10, int WARP_BITS = 5>
__host__ __device__ auto product(
    const LinearLayout<U1_DIMS, V1_DIMS, REG_BITS, THREAD_BITS, WARP_BITS>& L1,
    const LinearLayout<U2_DIMS, V2_DIMS, REG_BITS, THREAD_BITS, WARP_BITS>& L2) {

    constexpr int U_DIMS = U1_DIMS + U2_DIMS;
    constexpr int V_DIMS = V1_DIMS + V2_DIMS;

    // Off-diagonal blocks stay at the default (zero) bits of BinaryMatrix.
    BinaryMatrix<V_DIMS, U_DIMS> block_diag;

    // M1 occupies rows [0, V1) x cols [0, U1).
    for (int r = 0; r < V1_DIMS; ++r) {
        for (int c = 0; c < U1_DIMS; ++c) {
            block_diag.set_bit(r, c, L1.matrix().get_bit(r, c));
        }
    }

    // M2 occupies rows [V1, V1+V2) x cols [U1, U1+U2).
    for (int r = 0; r < V2_DIMS; ++r) {
        for (int c = 0; c < U2_DIMS; ++c) {
            block_diag.set_bit(r + V1_DIMS, c + U1_DIMS, L2.matrix().get_bit(r, c));
        }
    }

    return LinearLayout<U_DIMS, V_DIMS, REG_BITS, THREAD_BITS, WARP_BITS>(block_diag);
}

/**
 * Left Division: M ÷ M1 = M2 where M has the block-diagonal form
 *     [M1  0 ]
 *     [0   M2]
 *
 * Returns {true, M2} when M decomposes this way; otherwise {false, zero
 * layout} of the quotient's dimensions.  The bottom-right block itself is
 * unconstrained — whatever it contains becomes M2.
 */
template<int M_ROWS, int M_COLS, int M1_ROWS, int M1_COLS,
         int REG_BITS = 8, int THREAD_BITS = 10, int WARP_BITS = 5>
__host__ __device__ auto left_divide(
    const LinearLayout<M_COLS, M_ROWS, REG_BITS, THREAD_BITS, WARP_BITS>& M,
    const LinearLayout<M1_COLS, M1_ROWS, REG_BITS, THREAD_BITS, WARP_BITS>& M1) {

    static_assert(M1_ROWS <= M_ROWS && M1_COLS <= M_COLS, "M1 must fit in M");

    constexpr int M2_ROWS = M_ROWS - M1_ROWS;
    constexpr int M2_COLS = M_COLS - M1_COLS;
    using Quotient = LinearLayout<M2_COLS, M2_ROWS, REG_BITS, THREAD_BITS, WARP_BITS>;

    // Single scan over M: the top-left block must match M1 bit-for-bit and
    // both off-diagonal blocks must be all-zero.
    bool ok = true;
    for (int r = 0; r < M_ROWS && ok; ++r) {
        for (int c = 0; c < M_COLS && ok; ++c) {
            const bool in_m1_rows = (r < M1_ROWS);
            const bool in_m1_cols = (c < M1_COLS);
            if (in_m1_rows && in_m1_cols) {
                ok = (M.matrix().get_bit(r, c) == M1.matrix().get_bit(r, c));
            } else if (in_m1_rows != in_m1_cols) {
                // Top-right or bottom-left block: must be zero.
                ok = !M.matrix().get_bit(r, c);
            }
            // Bottom-right block is unconstrained; it is extracted below.
        }
    }

    // On failure the quotient stays all-zero, signalling an invalid layout.
    BinaryMatrix<M2_ROWS, M2_COLS> quotient;
    if (ok) {
        for (int r = 0; r < M2_ROWS; ++r) {
            for (int c = 0; c < M2_COLS; ++c) {
                quotient.set_bit(r, c, M.matrix().get_bit(r + M1_ROWS, c + M1_COLS));
            }
        }
    }

    return std::make_pair(ok, Quotient(quotient));
}

/**
 * Slice operation: removes SLICE_SIZE input columns belonging to one
 * hardware dimension (REG / THREAD / WARP) from the layout.
 * Used for reductions and broadcasts.
 *
 * FIX: the original computed `constexpr int NEW_INPUT_DIMS = INPUT_DIMS -
 * slice_dim_size` from a *runtime* parameter, which is ill-formed (the
 * result dimension must be a compile-time constant because it appears in
 * the return type).  The slice width is therefore lifted to the template
 * parameter SLICE_SIZE; the runtime parameter is kept (defaulted) for
 * source compatibility but SLICE_SIZE is authoritative.
 */
template<int INPUT_DIMS, int OUTPUT_DIMS, Dimension SLICE_DIM, int SLICE_SIZE,
         int REG_BITS = 8, int THREAD_BITS = 10, int WARP_BITS = 5>
__host__ __device__ auto slice(
    const LinearLayout<OUTPUT_DIMS, INPUT_DIMS, REG_BITS, THREAD_BITS, WARP_BITS>& layout,
    int slice_dim_size = SLICE_SIZE) {

    (void)slice_dim_size;  // retained for call-site compatibility only

    static_assert(SLICE_SIZE >= 0 && SLICE_SIZE <= INPUT_DIMS,
                  "slice width must fit in the input dimensions");

    constexpr int NEW_INPUT_DIMS = INPUT_DIMS - SLICE_SIZE;

    // Column range to drop, determined by which dimension is sliced.
    // Column layout is [REG | THREAD | WARP].
    constexpr int skip_start =
        (SLICE_DIM == Dimension::REG)    ? 0 :
        (SLICE_DIM == Dimension::THREAD) ? REG_BITS
                                         : REG_BITS + THREAD_BITS;
    constexpr int skip_end = skip_start + SLICE_SIZE;
    static_assert(skip_end <= INPUT_DIMS,
                  "sliced range exceeds the layout's input dimensions");

    BinaryMatrix<OUTPUT_DIMS, NEW_INPUT_DIMS> sliced_matrix;

    // Copy every column outside [skip_start, skip_end), compacting leftwards.
    int dst_col = 0;
    for (int j = 0; j < INPUT_DIMS; j++) {
        if (j >= skip_start && j < skip_end) {
            continue;  // column belongs to the sliced dimension
        }
        for (int i = 0; i < OUTPUT_DIMS; i++) {
            sliced_matrix.set_bit(i, dst_col, layout.matrix().get_bit(i, j));
        }
        dst_col++;
    }

    return LinearLayout<OUTPUT_DIMS, NEW_INPUT_DIMS, REG_BITS, THREAD_BITS, WARP_BITS>(sliced_matrix);
}

/**
 * Transpose operation for layout matrices.
 * Swaps the layout's input and output spaces by transposing its matrix.
 */
template<int ROWS, int COLS, int REG_BITS = 8, int THREAD_BITS = 10, int WARP_BITS = 5>
__host__ __device__ auto transpose(
    const LinearLayout<ROWS, COLS, REG_BITS, THREAD_BITS, WARP_BITS>& layout) {
    return LinearLayout<COLS, ROWS, REG_BITS, THREAD_BITS, WARP_BITS>(
        layout.matrix().transpose());
}

/**
 * Reshape operation: reduces the logical output dimensions while keeping
 * the mapping on the surviving dimensions intact.
 *
 * Keeps the first NEW_OUTPUT_DIMS rows of the layout matrix and drops the
 * rest; the input dimensions are unchanged.
 */
template<int NEW_OUTPUT_DIMS, int INPUT_DIMS, int OLD_OUTPUT_DIMS,
         int REG_BITS = 8, int THREAD_BITS = 10, int WARP_BITS = 5>
__host__ __device__ auto reshape(
    const LinearLayout<OLD_OUTPUT_DIMS, INPUT_DIMS, REG_BITS, THREAD_BITS, WARP_BITS>& layout) {

    static_assert(NEW_OUTPUT_DIMS <= OLD_OUTPUT_DIMS, "Can only reshape to smaller or equal dimensions");

    // Row-truncate the original matrix: rows [0, NEW_OUTPUT_DIMS) survive.
    BinaryMatrix<NEW_OUTPUT_DIMS, INPUT_DIMS> truncated;
    for (int row = 0; row < NEW_OUTPUT_DIMS; ++row) {
        for (int col = 0; col < INPUT_DIMS; ++col) {
            truncated.set_bit(row, col, layout.matrix().get_bit(row, col));
        }
    }

    return LinearLayout<NEW_OUTPUT_DIMS, INPUT_DIMS, REG_BITS, THREAD_BITS, WARP_BITS>(truncated);
}

/**
 * Broadcast operation: inserts BROADCAST_DIMS all-zero input columns at the
 * positions given by broadcast_positions (destination column indices), so
 * those input bits have no effect on the output — i.e. broadcasting.
 *
 * broadcast_positions must point to BROADCAST_DIMS valid column indices in
 * the widened matrix.
 */
template<int OUTPUT_DIMS, int INPUT_DIMS, int BROADCAST_DIMS,
         int REG_BITS = 8, int THREAD_BITS = 10, int WARP_BITS = 5>
__host__ __device__ auto broadcast(
    const LinearLayout<OUTPUT_DIMS, INPUT_DIMS, REG_BITS, THREAD_BITS, WARP_BITS>& layout,
    const int* broadcast_positions) {

    constexpr int NEW_INPUT_DIMS = INPUT_DIMS + BROADCAST_DIMS;
    BinaryMatrix<OUTPUT_DIMS, NEW_INPUT_DIMS> widened;

    // Linear scan of the (small) position list; true when dst column `col`
    // is one of the inserted broadcast columns.
    auto is_broadcast_col = [&](int col) {
        for (int b = 0; b < BROADCAST_DIMS; ++b) {
            if (broadcast_positions[b] == col) return true;
        }
        return false;
    };

    int next_src = 0;
    for (int col = 0; col < NEW_INPUT_DIMS; ++col) {
        if (is_broadcast_col(col)) {
            continue;  // broadcast column: left all-zero
        }
        // Non-broadcast column: copy the next original column in order.
        for (int row = 0; row < OUTPUT_DIMS; ++row) {
            widened.set_bit(row, col, layout.matrix().get_bit(row, next_src));
        }
        ++next_src;
    }

    return LinearLayout<OUTPUT_DIMS, NEW_INPUT_DIMS, REG_BITS, THREAD_BITS, WARP_BITS>(widened);
}

/**
 * Check if two layouts can be converted via warp shuffles.
 * True when the conversion layout dst^(-1) ∘ src has no warp component,
 * i.e. the transformation moves no data across warps.
 *
 * NOTE(review): the original header said "warp components are identity"
 * while the code tests is_zero(); this preserves the code's behavior —
 * confirm extract_sublayout<WARP> yields the cross-warp part rather than
 * the full diagonal block.
 */
template<int DIMS, int REG_BITS = 8, int THREAD_BITS = 10, int WARP_BITS = 5>
__host__ __device__ bool can_convert_via_warp_shuffle(
    const LinearLayout<DIMS, DIMS, REG_BITS, THREAD_BITS, WARP_BITS>& src_layout,
    const LinearLayout<DIMS, DIMS, REG_BITS, THREAD_BITS, WARP_BITS>& dst_layout) {

    // Conversion layout mapping source positions to destination positions.
    auto conversion = compose(dst_layout.inverse(), src_layout);
    return conversion.template extract_sublayout<Dimension::WARP>().is_zero();
}

/**
 * Compute swizzling parameters for shared-memory bank conflict reduction.
 *
 * Starts from the identity layout and XORs bank-index bits
 * [vec_bits, vec_bits + bank_bits) with the vectorization bits
 * [0, vec_bits), a standard XOR-swizzle pattern.  This is a simplified
 * version of the optimal swizzling algorithm from the Linear Layouts paper.
 *
 * FIX: removed dead locals — the original extracted the THREAD sublayouts
 * of both inputs and never used them.  read_layout / write_layout are not
 * yet consulted by this simplified heuristic; they remain in the interface
 * for the full algorithm.
 *
 * @param vec_bits   log2 of the vectorization factor (default 2)
 * @param bank_bits  log2 of the number of shared-memory banks (default 5)
 */
template<int DIMS, int REG_BITS = 8, int THREAD_BITS = 10, int WARP_BITS = 5>
__host__ __device__ auto compute_optimal_swizzle(
    const LinearLayout<DIMS, DIMS, REG_BITS, THREAD_BITS, WARP_BITS>& read_layout,
    const LinearLayout<DIMS, DIMS, REG_BITS, THREAD_BITS, WARP_BITS>& write_layout,
    int vec_bits = 2, // log2 of vectorization factor
    int bank_bits = 5) { // log2 of number of banks

    (void)read_layout;   // reserved for the full conflict-analysis algorithm
    (void)write_layout;  // reserved for the full conflict-analysis algorithm

    // Identity base: swizzle is a perturbation of the unswizzled layout.
    BinaryMatrix<DIMS, DIMS> swizzle_matrix = BinaryMatrix<DIMS, DIMS>::identity();

    // XOR each bank-index bit with every vectorization bit, clamped to DIMS.
    for (int i = vec_bits; i < DIMS && i < vec_bits + bank_bits; i++) {
        for (int j = 0; j < vec_bits; j++) {
            swizzle_matrix.set_bit(i, j, true); // XOR pattern
        }
    }

    return LinearLayout<DIMS, DIMS, REG_BITS, THREAD_BITS, WARP_BITS>(swizzle_matrix);
}

/**
 * Hardware primitives a layout may be compatible with.
 * Used by supports_hardware_primitive() to decide whether a layout can be
 * lowered to a specific SIMD/memory instruction.
 */
enum class HardwarePrimitive {
    VECTORIZED_LOAD,  // vectorized global loads: v2.b64, v4.b32, etc.
    LDMATRIX,         // ldmatrix.sync.aligned (shared-memory matrix load)
    STMATRIX,         // stmatrix.sync.aligned (shared-memory matrix store)
    WGMMA,            // wgmma warp-group MMA instructions
    CP_ASYNC          // cp.async.bulk, cp.async.cg (async global->shared copy)
};

/**
 * Check hardware primitive compatibility.
 * Returns true when the layout satisfies the (simplified) structural
 * requirements of the given primitive; STMATRIX and CP_ASYNC are not yet
 * modeled and always report false.
 */
template<int DIMS, int REG_BITS = 8, int THREAD_BITS = 10, int WARP_BITS = 5>
__host__ __device__ bool supports_hardware_primitive(
    const LinearLayout<DIMS, DIMS, REG_BITS, THREAD_BITS, WARP_BITS>& layout,
    HardwarePrimitive primitive) {

    switch (primitive) {
        case HardwarePrimitive::VECTORIZED_LOAD:
            // Needs at least a 2-element contiguous run to widen loads.
            return layout.contiguous_elements() >= 2;

        case HardwarePrimitive::LDMATRIX: {
            // Simplified 8x8-tile check on the register/thread structure.
            const auto regs = layout.template extract_sublayout<Dimension::REG>();
            const auto lanes = layout.template extract_sublayout<Dimension::THREAD>();
            return regs.rank() >= 2 && lanes.rank() >= 3;
        }

        case HardwarePrimitive::WGMMA:
            // wgmma needs a distributed layout with 16x16-or-larger tiles.
            return layout.is_distributed_layout() && DIMS >= 8;

        case HardwarePrimitive::STMATRIX:
        case HardwarePrimitive::CP_ASYNC:
        default:
            // Not modeled yet: report unsupported (same as original default).
            return false;
    }
}

/**
 * Generate conversion sequence for layout transformation.
 * One step of the sequence produced by generate_conversion_sequence();
 * steps are executed in order to convert data between two layouts.
 */
struct ConversionStep {
    // Data-movement mechanism for this step, from cheapest (register
    // permute) to most expensive (round-trip through shared memory).
    enum Type { REGISTER_PERMUTE, WARP_SHUFFLE, SHARED_MEMORY, VECTORIZED_COPY };
    Type type;
    // Operation-specific parameters; currently always written as 0 by
    // generate_conversion_sequence() — semantics depend on `type`.
    uint32_t param1, param2; // Parameters specific to each operation type
};

/**
 * Generate the sequence of operations needed to convert data from
 * src_layout to dst_layout, writing at most max_steps entries into steps.
 *
 * Decides per hardware dimension of the conversion layout dst^(-1) ∘ src:
 * a register permute when registers move, a warp shuffle when threads move
 * but warps do not, and a shared-memory round trip when warps move.
 *
 * @return number of steps written (<= max_steps)
 */
template<int DIMS, int REG_BITS = 8, int THREAD_BITS = 10, int WARP_BITS = 5>
__host__ __device__ int generate_conversion_sequence(
    const LinearLayout<DIMS, DIMS, REG_BITS, THREAD_BITS, WARP_BITS>& src_layout,
    const LinearLayout<DIMS, DIMS, REG_BITS, THREAD_BITS, WARP_BITS>& dst_layout,
    ConversionStep* steps, int max_steps) {

    // Conversion layout mapping source positions to destination positions.
    auto conversion = compose(dst_layout.inverse(), src_layout);

    const bool regs_move =
        !conversion.template extract_sublayout<Dimension::REG>().is_zero();
    const bool threads_move =
        !conversion.template extract_sublayout<Dimension::THREAD>().is_zero();
    const bool warps_move =
        !conversion.template extract_sublayout<Dimension::WARP>().is_zero();

    int count = 0;
    // Append a step unless the caller-provided buffer is already full.
    auto emit = [&](ConversionStep::Type kind) {
        if (count < max_steps) {
            steps[count++] = {kind, 0, 0};
        }
    };

    if (regs_move) emit(ConversionStep::REGISTER_PERMUTE);
    if (!warps_move && threads_move) emit(ConversionStep::WARP_SHUFFLE);
    if (warps_move) emit(ConversionStep::SHARED_MEMORY);

    return count;
}

} // namespace linear_layouts