#pragma once

#include <cassert>
#include <cstdint>
#include <cstdio>

#include <cuda_runtime.h>

namespace linear_layouts {

/**
 * Binary Matrix operations over F₂ (field of two elements)
 * Implements efficient bit manipulation for Linear Layouts framework
 * Targeting SM_89 (RTX 4070 Super) architecture
 */
template<int M, int N>
class BinaryMatrix {
public:
    static constexpr int ROWS = M;
    static constexpr int COLS = N;
    static constexpr int BITS_PER_WORD = 64;
    // 64-bit words needed per row: ceil(N / 64). Rows are packed independently.
    static constexpr int WORDS_PER_ROW = (N + BITS_PER_WORD - 1) / BITS_PER_WORD;
    static constexpr int TOTAL_WORDS = M * WORDS_PER_ROW;

private:
    // Row-major bit-packed storage: bit (row, col) lives in
    // data[row * WORDS_PER_ROW + col / 64] at bit position col % 64.
    // Padding bits (col >= N) stay zero via the zeroing constructor and
    // set_bit's bounds asserts; the raw-pointer constructor trusts the
    // caller to supply words with clean padding.
    uint64_t data[TOTAL_WORDS];

public:
    // Construct the zero matrix.
    __host__ __device__ BinaryMatrix() {
        #ifdef __CUDACC__
        #pragma unroll
        #endif
        for (int i = 0; i < TOTAL_WORDS; i++) {
            data[i] = 0;
        }
    }

    // Construct from TOTAL_WORDS pre-packed words laid out exactly like `data`.
    __host__ __device__ BinaryMatrix(const uint64_t* init_data) {
        #ifdef __CUDACC__
        #pragma unroll
        #endif
        for (int i = 0; i < TOTAL_WORDS; i++) {
            data[i] = init_data[i];
        }
    }

    // Set (value=true) or clear (value=false) the bit at (row, col).
    // Asserts on out-of-range indices, so padding bits can never be set.
    __host__ __device__ void set_bit(int row, int col, bool value = true) {
        assert(row >= 0 && row < M);
        assert(col >= 0 && col < N);
        
        int word_idx = row * WORDS_PER_ROW + col / BITS_PER_WORD;
        int bit_idx = col % BITS_PER_WORD;
        
        if (value) {
            data[word_idx] |= (1ULL << bit_idx);
        } else {
            data[word_idx] &= ~(1ULL << bit_idx);
        }
    }

    // Read the bit at (row, col). Asserts on out-of-range indices.
    __host__ __device__ bool get_bit(int row, int col) const {
        assert(row >= 0 && row < M);
        assert(col >= 0 && col < N);
        
        int word_idx = row * WORDS_PER_ROW + col / BITS_PER_WORD;
        int bit_idx = col % BITS_PER_WORD;
        
        return (data[word_idx] >> bit_idx) & 1ULL;
    }

    // Matrix addition: over F₂ addition is XOR (so A + A = 0), done one
    // 64-bit word at a time. The template parameter only mirrors
    // operator*'s shape; K must equal N (enforced at compile time).
    template<int K>
    __host__ __device__ BinaryMatrix<M, K> operator+(const BinaryMatrix<M, K>& other) const {
        static_assert(K == N, "Matrix dimensions must match for addition");
        
        BinaryMatrix<M, N> result;
        #ifdef __CUDACC__
        #pragma unroll
        #endif
        for (int i = 0; i < TOTAL_WORDS; i++) {
            result.data[i] = data[i] ^ other.data[i];
        }
        return result;
    }

    // Matrix multiplication over F₂: products are AND, sums are XOR.
    // Straightforward O(M*N*K) bit-at-a-time triple loop.
    template<int K>
    __host__ __device__ BinaryMatrix<M, K> operator*(const BinaryMatrix<N, K>& other) const {
        BinaryMatrix<M, K> result;
        
        for (int i = 0; i < M; i++) {
            for (int j = 0; j < K; j++) {
                bool sum = false;
                for (int k = 0; k < N; k++) {
                    sum ^= (get_bit(i, k) & other.get_bit(k, j));
                }
                result.set_bit(i, j, sum);
            }
        }
        return result;
    }

    // Matrix-vector product y = A·x over F₂ for narrow matrices.
    // Bit k of `vector` is component k of x; bit i of the return value is
    // component i of y. Each row's dot product is the parity (popcount & 1)
    // of the AND of the row's bits with the input vector.
    __host__ __device__ uint64_t multiply_vector(uint64_t vector) const {
        static_assert(N <= 64, "Vector multiplication optimized for N <= 64");
        // Fix: the result packs one bit per ROW into a single 64-bit word
        // (result |= 1ULL << i), so M must also fit — the original only
        // constrained N, making M > 64 undefined behavior.
        static_assert(M <= 64, "Result is packed into one 64-bit word; requires M <= 64");
        
        uint64_t result = 0;
        
        for (int i = 0; i < M; i++) {
            uint64_t row_data = data[i * WORDS_PER_ROW];
            if constexpr (N < 64) {
                row_data &= (1ULL << N) - 1; // Mask padding bits (guards raw-pointer init)
            }
            
            // Dot product over F₂ = parity of the bitwise AND.
            uint64_t dot = row_data & vector;
            bool bit_result;
            #ifdef __CUDA_ARCH__
            bit_result = __popcll(dot) & 1; // Parity check on device
            #else
            bit_result = __builtin_popcountll(dot) & 1; // Parity check on host
            #endif
            
            if (bit_result) {
                result |= (1ULL << i);
            }
        }
        
        return result;
    }

    // Create the identity matrix (square matrices only).
    __host__ __device__ static BinaryMatrix<M, N> identity() {
        static_assert(M == N, "Identity matrix must be square");
        
        BinaryMatrix<M, N> result;
        for (int i = 0; i < M; i++) {
            result.set_bit(i, i, true);
        }
        return result;
    }

    // Create a permutation matrix with bit (i, perm[i]) set for each row i,
    // so multiply_vector(x) yields y with y_i = x_{perm[i]}.
    // `perm` must hold M in-range entries; a valid permutation is the
    // caller's responsibility (duplicates are not detected).
    __host__ __device__ static BinaryMatrix<M, N> permutation(const int* perm) {
        static_assert(M == N, "Permutation matrix must be square");
        
        BinaryMatrix<M, N> result;
        for (int i = 0; i < M; i++) {
            assert(perm[i] >= 0 && perm[i] < N);
            result.set_bit(i, perm[i], true);
        }
        return result;
    }

    // Matrix inverse over F₂ via Gauss-Jordan elimination on the augmented
    // matrix [A | I]. Asserts (rather than returning an error) if the
    // matrix is singular. Square matrices only.
    __host__ __device__ BinaryMatrix<N, M> inverse() const {
        static_assert(M == N, "Only square matrices can be inverted");
        
        // Augmented matrix [A | I]
        BinaryMatrix<M, 2*M> augmented;
        
        // Copy original matrix to left half
        for (int i = 0; i < M; i++) {
            for (int j = 0; j < M; j++) {
                augmented.set_bit(i, j, get_bit(i, j));
            }
        }
        
        // Set right half to identity
        for (int i = 0; i < M; i++) {
            augmented.set_bit(i, M + i, true);
        }
        
        // Gauss-Jordan: for each pivot column, find a row with a 1, swap it
        // into place, then XOR it into every other row with a 1 there.
        for (int pivot = 0; pivot < M; pivot++) {
            // Find pivot row (any row at or below `pivot` with a 1 in this column)
            int pivot_row = -1;
            for (int i = pivot; i < M; i++) {
                if (augmented.get_bit(i, pivot)) {
                    pivot_row = i;
                    break;
                }
            }
            
            // Matrix is singular if no pivot found
            assert(pivot_row != -1);
            
            // Swap rows if needed
            if (pivot_row != pivot) {
                for (int j = 0; j < 2*M; j++) {
                    bool temp = augmented.get_bit(pivot, j);
                    augmented.set_bit(pivot, j, augmented.get_bit(pivot_row, j));
                    augmented.set_bit(pivot_row, j, temp);
                }
            }
            
            // Eliminate this column from all other rows (XOR = row subtraction in F₂)
            for (int i = 0; i < M; i++) {
                if (i != pivot && augmented.get_bit(i, pivot)) {
                    for (int j = 0; j < 2*M; j++) {
                        bool val = augmented.get_bit(i, j) ^ augmented.get_bit(pivot, j);
                        augmented.set_bit(i, j, val);
                    }
                }
            }
        }
        
        // Left half is now I, so the right half is A⁻¹.
        BinaryMatrix<M, M> result;
        for (int i = 0; i < M; i++) {
            for (int j = 0; j < M; j++) {
                result.set_bit(i, j, augmented.get_bit(i, M + j));
            }
        }
        
        return result;
    }

    // Pseudo-inverse for rectangular matrices via the normal-equation
    // formula (AᵀA)⁻¹Aᵀ; for square matrices this is the exact inverse.
    //
    // NOTE(review): despite the name, (AᵀA)⁻¹Aᵀ is the classical LEFT
    // inverse (it satisfies ((AᵀA)⁻¹Aᵀ)·A = I when AᵀA is invertible),
    // which applies when M >= N (full column rank), not M > N rows-vs-
    // columns as a right inverse would. Also, over F₂, AᵀA can be
    // singular even when A has full column rank (e.g. A = [1;1] gives
    // AᵀA = 0), in which case the inner inverse() asserts. Callers must
    // ensure AᵀA is invertible for their matrices — confirm intent
    // before relying on this for general rectangular maps.
    __host__ __device__ BinaryMatrix<N, M> right_inverse() const {
        if constexpr (M == N) {
            return inverse();
        } else {
            // Normal-equation route: (AᵀA)⁻¹Aᵀ
            auto At = transpose();
            auto AtA = At * (*this);
            auto AtA_inv = AtA.inverse();
            return AtA_inv * At;
        }
    }

    // Matrix transpose: result(j, i) = this(i, j).
    __host__ __device__ BinaryMatrix<N, M> transpose() const {
        BinaryMatrix<N, M> result;
        for (int i = 0; i < M; i++) {
            for (int j = 0; j < N; j++) {
                result.set_bit(j, i, get_bit(i, j));
            }
        }
        return result;
    }

    // True iff every stored word (and hence every bit) is zero.
    __host__ __device__ bool is_zero() const {
        for (int i = 0; i < TOTAL_WORDS; i++) {
            if (data[i] != 0) return false;
        }
        return true;
    }

    // Rank over F₂ (number of linearly independent rows), computed by
    // forward Gaussian elimination on a scratch copy; *this is untouched.
    __host__ __device__ int rank() const {
        BinaryMatrix<M, N> temp = *this;
        int rank = 0;
        
        for (int col = 0, row = 0; col < N && row < M; col++) {
            // Find a pivot row at or below `row` with a 1 in this column
            int pivot_row = -1;
            for (int i = row; i < M; i++) {
                if (temp.get_bit(i, col)) {
                    pivot_row = i;
                    break;
                }
            }
            
            if (pivot_row == -1) continue; // No pivot in this column
            
            // Swap rows
            if (pivot_row != row) {
                for (int j = 0; j < N; j++) {
                    bool tmp = temp.get_bit(row, j);
                    temp.set_bit(row, j, temp.get_bit(pivot_row, j));
                    temp.set_bit(pivot_row, j, tmp);
                }
            }
            
            // Eliminate the column from the rows below (XOR in F₂)
            for (int i = row + 1; i < M; i++) {
                if (temp.get_bit(i, col)) {
                    for (int j = 0; j < N; j++) {
                        bool val = temp.get_bit(i, j) ^ temp.get_bit(row, j);
                        temp.set_bit(i, j, val);
                    }
                }
            }
            
            rank++;
            row++;
        }
        
        return rank;
    }

    // Debug: print the matrix as rows of space-separated 0/1 (host only;
    // requires <cstdio>).
    __host__ void print() const {
        for (int i = 0; i < M; i++) {
            for (int j = 0; j < N; j++) {
                printf("%d ", get_bit(i, j) ? 1 : 0);
            }
            printf("\n");
        }
    }

    // Access raw packed words for advanced operations. Writers must keep
    // padding bits (col >= N) zero to preserve multiply_vector/is_zero
    // invariants.
    __host__ __device__ const uint64_t* raw_data() const { return data; }
    __host__ __device__ uint64_t* raw_data() { return data; }
};

// Type aliases for common matrix sizes used in GPU programming
using Matrix8x8 = BinaryMatrix<8, 8>;
using Matrix16x16 = BinaryMatrix<16, 16>;
using Matrix32x32 = BinaryMatrix<32, 32>;
using Matrix64x64 = BinaryMatrix<64, 64>;

// Row vectors of hardware-coordinate bits: N bits address 2^N values
// (each bit is one binary digit of the coordinate).
using RegVector = BinaryMatrix<1, 8>;    // 2^8: up to 256 registers per thread
using ThreadVector = BinaryMatrix<1, 10>; // 2^10: up to 1024 threads per block
using WarpVector = BinaryMatrix<1, 5>;   // 2^5: up to 32 warps per block

} // namespace linear_layouts