/******************************************************************************
 * Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto.  Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 ******************************************************************************/

#include <cusparseLt.h>       // cusparseLt header
#include <cuda_runtime_api.h> // cudaMalloc, cudaMemcpy, etc.
#include <cuda_fp8.h>
#include <cuda_fp4.h>

#include <cstdio>             // printf
#include <cstdlib>            // std::rand
#include <vector>
#include <random>
#include <omp.h>

#include "cxxopts.hpp"

// Numeric tags for the supported A/B element types. The comment after each tag
// is the minimum GPU architecture on which cusparseLt supports that type.
#define FP32 1000 // SM8.0
#define FP16 1001 // SM8.0
#define INT8 1002 // SM8.0
#define FP8  1003 // SM9.0
#define FP4  1004 // SM10.0

/*
 * Choose your data type for matrices A and B
 */
#ifndef AB_TYPE
#define AB_TYPE FP16
#endif

// AB_t      : storage type of A and B elements
// C_t       : storage type of C/D elements
// COMPUTE_t : accumulation type used by cusparseLt for this combination
#if AB_TYPE == FP32
using AB_t         = float;
using C_t          = float;
using COMPUTE_t    = float;
#elif AB_TYPE == FP16
using AB_t         = __half;
using C_t          = __half;
using COMPUTE_t    = float;
#elif AB_TYPE == INT8
using AB_t         = int8_t;
using C_t          = int; // can be int8_t, int, __half, __nv_bfloat16 (int8 & int32 are faster)
using COMPUTE_t    = int;
#elif AB_TYPE == FP8
using AB_t         = __nv_fp8_e4m3;
using C_t          = float; // can be __half, __nv_bfloat16, float (fp16 & bf16 not work on 8Kx8Kx8K)
using COMPUTE_t    = float;
#elif AB_TYPE == FP4
using AB_t         = __nv_fp4x2_e2m1;
using C_t          = __half; // can be __half, __nv_bfloat16, float (fp16 & bf16 are faster)
using COMPUTE_t    = float;
#endif

// Number of logical matrix values packed into one AB_t storage element:
// __nv_fp4x2_e2m1 stores two fp4 values per byte, every other type stores one.
#if AB_TYPE == FP4
#define AB_TYPE_ELEMENTS 2
#else
#define AB_TYPE_ELEMENTS 1
#endif

// Compile-time map from a C++ element type to the matching cudaDataType
// enumerator, used when building cusparseLt matrix descriptors.
// The primary template is intentionally empty so that using an unsupported
// type fails at compile time (no `value` member).
template <typename value_t>
struct cuda_type { };

template <>
struct cuda_type <float> {
    static constexpr cudaDataType value = CUDA_R_32F;
};

template <>
struct cuda_type <__half> {
    static constexpr cudaDataType value = CUDA_R_16F;
};

template <>
struct cuda_type <__nv_bfloat16> {
    static constexpr cudaDataType value = CUDA_R_16BF;
};

template <>
struct cuda_type <__nv_fp8_e4m3> {
    static constexpr cudaDataType value = CUDA_R_8F_E4M3;
};

template <>
struct cuda_type <__nv_fp4x2_e2m1> {
    static constexpr cudaDataType value = CUDA_R_4F_E2M1;
};

template <>
struct cuda_type <int> {
    static constexpr cudaDataType value = CUDA_R_32I;
};

template <>
struct cuda_type <int8_t> {
    static constexpr cudaDataType value = CUDA_R_8I;
};

// Compile-time map from the accumulator C++ type to the matching
// cusparseComputeType enumerator for cusparseLtMatmulDescriptorInit.
// Empty primary template: unsupported accumulator types fail to compile.
template <typename value_t>
struct cusparse_compute_type {  };

template <>
struct cusparse_compute_type<float> {
    static constexpr cusparseComputeType value = CUSPARSE_COMPUTE_32F;
};

template <>
struct cusparse_compute_type<int> {
    static constexpr cusparseComputeType value = CUSPARSE_COMPUTE_32I;
};

// Checks the cudaError_t returned by a CUDA runtime call; on failure prints
// the failing line and error string, then terminates the process.
// NOTE(review): deliberately a bare {...} block rather than the usual
// do{...}while(0) idiom — at least one call site in this file invokes
// CHECK_CUDA(...) without a trailing semicolon, which only compiles with the
// block form. Safe here because no call site uses it as an if/else branch.
#define CHECK_CUDA(func)                                                       \
{                                                                              \
    cudaError_t status = (func);                                               \
    if (status != cudaSuccess) {                                               \
        printf("CUDA API failed at line %d with error: %s (%d)\n",             \
               __LINE__, cudaGetErrorString(status), status);                  \
        std::exit(EXIT_FAILURE);                                               \
    }                                                                          \
}

// Checks the cusparseStatus_t returned by a cuSPARSELt call; on failure prints
// the failing line and error string, then terminates the process.
// Kept in the same bare-block form as CHECK_CUDA for consistency.
#define CHECK_CUSPARSE(func)                                                   \
{                                                                              \
    cusparseStatus_t status = (func);                                          \
    if (status != CUSPARSE_STATUS_SUCCESS) {                                   \
        printf("CUSPARSE API failed at line %d with error: %s (%d)\n",         \
               __LINE__, cusparseGetErrorString(status), status);              \
        std::exit(EXIT_FAILURE);                                               \
    }                                                                          \
}

// Process exit code used when the GPU's compute capability is not supported.
constexpr int EXIT_UNSUPPORTED = 2;

// Benchmarks one square matmul problem of the given size (defined below).
void run(const size_t matrix_size, const int num_batches, const int repeat);
// Packs pairs of fp4 values into fp4x2 bytes; returns the count of fp4x2 written.
int pack_fp4_to_fp4x2(__nv_fp4_e2m1* in, size_t in_elements, __nv_fp4x2_e2m1* out, size_t out_elements);
// Unpacks fp4x2 bytes into individual fp4 values; returns the count of fp4 written.
int unpack_fp4x2_to_fp4(__nv_fp4x2_e2m1* in, size_t in_elements, __nv_fp4_e2m1* out, size_t out_elements);
// Round-trip smoke test for the two conversion helpers above.
void test_fp4_fp4x2_conversion();

/**
 * Entry point: parses options, validates the GPU's compute capability, and
 * drives run() over the requested range of matrix sizes.
 *
 * Two size modes:
 *  - exponent mode (step < 0, default): start/end are base-2 exponents and
 *    sizes double each iteration;
 *  - step mode (step >= 0): start/end are literal sizes; step == 0 runs only
 *    the start size, step > 0 walks sizes in fixed increments.
 */
int main(int argc, char* argv[]) {
    cxxopts::Options options(argv[0], "Structured sparse matrix multiplication benchmark based on cuSPARSELt");
    options.add_options()
        ("h,help", "Print usage")
        ("s,start", "Initial matrix size as base-2 exponent (7 -> 2^7=128)", cxxopts::value<size_t>()->default_value("7"))
        ("e,end", "Final matrix size as base-2 exponent (15 -> 2^15=32768)", cxxopts::value<size_t>()->default_value("15"))
        ("b,batch", "Number of batches", cxxopts::value<int>()->default_value("1"))
        ("r,repeat", "Number of iterations per matrix size", cxxopts::value<int>()->default_value("1"))
        ("t,step", "A step >= 0 will enable step mode. In this mode, start/end are interpreted as actual sizes instead of exponents. If step = 0, only start matrix size will be benchmarked",
            cxxopts::value<int>()->default_value("-1"))
        ;
    auto result = options.parse(argc, argv);

    if (result.count("help")) {
        std::printf("%s\n", options.help().c_str());
        return EXIT_SUCCESS;
    }

    size_t start = result["start"].as<size_t>();
    size_t end = result["end"].as<size_t>();
    int batch = result["batch"].as<int>();
    int repeat = result["repeat"].as<int>();
    int step = result["step"].as<int>();
    if (step < 0) {
        // Exponent mode: convert base-2 exponents to actual matrix sizes.
        // NOTE(review): exponents >= 64 would overflow the shift; assumed sane input.
        start = static_cast<size_t>(1) << start;
        end = static_cast<size_t>(1) << end;
    }

    std::printf("%s, start=%zu, end=%zu, batch=%d, repeat=%d, step=%d\n",
        argv[0], start, end, batch, repeat, step);

    // cusparseLt only supports specific compute capabilities; reject others
    // early with a clear message. Keeping the list in a table ensures the
    // check and the message stay consistent.
    int major_cc, minor_cc;
    CHECK_CUDA(cudaDeviceGetAttribute(&major_cc, cudaDevAttrComputeCapabilityMajor, 0));
    CHECK_CUDA(cudaDeviceGetAttribute(&minor_cc, cudaDevAttrComputeCapabilityMinor, 0));
    const int supported_cc[][2] = {
        {8, 0}, {8, 6}, {8, 7}, {8, 9}, {9, 0}, {10, 0}, {12, 0}
    };
    bool cc_supported = false;
    for (const auto& cc : supported_cc) {
        if (major_cc == cc[0] && minor_cc == cc[1]) {
            cc_supported = true;
            break;
        }
    }
    if (!cc_supported) {
        // Fixed the missing commas in the capability list of this message.
        std::printf("\ncusparseLt is supported only on GPU devices with"
                    " compute capability == 8.0, 8.6, 8.7, 8.9, 9.0, 10.0, 12.0 current: %d.%d\n\n",
                     major_cc, minor_cc);

        return EXIT_UNSUPPORTED;
    }

    if (step < 0) {
        // Exponent mode: double the size each iteration.
        for (size_t i = start; i <= end; i <<= 1) {
            run(i, batch, repeat);
        }
    }
    else if (step == 0) {
        // Single-size mode.
        run(start, batch, repeat);
    }
    else {
        // Fixed-increment mode.
        for (size_t i = start; i <= end; i += step) {
            run(i, batch, repeat);
        }
    }

    return EXIT_SUCCESS;
}

/**
 * Benchmarks one problem size: D = alpha * op(A) * op(B) + beta * C using
 * cuSPARSELt, where A is 2:4 structured-sparse and B, C are dense; all
 * matrices are square with m = n = k = matrix_size, batched with strided
 * layout (C and D alias the same device buffer).
 *
 * Steps: generate host data (A is written so it already satisfies the 2:4
 * sparsity pattern), upload, build descriptors/plan, verify the pruning
 * pattern, compress A, optionally auto-tune the kernel, time `repeat`
 * cusparseLtMatmul calls, and — for small problems with repeat == 1 —
 * verify the device result against a host reference computation.
 *
 * @param matrix_size  m = n = k dimension of the square matrices
 * @param num_batches  number of batched matmuls (strided batch layout)
 * @param repeat       number of timed cusparseLtMatmul iterations
 */
void run(const size_t matrix_size, const int num_batches, const int repeat) {
    // Host problem definition, row-major order
    // bigger sizes may require dynamic allocations
    const size_t        m = matrix_size;
    const size_t        n = m;
    const size_t        k = m;
    const size_t        batch_strideA = m * k;// +128;
    const size_t        batch_strideB = k * n;// +128;
    const size_t        batch_strideC = m * n;// +128;
    const auto          order = CUSPARSE_ORDER_ROW;
    const auto          opA = CUSPARSE_OPERATION_NON_TRANSPOSE;
    const auto          opB = CUSPARSE_OPERATION_TRANSPOSE;
    const auto          type_AB = cuda_type<AB_t>::value;
    const auto          type_C = cuda_type<C_t>::value;
    const auto          compute_type = cusparse_compute_type<COMPUTE_t>::value;
    const bool          matmul_search = true;
    const bool          is_rowmajor = (order == CUSPARSE_ORDER_ROW);
    const bool          isA_transposed = (opA != CUSPARSE_OPERATION_NON_TRANSPOSE);
    const bool          isB_transposed = (opB != CUSPARSE_OPERATION_NON_TRANSPOSE);
    const auto          num_A_rows = (isA_transposed) ? k : m;
    const auto          num_A_cols = (isA_transposed) ? m : k;
    const auto          num_B_rows = (isB_transposed) ? n : k;
    const auto          num_B_cols = (isB_transposed) ? k : n;
    const auto          num_C_rows = m;
    const auto          num_C_cols = n;
    const unsigned      alignment = 16;
    const auto          lda = (is_rowmajor) ? num_A_cols : num_A_rows;
    const auto          ldb = (is_rowmajor) ? num_B_cols : num_B_rows;
    const auto          ldc = (is_rowmajor) ? num_C_cols : num_C_rows;
    const auto          A_height = (is_rowmajor) ? num_A_rows : num_A_cols;
    const auto          B_height = (is_rowmajor) ? num_B_rows : num_B_cols;
    const auto          C_height = (is_rowmajor) ? num_C_rows : num_C_cols;
    const auto          A_width = (is_rowmajor) ? num_A_cols : num_A_rows;
    const auto          B_width = (is_rowmajor) ? num_B_cols : num_B_rows;
    const auto          C_width = (is_rowmajor) ? num_C_cols : num_C_rows;
    // *_size counts AB_t storage elements; for FP4 one AB_t packs two values
    // (AB_TYPE_ELEMENTS == 2), so the logical element count is divided down.
    const auto          A_size = num_batches * batch_strideA / AB_TYPE_ELEMENTS;
    const auto          B_size = num_batches * batch_strideB / AB_TYPE_ELEMENTS;
    const auto          C_size = num_batches * batch_strideC;
    const auto          A_size_bytes = A_size * sizeof(AB_t);
    const auto          B_size_bytes = B_size * sizeof(AB_t);
    const auto          C_size_bytes = C_size * sizeof(C_t);
    std::vector<AB_t>   hA(A_size);
    std::vector<AB_t>   hB(B_size);
    std::vector<C_t>    hC(C_size);

    {
        // One RNG per OpenMP thread, each seeded independently, so the
        // parallel fill below is race-free (though not reproducible).
        const int num_threads = omp_get_max_threads();
        std::vector<std::mt19937_64> randgens(num_threads);
        std::random_device rd;
        for (auto& randgen : randgens) {
            randgen.seed(rd());
        }

#pragma omp parallel
        {
            const int tid = omp_get_thread_num();
            auto& randgen = randgens[tid];
            std::uniform_int_distribution<int> dist(-2, 2);

            // A: alternate nonzero/zero positions so every group of four
            // consecutive values contains at most two nonzeros — i.e. A is
            // written to already satisfy the 2:4 structured-sparsity pattern
            // (verified later by cusparseLtSpMMAPruneCheck).
            for (int b = 0; b < num_batches; b++) {
#pragma omp for
                for (int i = 0; i < A_height; i++) {
                    for (int j = 0; j < A_width / AB_TYPE_ELEMENTS; j++) {
                        const size_t pos = b * batch_strideA / AB_TYPE_ELEMENTS
                            + i * lda  / AB_TYPE_ELEMENTS + j;
#if AB_TYPE_ELEMENTS == 1
                        if (j % 2 == 0) {
                            hA[pos] = static_cast<AB_t>(dist(randgen));
                        }
                        else {
                            hA[pos] = static_cast<AB_t>(0);
                        }
#elif AB_TYPE_ELEMENTS == 2
                        // FP4: each storage element holds a pair of values;
                        // alternate (nonzero, nonzero) and (0, 0) pairs.
                        float2 f;
                        if (j % 2 == 0) {
                            f.x = dist(randgen);
                            f.y = dist(randgen);
                        }
                        else {
                            f.x = 0;
                            f.y = 0;
                        }
                        hA[pos] = AB_t(f);
#endif
                    }
                }
            }

            // B: fully dense random values.
            for (int b = 0; b < num_batches; b++) {
#pragma omp for
                for (int i = 0; i < B_height; i++) {
                    for (int j = 0; j < B_width / AB_TYPE_ELEMENTS; j++) {
                        const size_t pos = b * batch_strideB / AB_TYPE_ELEMENTS
                            + i * ldb / AB_TYPE_ELEMENTS + j;
#if AB_TYPE_ELEMENTS == 1
                        hB[pos] = static_cast<AB_t>(dist(randgen));
#elif AB_TYPE_ELEMENTS == 2
                        float2 f;
                        f.x = dist(randgen);
                        f.y = dist(randgen);
                        hB[pos] = AB_t(f);
#endif
                    }
                }
            }

            // C: random values; combined with beta = 1 below this exercises
            // the accumulate path.
            for (int b = 0; b < num_batches; b++) {
#pragma omp for
                for (int i = 0; i < C_height; i++) {
                    for (int j = 0; j < C_width; j++)
                        hC[b * batch_strideC + i * ldc + j] = static_cast<C_t>(dist(randgen));
                }
            }
        }
    }

    const float alpha = 1.0f;
    const float beta = 1.0f;

    //--------------------------------------------------------------------------
    // Device memory management

    AB_t* dA, * dB, * dA_compressed;
    C_t* dC, * dD;
    int* d_valid;

    CHECK_CUDA(cudaMalloc((void**)&dA, A_size_bytes));
    CHECK_CUDA(cudaMalloc((void**)&dB, B_size_bytes));
    CHECK_CUDA(cudaMalloc((void**)&dC, C_size_bytes));
    CHECK_CUDA(cudaMalloc((void**)&d_valid, sizeof(int)));
    dD = dC; // D aliases C: the matmul accumulates in place

    CHECK_CUDA(cudaMemcpy(dA, hA.data(), A_size_bytes, cudaMemcpyHostToDevice));
    CHECK_CUDA(cudaMemcpy(dB, hB.data(), B_size_bytes, cudaMemcpyHostToDevice));
    CHECK_CUDA(cudaMemcpy(dC, hC.data(), C_size_bytes, cudaMemcpyHostToDevice));
    //--------------------------------------------------------------------------
    cusparseLtHandle_t             handle;
    cusparseLtMatDescriptor_t      matA, matB, matC;
    cusparseLtMatmulDescriptor_t   matmul;
    cusparseLtMatmulAlgSelection_t alg_sel;
    cusparseLtMatmulPlan_t         plan;
    cudaStream_t                   stream = 0;

    CHECK_CUSPARSE(cusparseLtInit(&handle));

    // matrix descriptor initialization: A structured (2:4), B and C dense
    CHECK_CUSPARSE(cusparseLtStructuredDescriptorInit(
        &handle, &matA, num_A_rows,
        num_A_cols, lda, alignment,
        type_AB, order,
        CUSPARSELT_SPARSITY_50_PERCENT));
    CHECK_CUSPARSE(cusparseLtDenseDescriptorInit(
        &handle, &matB, num_B_rows,
        num_B_cols, ldb, alignment,
        type_AB, order));
    CHECK_CUSPARSE(cusparseLtDenseDescriptorInit(
        &handle, &matC, num_C_rows,
        num_C_cols, ldc, alignment,
        type_C, order));

    //--------------------------------------------------------------------------
    // SET NUM BATCHES
    CHECK_CUSPARSE(cusparseLtMatDescSetAttribute(&handle, &matA,
        CUSPARSELT_MAT_NUM_BATCHES,
        &num_batches, sizeof(num_batches)));
    CHECK_CUSPARSE(cusparseLtMatDescSetAttribute(&handle, &matB,
        CUSPARSELT_MAT_NUM_BATCHES,
        &num_batches, sizeof(num_batches)));
    CHECK_CUSPARSE(cusparseLtMatDescSetAttribute(&handle, &matC,
        CUSPARSELT_MAT_NUM_BATCHES,
        &num_batches, sizeof(num_batches)));
    //--------------------------------------------------------------------------
    // SET BATCH STRIDE
    // if batch_strideA = 0, the matrix multiplication performs a broadcast of
    // the matrix A
    CHECK_CUSPARSE(cusparseLtMatDescSetAttribute(&handle, &matA,
        CUSPARSELT_MAT_BATCH_STRIDE,
        &batch_strideA,
        sizeof(batch_strideA)));
    CHECK_CUSPARSE(cusparseLtMatDescSetAttribute(&handle, &matB,
        CUSPARSELT_MAT_BATCH_STRIDE,
        &batch_strideB,
        sizeof(batch_strideB)));
    CHECK_CUSPARSE(cusparseLtMatDescSetAttribute(&handle, &matC,
        CUSPARSELT_MAT_BATCH_STRIDE,
        &batch_strideC,
        sizeof(batch_strideC)));

    // matmul, algorithm selection, and plan initialization
    CHECK_CUSPARSE(cusparseLtMatmulDescriptorInit(
        &handle, &matmul, opA, opB,
        &matA, &matB, &matC, &matC,
        compute_type));

    CHECK_CUSPARSE(cusparseLtMatmulAlgSelectionInit(
        &handle, &alg_sel, &matmul,
        CUSPARSELT_MATMUL_ALG_DEFAULT));

    CHECK_CUSPARSE(cusparseLtMatmulPlanInit(&handle, &plan, &matmul, &alg_sel));

    CHECK_CUSPARSE(cusparseLtMatmulDescSetAttribute(
        &handle,
        &matmul,
        CUSPARSELT_MATMUL_SPARSE_MAT_POINTER,
        &dA,
        sizeof(dA)));

#if AB_TYPE == FP4
    // FP4 requires per-block (32-element) scale factors for A and B.
    // All scales are set to 1 (identity scaling) so the host reference
    // computation below stays valid.
    using ABSCALE_t = __nv_fp8_e4m3;
    const cusparseLtMatmulMatrixScale_t AB_scale_mode = CUSPARSELT_MATMUL_MATRIX_SCALE_VEC32_UE4M3;
    const int AB_scale_block = 32;
    // Fill-constructor replaces the previous element-wise loops (also avoids
    // the signed/unsigned loop-bound comparison).
    std::vector<ABSCALE_t> hA_scale(A_size * AB_TYPE_ELEMENTS / AB_scale_block, static_cast<ABSCALE_t>(1));
    std::vector<ABSCALE_t> hB_scale(B_size * AB_TYPE_ELEMENTS / AB_scale_block, static_cast<ABSCALE_t>(1));
    ABSCALE_t* dA_scale, * dB_scale;
    CHECK_CUDA(cudaMalloc((void**)&dA_scale, hA_scale.size() * sizeof(ABSCALE_t)));
    CHECK_CUDA(cudaMalloc((void**)&dB_scale, hB_scale.size() * sizeof(ABSCALE_t)));
    CHECK_CUDA(cudaMemcpy(dA_scale, hA_scale.data(), hA_scale.size() * sizeof(ABSCALE_t), cudaMemcpyHostToDevice));
    CHECK_CUDA(cudaMemcpy(dB_scale, hB_scale.data(), hB_scale.size() * sizeof(ABSCALE_t), cudaMemcpyHostToDevice));
    CHECK_CUSPARSE(cusparseLtMatmulDescSetAttribute(
        &handle,
        &matmul,
        CUSPARSELT_MATMUL_A_SCALE_MODE,
        &AB_scale_mode,
        sizeof(AB_scale_mode)));
    CHECK_CUSPARSE(cusparseLtMatmulDescSetAttribute(
        &handle,
        &matmul,
        CUSPARSELT_MATMUL_A_SCALE_POINTER,
        &dA_scale,
        sizeof(dA_scale)));
    CHECK_CUSPARSE(cusparseLtMatmulDescSetAttribute(
        &handle,
        &matmul,
        CUSPARSELT_MATMUL_B_SCALE_MODE,
        &AB_scale_mode,
        sizeof(AB_scale_mode)));
    CHECK_CUSPARSE(cusparseLtMatmulDescSetAttribute(
        &handle,
        &matmul,
        CUSPARSELT_MATMUL_B_SCALE_POINTER,
        &dB_scale,
        sizeof(dB_scale)));
#endif

    //--------------------------------------------------------------------------
    // Prune the A matrix (in-place) and check the correctness
    // (pruning itself is skipped because hA is generated pre-pruned; the
    //  check below still verifies the pattern)
    //CHECK_CUSPARSE(cusparseLtSpMMAPrune(&handle, &matmul, dA, dA, CUSPARSELT_PRUNE_SPMMA_TILE, stream));
    CHECK_CUSPARSE(cusparseLtSpMMAPruneCheck(&handle, &matmul, dA, d_valid, stream));
    int is_valid;
    CHECK_CUDA(cudaMemcpyAsync(&is_valid, d_valid, sizeof(int), cudaMemcpyDeviceToHost, stream));
    CHECK_CUDA(cudaStreamSynchronize(stream));
    if (is_valid != 0) {
        std::printf("!!!! The matrix has been pruned in a wrong way. "
            "cusparseLtMatmul will not provide correct results\n");
        std::exit(EXIT_FAILURE);
    }
    //--------------------------------------------------------------------------
    // Compress the A matrix
    size_t compressed_size, compressed_buffer_size;
    void* dA_compressedBuffer;
    CHECK_CUSPARSE(cusparseLtSpMMACompressedSize(&handle, &plan, &compressed_size, &compressed_buffer_size));
    std::printf("matrix_size=%zu, A_size_bytes=%zu, compressed_size=%zu, compressed_buffer_size=%zu\n",
        matrix_size, A_size_bytes, compressed_size, compressed_buffer_size);
    CHECK_CUDA(cudaMalloc((void**)&dA_compressed, compressed_size));
    CHECK_CUDA(cudaMalloc((void**)&dA_compressedBuffer, compressed_buffer_size));
    CHECK_CUSPARSE(cusparseLtSpMMACompress(&handle, &plan, dA, dA_compressed, dA_compressedBuffer, stream));

    //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    size_t workspace_size;
    CHECK_CUSPARSE(cusparseLtMatmulGetWorkspace(&handle, &plan, &workspace_size));

    void* d_workspace;
    CHECK_CUDA(cudaMalloc((void**)&d_workspace, workspace_size));

    //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    // Search the best kernel
    if (matmul_search) {
        CHECK_CUSPARSE(cusparseLtMatmulSearch(&handle, &plan, &alpha,
            dA_compressed, dB, &beta,
            dC, dD, d_workspace,
            &stream, 1));
        // dC accumulates so reset dC for correctness check
        CHECK_CUDA(cudaMemcpy(dC, hC.data(), C_size_bytes, cudaMemcpyHostToDevice));
    }

    // Perform the matrix multiplication and time it with CUDA events.
    cudaEvent_t start, stop;
    CHECK_CUDA(cudaEventCreate(&start));
    CHECK_CUDA(cudaEventCreate(&stop));
    CHECK_CUDA(cudaEventRecord(start, stream));
    for (int i = 0; i < repeat; ++i) {
        CHECK_CUSPARSE(cusparseLtMatmul(
            &handle, &plan, &alpha, dA_compressed, dB, &beta, dC, dD, d_workspace, &stream, 1));
    }
    CHECK_CUDA(cudaEventRecord(stop, stream));
    CHECK_CUDA(cudaEventSynchronize(stop));

    float milliseconds = 0;
    CHECK_CUDA(cudaEventElapsedTime(&milliseconds, start, stop));
    std::printf("matrix_size=%zu, m=%zu, n=%zu, k=%zu, num_batches=%d, repeat=%d, milliseconds=%f, tflops=%f\n",
        matrix_size, m, n, k, num_batches, repeat, milliseconds,
        static_cast<float>(m) * n * k * 2 * num_batches * repeat / milliseconds / 1e9);
    // Fix: release the timing events — they were previously leaked on every
    // call to run().
    CHECK_CUDA(cudaEventDestroy(start));
    CHECK_CUDA(cudaEventDestroy(stop));
    //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    // destroy plan and handle
    CHECK_CUSPARSE(cusparseLtMatDescriptorDestroy(&matA));
    CHECK_CUSPARSE(cusparseLtMatDescriptorDestroy(&matB));
    CHECK_CUSPARSE(cusparseLtMatDescriptorDestroy(&matC));
    CHECK_CUSPARSE(cusparseLtMatmulPlanDestroy(&plan));
    CHECK_CUSPARSE(cusparseLtDestroy(&handle));

    //--------------------------------------------------------------------------
    // device result check
    // matrix A has been pruned
    // Only verify single-iteration runs (dC accumulates across repeats) and
    // keep the host reference tractable (<= 1G multiply-adds per batch).
    if (repeat == 1 && static_cast<double>(m) * n * k <= 1024.0 * 1024 * 1024 + 0.5)
    {
        //CHECK_CUDA(cudaMemcpy(hA, dA, A_size, cudaMemcpyDeviceToHost));

#if AB_TYPE == FP4
        // Unpack the fp4x2 storage so the reference loop can index individual
        // fp4 values directly.
        std::vector<__nv_fp4_e2m1> A_unpacked(A_size * AB_TYPE_ELEMENTS);
        std::vector<__nv_fp4_e2m1> B_unpacked(B_size * AB_TYPE_ELEMENTS);
        unpack_fp4x2_to_fp4(hA.data(), A_size, A_unpacked.data(), A_unpacked.size());
        unpack_fp4x2_to_fp4(hB.data(), B_size, B_unpacked.data(), B_unpacked.size());
        const __nv_fp4_e2m1* A = A_unpacked.data();
        const __nv_fp4_e2m1* B = B_unpacked.data();
#else
        const AB_t* A = hA.data();
        const AB_t* B = hB.data();
#endif
        bool A_std_layout = (is_rowmajor != isA_transposed);
        bool B_std_layout = (is_rowmajor != isB_transposed);

        // host computation
        std::vector<C_t> hC_result(C_size);
        for (int b = 0; b < num_batches; b++) {
#pragma omp parallel for
            for (int i = 0; i < m; i++) {
                for (int j = 0; j < n; j++) {
                    COMPUTE_t sum = static_cast<COMPUTE_t>(0);
                    for (int k1 = 0; k1 < k; k1++) {
                        auto posA = (A_std_layout) ? i * lda + k1 : i + k1 * lda;
                        auto posB = (B_std_layout) ? k1 * ldb + j : k1 + j * ldb;
                        posA += b * batch_strideA;
                        posB += b * batch_strideB;
                        sum += static_cast<COMPUTE_t>(A[posA]) *  // [i][k]
                            static_cast<COMPUTE_t>(B[posB]);   // [k][j]
                    }
                    auto posC = (is_rowmajor) ? i * ldc + j : i + j * ldc;
                    posC += b * batch_strideC;
                    hC_result[posC] = static_cast<C_t>(
                        static_cast<COMPUTE_t>(alpha) * sum +
                        static_cast<COMPUTE_t>(beta) * static_cast<COMPUTE_t>(hC[posC]));  // [i][j]
                }
            }
        }

        // reuse hC for device results
        // (fix: added the missing trailing semicolon — the statement only
        //  compiled before because CHECK_CUDA expands to a bare block)
        CHECK_CUDA(cudaMemcpy(hC.data(), dD, C_size_bytes, cudaMemcpyDeviceToHost));

        // host-device comparison
        int correct = 1;
        for (int b = 0; b < num_batches; b++) {
#pragma omp parallel for
            for (int i = 0; i < m; i++) {
                for (int j = 0; j < n; j++) {
                    auto pos = (is_rowmajor) ? i * ldc + j : i + j * ldc;
                    pos += b * batch_strideC;
                    auto device_value = hC[pos];
                    auto host_value = hC_result[pos];
                    if (device_value != host_value) {
                        // direct floating point comparison is not reliable
                        std::printf("(%d, %d):\th=%3.0f vs. d=%3.0f\n",
                            i, j, static_cast<float>(host_value), static_cast<float>(device_value));
                        correct = 0;
                        break;
                    }
                }
            }
        }
        if (correct) {
            std::printf("matrix_size=%zu, test PASSED\n", matrix_size);
        }
        else {
            std::printf("matrix_size=%zu, test FAILED: wrong result\n", matrix_size);
        }
    }
    //--------------------------------------------------------------------------
    // device memory deallocation
    CHECK_CUDA(cudaFree(dA_compressed));
    CHECK_CUDA(cudaFree(dA));
    CHECK_CUDA(cudaFree(dB));
    CHECK_CUDA(cudaFree(dC));
    CHECK_CUDA(cudaFree(d_valid));
    CHECK_CUDA(cudaFree(d_workspace));
    CHECK_CUDA(cudaFree(dA_compressedBuffer));
#if AB_TYPE == FP4
    CHECK_CUDA(cudaFree(dA_scale));
    CHECK_CUDA(cudaFree(dB_scale));
#endif
}

/**
 * Packs consecutive pairs of fp4 values from `in` into fp4x2 storage in `out`.
 * Stops when either buffer is exhausted. If `in_elements` is odd, the last
 * output's high half is zero-padded (fix: previously read one element past
 * the end of `in`).
 *
 * @return number of fp4x2 elements written to `out`
 */
int pack_fp4_to_fp4x2(__nv_fp4_e2m1* in, size_t in_elements, __nv_fp4x2_e2m1* out, size_t out_elements) {
    size_t i;
    size_t j;
    for (i = 0, j = 0; i < in_elements && j < out_elements; i += 2, j += 1) {
        float2 f = float2();
        f.x = static_cast<float>(in[i]);
        // Guard the odd-length tail: pad with zero instead of reading in[i + 1]
        // out of bounds.
        f.y = (i + 1 < in_elements) ? static_cast<float>(in[i + 1]) : 0.0f;
        out[j] = __nv_fp4x2_e2m1(f);
    }
    return static_cast<int>(j);
}

/**
 * Unpacks fp4x2 storage from `in` into individual fp4 values in `out`.
 * Stops when either buffer is exhausted. If `out_elements` is odd, the high
 * half of the final input is dropped (fix: previously wrote one element past
 * the end of `out`).
 *
 * @return number of fp4 elements written to `out` (counted in steps of 2,
 *         matching the original behavior)
 */
int unpack_fp4x2_to_fp4(__nv_fp4x2_e2m1* in, size_t in_elements, __nv_fp4_e2m1* out, size_t out_elements) {
    size_t i;
    size_t j;
    for (i = 0, j = 0; i < in_elements && j < out_elements; i += 1, j += 2) {
        float2 f = static_cast<float2>(in[i]);
        out[j] = static_cast<__nv_fp4_e2m1>(f.x);
        // Guard the odd-length tail: do not write out[j + 1] out of bounds.
        if (j + 1 < out_elements) {
            out[j + 1] = static_cast<__nv_fp4_e2m1>(f.y);
        }
    }
    return static_cast<int>(j);
}

// Round-trip smoke test for the fp4 <-> fp4x2 conversion helpers: packs six
// fp4 values into three bytes, unpacks them again, and prints the original
// values, the packed byte patterns, and the round-tripped values so they can
// be compared by eye.
void test_fp4_fp4x2_conversion() {
    const float values[6] = { 1.0f, 2.0f, 3.0f, 4.0f, 3.0f, 2.0f };
    __nv_fp4_e2m1   a[6];
    __nv_fp4x2_e2m1 b[3];
    __nv_fp4_e2m1   c[6];
    for (int i = 0; i < 6; ++i) {
        a[i] = static_cast<__nv_fp4_e2m1>(values[i]);
    }
    pack_fp4_to_fp4x2(a, 6, b, 3);
    unpack_fp4x2_to_fp4(b, 3, c, 6);
    const unsigned char* raw = reinterpret_cast<unsigned char*>(b);
    std::printf("%f %f %f %f %f %f\n", static_cast<float>(a[0]), static_cast<float>(a[1]), static_cast<float>(a[2]), static_cast<float>(a[3]), static_cast<float>(a[4]), static_cast<float>(a[5]));
    std::printf("%hhx %hhx %hhx\n", raw[0], raw[1], raw[2]);
    std::printf("%f %f %f %f %f %f\n", static_cast<float>(c[0]), static_cast<float>(c[1]), static_cast<float>(c[2]), static_cast<float>(c[3]), static_cast<float>(c[4]), static_cast<float>(c[5]));
}

// vim: tabstop=4:shiftwidth=4:expandtab
