/*
CGBN fixed-window Montgomery square-and-multiply modular exponentiation.
Known issue: produces incorrect results in the 512-bit / 32-thread
configuration — to be fixed.
*/

#include <cstdint>
#include <cstdio>
#include <ctime>

#include <gmp.h>
#include <cgbn/cgbn.h>
#include "utility/support.h"

#define BITS 2048
#define TPB 64
#define TPI 32
#define INSTANCES 1

typedef cgbn_context_t<TPI>             context_t;
typedef cgbn_env_t<context_t, BITS>     env_t;
typedef typename env_t::cgbn_t          bn_t;
typedef typename env_t::cgbn_local_t    bn_local_t;
typedef typename env_t::cgbn_wide_t     bn_wide_t;

// Generate a random probable prime of exactly `bits` bits.
// Repeatedly draws candidates with the top bit forced on (so the width is
// exactly `bits`) until one passes 25 Miller-Rabin rounds.
void generate_random_prime(mpz_t prime, int bits, gmp_randstate_t state) {
    for (;;) {
        mpz_urandomb(prime, state, bits);
        mpz_setbit(prime, bits - 1);             // force exactly `bits` bits
        if (mpz_probab_prime_p(prime, 25) != 0)  // probable (or certain) prime
            break;
    }
}

// Helper: convert an mpz_t into a cgbn_mem_t<BITS>.
// Limbs are written least-significant-word first (order -1), matching CGBN's
// layout. The destination is fully zeroed first so unused high limbs are 0.
//
// Fix: the previous version exported into a fixed stack buffer of BITS/32
// words with no size check — a value wider than BITS bits would overflow the
// buffer. We now guard on the bit width and export directly into the limb
// array, which also removes the intermediate copy.
void mpz_To_cgbn_mem_t(cgbn_mem_t<BITS> &cgbn_mem_t_val, const mpz_t mpz_val) {
    const uint32_t num_words = BITS / 32;

    // Zero all limbs; mpz_export only writes the significant words.
    for (uint32_t i = 0; i < num_words; ++i) cgbn_mem_t_val._limbs[i] = 0;

    // Guard: a value wider than BITS would overflow the limb array.
    if (mpz_sgn(mpz_val) != 0 && mpz_sizeinbase(mpz_val, 2) > BITS) {
        fprintf(stderr, "mpz_To_cgbn_mem_t: value exceeds %d bits, result zeroed\n", BITS);
        return;
    }

    size_t count = 0;
    mpz_export(cgbn_mem_t_val._limbs, &count, -1, sizeof(uint32_t), 0, 0, mpz_val);
}

// Helper: rebuild an mpz_t from a cgbn_mem_t<BITS>.
// Imports all BITS/32 limbs, least-significant word first (order -1),
// native endianness within each 32-bit word, no nail bits.
void cgbn_mem_t_To_mpz(mpz_t mpz_val, const cgbn_mem_t<BITS> &cgbn_mem_t_val){
    const size_t limb_count = BITS / 32;
    mpz_import(mpz_val, limb_count, -1, sizeof(uint32_t), 0, 0, cgbn_mem_t_val._limbs);
}

// Return the number of significant binary digits of the big number in `k`
// (i.e. the index of the highest set bit, plus one), or 0 when k == 0.
// `num_words` is the number of 32-bit limbs to scan (normally BITS/32).
// Pure per-thread scan over the raw limb array — no CGBN cooperation needed.
__device__ __forceinline__ uint32_t get_bit_length(const cgbn_mem_t<BITS> &k, uint32_t num_words) {
    // Scan limbs from most significant to least; the first non-zero limb
    // determines the bit length.
    for (int32_t word_idx = (int32_t)num_words - 1; word_idx >= 0; --word_idx) {
        uint32_t w = k._limbs[word_idx];
        if (w == 0)
            continue;
        // Position of the highest set bit within this limb.
        uint32_t msb = 0;
        while (w >>= 1)
            ++msb;
        return (uint32_t)word_idx * 32u + msb + 1u;
    }
    return 0;   // k == 0
}

// Return bit i (0 = least significant) of the big number stored in `k`.
__device__ __forceinline__ uint32_t get_bit(const cgbn_mem_t<BITS> &k, uint32_t i) {
    // i >> 5 selects the 32-bit limb, i & 31 the bit within it.
    return (k._limbs[i >> 5] >> (i & 31u)) & 1u;
}

// BASE — direct call to CGBN's built-in modular exponentiation:
// result = g^k mod m. One instance is handled by TPI cooperating threads;
// launch with at least INSTANCES * TPI threads.
__global__ void base_modexp_kernel(
    cgbn_mem_t<BITS>* result,
    cgbn_mem_t<BITS>* g,
    cgbn_mem_t<BITS>* k,
    cgbn_mem_t<BITS>* m
){
    uint32_t my_instance = (blockIdx.x * blockDim.x + threadIdx.x) / TPI;
    if(my_instance >= INSTANCES) return;

    context_t ctx(cgbn_report_monitor);
    env_t     env(ctx);

    bn_t base, exponent, modulus, res;
    cgbn_load(env, base, g);
    cgbn_load(env, exponent, k);
    cgbn_load(env, modulus, m);

    cgbn_modular_power(env, res, base, exponent, modulus);

    cgbn_store(env, result, res);
}

// BASE — square-and-multiply in Montgomery-ladder form (one multiply and one
// square per exponent bit, using plain wide multiply + reduction).
// Precondition: g < m (the initial value is NOT reduced here — TODO confirm
// the host always guarantees this, as main() currently does).
__global__ void base_square_multiply_kernel(
    cgbn_mem_t<BITS>* result,
    cgbn_mem_t<BITS>* g,
    cgbn_mem_t<BITS>* k,
    cgbn_mem_t<BITS>* m
){
    int32_t my_instance = (blockIdx.x*blockDim.x + threadIdx.x)/TPI;
    if(my_instance >= INSTANCES) return;

    context_t   context(cgbn_report_monitor);
    env_t       env(context);

    bn_t a[2], local_m, temp;
    bn_wide_t wide_temp;
    cgbn_load(env, local_m, m);
    cgbn_load(env, a[0], g);

    // Number of significant bits of exponent k (read raw from memory).
    uint32_t k_n = get_bit_length(*k, BITS / 32);
    if (k_n == 0) {
        // g^0 = 1
        cgbn_set_ui32(env, a[0], 1);
        cgbn_store(env, result, a[0]);
        return;
    }

    // a[1] = g^2 mod m. Fix: use the same wide-multiply + reduction as the
    // main loop instead of a full cgbn_modular_power call just to square.
    cgbn_mul_wide(env, wide_temp, a[0], a[0]);
    cgbn_rem_wide(env, a[1], wide_temp, local_m);

    // Ladder over the remaining bits, most significant first.
    // Invariant at each step: a[1] == a[0] * g (mod m).
    for (int i = (int)k_n - 2; i >= 0; i--) {
        uint32_t bit = get_bit(*k, (uint32_t)i);
        uint32_t not_bit = 1 - bit;

        // a[not_bit] = a[not_bit] * a[bit] mod m
        cgbn_mul_wide(env, wide_temp, a[not_bit], a[bit]);
        cgbn_rem_wide(env, temp, wide_temp, local_m);
        cgbn_set(env, a[not_bit], temp);

        // a[bit] = a[bit]^2 mod m
        cgbn_mul_wide(env, wide_temp, a[bit], a[bit]);
        cgbn_rem_wide(env, temp, wide_temp, local_m);
        cgbn_set(env, a[bit], temp);
    }

    // The ladder leaves g^k mod m in a[0].
    cgbn_store(env, result, a[0]);
}

// BASE — Montgomery square-and-multiply (left-to-right binary exponentiation).
// result = g^k mod m.
// Requires: g < m and m odd (CGBN Montgomery-space precondition).
__global__ void base_mont_square_multiply_kernel(
    cgbn_mem_t<BITS>* result,
    cgbn_mem_t<BITS>* g,
    cgbn_mem_t<BITS>* k,
    cgbn_mem_t<BITS>* m
){
    int32_t my_instance = (blockIdx.x*blockDim.x + threadIdx.x)/TPI;
    if(my_instance >= INSTANCES) return;

    context_t   context(cgbn_report_monitor);
    env_t       env(context);

    // The exponent is read bit-by-bit from global memory via get_bit(), so it
    // is never loaded into a bn_t (the old version loaded it and never used
    // it, and also declared an unused `temp`).
    bn_t base, modulus, res;
    cgbn_load(env, base, g);
    cgbn_load(env, modulus, m);

    // Number of significant bits of exponent k.
    uint32_t k_n = get_bit_length(*k, BITS / 32);
    if (k_n == 0) {
        // g^0 = 1
        cgbn_set_ui32(env, res, 1);
        cgbn_store(env, result, res);
        return;
    }

    // Convert the base into Montgomery space once and reuse it; the old
    // version called cgbn_bn2mont twice on the same value.
    uint32_t np0 = cgbn_bn2mont(env, res, base, modulus);
    cgbn_set(env, base, res);   // base, Montgomery form

    // Left-to-right scan: res starts holding the top bit's contribution;
    // square every step, multiply in the base when the current bit is 1.
    for (int i = (int)k_n - 2; i >= 0; i--) {
        cgbn_mont_sqr(env, res, res, modulus, np0);
        if (get_bit(*k, (uint32_t)i) == 1) {
            cgbn_mont_mul(env, res, res, base, modulus, np0);
        }
    }

    // Leave Montgomery space and store the result.
    cgbn_mont2bn(env, res, res, modulus, np0);
    cgbn_store(env, result, res);
}


// Fixed-window Montgomery modular exponentiation (adapted from the CGBN
// powm_odd sample). Computes result = g^k mod m using a 2^5-entry window
// table of powers of g in Montgomery space.
// Requires: g < m and m odd.
__global__ void cgbn_fix_windows_powm_odd_kernel(
    cgbn_mem_t<BITS>* result,
    cgbn_mem_t<BITS>* g,
    cgbn_mem_t<BITS>* k,
    cgbn_mem_t<BITS>* m
){
    int32_t my_instance = (blockIdx.x*blockDim.x + threadIdx.x)/TPI;
    if(my_instance >= INSTANCES) return;

    context_t   context(cgbn_report_monitor);
    env_t       env(context);

    bn_t d_g, d_k, d_m, d_result;
    cgbn_load(env, d_g, g);
    cgbn_load(env, d_k, k);
    cgbn_load(env, d_m, m);

    static const uint32_t window_bits = 5;

    bn_t t;
    bn_local_t window[1<<window_bits];
    int32_t    index, position, offset;
    uint32_t   np0;

    // Find the leading high bit of the exponent.
    position = BITS - cgbn_clz(env, d_k);

    // Fix: guard k == 0. Without this, `position` goes negative below and
    // cgbn_extract_bits_ui32 is handed an out-of-range bit position.
    if(position == 0) {
        cgbn_set_ui32(env, d_result, 1);
        cgbn_store(env, result, d_result);
        return;
    }

    // x^0 in Montgomery space is just 2^BITS - modulus.
    cgbn_negate(env, t, d_m);
    cgbn_store(env, window+0, t);

    // Convert x into Montgomery space, store into the window table.
    np0 = cgbn_bn2mont(env, d_result, d_g, d_m);
    cgbn_store(env, window+1, d_result);
    cgbn_set(env, t, d_result);

    // Compute x^2, x^3, ... x^(2^window_bits - 1), store into window table.
    #pragma unroll 1
    for(index=2;index<(1<<window_bits);index++) {
        cgbn_mont_mul(env, d_result, d_result, t, d_m, np0);
        cgbn_store(env, window+index, d_result);
    }

    // Break the exponent into chunks, each window_bits in length, and load
    // the most significant non-zero chunk.
    offset = position % window_bits;
    if(offset == 0)
        position = position - window_bits;
    else
        position = position - offset;
    index = cgbn_extract_bits_ui32(env, d_k, position, window_bits);
    cgbn_load(env, d_result, window+index);

    // Process the remaining exponent chunks.
    while(position > 0) {
        // Square the result window_bits times.
        #pragma unroll 1
        for(int sqr_count=0; sqr_count<window_bits; sqr_count++)
            cgbn_mont_sqr(env, d_result, d_result, d_m, np0);

        // Multiply by the next exponent chunk.
        position = position - window_bits;
        index = cgbn_extract_bits_ui32(env, d_k, position, window_bits);
        cgbn_load(env, t, window+index);
        cgbn_mont_mul(env, d_result, d_result, t, d_m, np0);
    }

    // Exponent fully processed — convert back to normal space and store.
    cgbn_mont2bn(env, d_result, d_result, d_m, np0);
    cgbn_store(env, result, d_result);
}

// Montgomery-ladder square-and-multiply entirely in Montgomery space.
// result = g^k mod m.
// Requires: g < m and m odd.
__global__ void mont_square_multiply_kernel(
    cgbn_mem_t<BITS>* result,
    cgbn_mem_t<BITS>* g,
    cgbn_mem_t<BITS>* k,
    cgbn_mem_t<BITS>* m
){
    int32_t my_instance = (blockIdx.x*blockDim.x + threadIdx.x)/TPI;
    if(my_instance >= INSTANCES) return;

    context_t   context(cgbn_report_monitor);
    env_t       env(context);

    // The exponent is read bit-by-bit from global memory via get_bit(), so it
    // is never loaded into a bn_t (the old version loaded d_k and never used it).
    bn_t d_g, d_m;
    cgbn_load(env, d_g, g);
    cgbn_load(env, d_m, m);

    bn_t a[2];

    // Number of significant bits of exponent k.
    uint32_t k_n = get_bit_length(*k, BITS / 32);
    if (k_n == 0) {
        // g^0 = 1
        cgbn_set_ui32(env, a[0], 1);
        cgbn_store(env, result, a[0]);
        return;
    }

    uint32_t np0;

    // Seed the ladder in Montgomery form: a[0] = g, a[1] = g^2.
    np0 = cgbn_bn2mont(env, a[0], d_g, d_m);
    cgbn_mont_sqr(env, a[1], a[0], d_m, np0);

    // Ladder invariant at each step: a[1] == a[0] * g (in Montgomery space).
    for (int i = (int)k_n - 2; i >= 0; i--) {
        uint32_t bit = get_bit(*k, (uint32_t)i);
        uint32_t not_bit = 1 - bit;

        cgbn_mont_mul(env, a[not_bit], a[not_bit], a[bit], d_m, np0);
        cgbn_mont_sqr(env, a[bit], a[bit], d_m, np0);
    }

    // Convert a[0] back to normal space and store.
    cgbn_mont2bn(env, a[0], a[0], d_m, np0);
    cgbn_store(env, result, a[0]);
}

// ===== Dual-warp cooperative square-and-multiply kernel =====
// One instance is processed by 64 threads (two warps of 32): in each ladder
// step, warp 0 performs the multiply half and warp 1 the square half, with
// operands exchanged through shared memory.
// Launch requirement: blockDim.x == 64, one instance per 64 threads.
// Precondition: g < m (the seed value for a[1] is squared without reduction).
__global__ void double_warp_square_multiply_kernel(
    cgbn_mem_t<BITS>* result,
    const cgbn_mem_t<BITS>* g,
    const cgbn_mem_t<BITS>* k,
    const cgbn_mem_t<BITS>* m) {

    const uint32_t _TPI = 32;
    int32_t my_instance = (threadIdx.x + blockIdx.x * blockDim.x) / (_TPI * 2);
    if(my_instance >= INSTANCES){
        return;
    }

    // Warp id within the block (0 or 1). Fix: derive from the block-local
    // thread index — the old global-index version yielded warp ids >= 2 for
    // every block after the first, so neither warp branch executed there.
    uint32_t warp_id = threadIdx.x / 32;

    typedef cgbn_context_t<_TPI> _context_t;
    typedef cgbn_env_t<_context_t, BITS> _env_t;

    // Per-warp CGBN environment (fix: use _context_t, not the global typedef).
    _context_t _context(cgbn_report_monitor);
    _env_t _env(_context);

    // Shared scratch: ladder state a[0]/a[1], modulus, exponent, bit length.
    __shared__ cgbn_mem_t<BITS> shared_a[2];
    __shared__ cgbn_mem_t<BITS> shared_m;
    __shared__ cgbn_mem_t<BITS> shared_k;
    __shared__ uint32_t shared_k_n;

    if (threadIdx.x == 0) {
        shared_a[0] = g[my_instance];
        shared_a[1] = g[my_instance];
        shared_m = m[my_instance];
        shared_k = k[my_instance];
        shared_k_n = get_bit_length(shared_k, BITS / 32);
    }
    __syncthreads();

    typename _env_t::cgbn_t a_local, b_local, modulus, temp;
    typename _env_t::cgbn_wide_t wide_temp;
    cgbn_load(_env, modulus, &shared_m);

    // Seed the ladder: warp 0 computes a[0] = g mod m,
    // warp 1 computes a[1] = g^2 mod m (disjoint slots, no race).
    if (warp_id == 0) {
        cgbn_load(_env, a_local, &shared_a[0]);
        cgbn_rem(_env, a_local, a_local, modulus);
        cgbn_store(_env, &shared_a[0], a_local);
    } else {
        cgbn_load(_env, a_local, &shared_a[1]);
        cgbn_mul_wide(_env, wide_temp, a_local, a_local);
        cgbn_rem_wide(_env, a_local, wide_temp, modulus);
        cgbn_store(_env, &shared_a[1], a_local);
    }
    __syncthreads();

    uint32_t k_n = shared_k_n;
    if (k_n == 0) {
        // g^0 = 1. Fix: zero the FULL limb array (BITS/32 words) — the old
        // loop zeroed only 16 of 64 limbs, leaving garbage in the result.
        if (threadIdx.x == 0) {
            cgbn_mem_t<BITS> one;
            for (uint32_t j = 0; j < BITS / 32; j++) one._limbs[j] = 0;
            one._limbs[0] = 1;
            result[my_instance] = one;
        }
        return;
    }

    for (int i = (int)k_n - 2; i >= 0; i--) {
        uint32_t bit = get_bit(shared_k, (uint32_t)i);
        uint32_t not_bit = 1 - bit;

        // Phase 1: both warps pull their operands into registers.
        if (warp_id == 0) {
            cgbn_load(_env, a_local, &shared_a[not_bit]);
            cgbn_load(_env, b_local, &shared_a[bit]);
        } else {
            cgbn_load(_env, a_local, &shared_a[bit]);
        }
        // Fix: barrier between the reads and the writes — without it warp 1
        // could overwrite shared_a[bit] while warp 0 is still reading it.
        __syncthreads();

        // Phase 2: compute and write back (the two stores hit disjoint slots).
        if (warp_id == 0) {
            // a[not_bit] = a[not_bit] * a[bit] mod m
            cgbn_mul_wide(_env, wide_temp, a_local, b_local);
            cgbn_rem_wide(_env, temp, wide_temp, modulus);
            cgbn_store(_env, &shared_a[not_bit], temp);
        } else {
            // a[bit] = a[bit]^2 mod m
            cgbn_mul_wide(_env, wide_temp, a_local, a_local);
            cgbn_rem_wide(_env, temp, wide_temp, modulus);
            cgbn_store(_env, &shared_a[bit], temp);
        }
        __syncthreads();   // both halves of the step complete
    }

    // Final ladder value lives in a[0]; warp 0 writes it out.
    if (warp_id == 0) {
        cgbn_load(_env, a_local, &shared_a[0]);
        cgbn_store(_env, &result[my_instance], a_local);
    }
}

// ===== Dual-warp cooperative Montgomery square-and-multiply kernel =====
// One instance is processed by 64 threads (two warps of 32): in each ladder
// step, warp 0 performs the Montgomery multiply and warp 1 the Montgomery
// square, with operands exchanged through shared memory.
// Launch requirement: blockDim.x == 64, one instance per 64 threads.
// Requires: g < m and m odd (cgbn_bn2mont precondition).
__global__ void double_warp_mont_square_multiply_kernel(
    cgbn_mem_t<BITS>* result,
    const cgbn_mem_t<BITS>* g,
    const cgbn_mem_t<BITS>* k,
    const cgbn_mem_t<BITS>* m) {

    const uint32_t _TPI = 32;
    int32_t my_instance = (threadIdx.x + blockIdx.x * blockDim.x) / (_TPI * 2);
    if(my_instance >= INSTANCES){
        return;
    }

    // Warp id within the block (0 or 1). Fix: derive from the block-local
    // thread index — the old global-index version yielded warp ids >= 2 for
    // every block after the first, so neither warp branch executed there.
    uint32_t warp_id = threadIdx.x / 32;

    typedef cgbn_context_t<_TPI> _context_t;
    typedef cgbn_env_t<_context_t, BITS> _env_t;

    // Per-warp CGBN environment (fix: use _context_t, not the global typedef).
    _context_t _context(cgbn_report_monitor);
    _env_t _env(_context);

    // Shared scratch: ladder state a[0]/a[1], modulus, exponent, bit length,
    // and the Montgomery constant np0 computed by warp 0.
    __shared__ cgbn_mem_t<BITS> shared_a[2];
    __shared__ cgbn_mem_t<BITS> shared_m;
    __shared__ cgbn_mem_t<BITS> shared_k;
    __shared__ uint32_t shared_k_n;
    __shared__ uint32_t shared_np0;

    if (threadIdx.x == 0) {
        shared_a[0] = g[my_instance];
        shared_a[1] = g[my_instance];
        shared_m = m[my_instance];
        shared_k = k[my_instance];
        shared_k_n = get_bit_length(shared_k, BITS / 32);
    }
    __syncthreads();

    typename _env_t::cgbn_t a_local, modulus, temp;

    // Both warps load the modulus.
    cgbn_load(_env, modulus, &shared_m);

    // Warp 0 converts the seed values into Montgomery space and publishes np0.
    if(warp_id == 0) {
        cgbn_load(_env, a_local, &shared_a[0]);
        uint32_t my_np0 = cgbn_bn2mont(_env, a_local, a_local, modulus);
        cgbn_store(_env, &shared_a[0], a_local);        // a[0] = g (Mont)

        cgbn_mont_sqr(_env, a_local, a_local, modulus, my_np0);
        cgbn_store(_env, &shared_a[1], a_local);        // a[1] = g^2 (Mont)

        shared_np0 = my_np0;
    }
    __syncthreads();

    uint32_t np0 = shared_np0;   // both warps pick up np0
    uint32_t k_n = shared_k_n;
    if (k_n == 0) {
        // g^0 = 1. Fix: zero the FULL limb array (BITS/32 words) — the old
        // loop zeroed only 16 of 64 limbs, leaving garbage in the result.
        if (threadIdx.x == 0) {
            cgbn_mem_t<BITS> one;
            for (uint32_t j = 0; j < BITS / 32; j++) one._limbs[j] = 0;
            one._limbs[0] = 1;
            result[my_instance] = one;
        }
        return;
    }

    for (int i = (int)k_n - 2; i >= 0; i--) {
        uint32_t bit = get_bit(shared_k, (uint32_t)i);
        uint32_t not_bit = 1 - bit;

        // Phase 1: both warps pull their operands into registers.
        cgbn_load(_env, temp, &shared_a[bit]);
        if (warp_id == 0) {
            cgbn_load(_env, a_local, &shared_a[not_bit]);
        }
        // Fix: barrier between the reads and the writes — without it warp 1
        // could overwrite shared_a[bit] while warp 0 is still reading it.
        __syncthreads();

        // Phase 2: compute and write back (the two stores hit disjoint slots).
        if (warp_id == 0) {
            // a[not_bit] = a[not_bit] * a[bit]  (Montgomery)
            cgbn_mont_mul(_env, a_local, a_local, temp, modulus, np0);
            cgbn_store(_env, &shared_a[not_bit], a_local);
        } else {
            // a[bit] = a[bit]^2  (Montgomery)
            cgbn_mont_sqr(_env, temp, temp, modulus, np0);
            cgbn_store(_env, &shared_a[bit], temp);
        }
        __syncthreads();   // both halves of the step complete
    }

    // Final ladder value lives in a[0]; warp 0 converts it back and writes it.
    if (warp_id == 0) {
        cgbn_load(_env, a_local, &shared_a[0]);
        cgbn_mont2bn(_env, a_local, a_local, modulus, np0);
        cgbn_store(_env, &result[my_instance], a_local);
    }
}

// Host-side helper: copy the device result back, print it, compare it with
// the GMP reference, and zero the device buffer for the next kernel run.
// Returns true when the GPU value equals cpu_result exactly.
static bool fetch_and_check(cgbn_mem_t<BITS> *d_result, const mpz_t cpu_result) {
    const size_t mem_size = sizeof(cgbn_mem_t<BITS>);
    cgbn_mem_t<BITS> h_result;

    CUDA_CHECK(cudaMemcpy(&h_result, d_result, mem_size, cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaMemset(d_result, 0, mem_size));

    mpz_t gpu_result;
    mpz_init(gpu_result);
    cgbn_mem_t_To_mpz(gpu_result, h_result);
    gmp_printf("gpu_result = %Zd\n", gpu_result);

    // Fix: the old code did `bool flag = mpz_cmp(...); ~flag ? ... : ...` —
    // after integral promotion ~flag is always nonzero, so every kernel was
    // reported correct. Compare against zero explicitly instead.
    bool ok = (mpz_cmp(cpu_result, gpu_result) == 0);
    mpz_clear(gpu_result);   // fix: the old code repeatedly re-init'ed, leaking
    return ok;
}

int main(){
    // Random state seeded from the wall clock.
    gmp_randstate_t state;
    gmp_randinit_default(state);
    gmp_randseed_ui(state, (unsigned long)time(NULL));

    mpz_t g, k, m, cpu_result;
    mpz_inits(g, k, m, cpu_result, NULL);

    printf("=== 正在生成 %d位 随机参数 ===\n", BITS);

    // Random prime modulus m; random base g < m.
    generate_random_prime(m, BITS, state);
    do {
        mpz_urandomb(g, state, BITS);
    } while (mpz_cmp(g, m) >= 0);

    // Random exponent k, reduced via Fermat/Euler: k = k mod (m-1).
    mpz_urandomb(k, state, BITS);
    mpz_t m_minus_1;
    mpz_init(m_minus_1);
    mpz_sub_ui(m_minus_1, m, 1);   // fix: mpz_sub_ui instead of mpz_set_d(one,1)+mpz_sub
    mpz_mod(k, k, m_minus_1);
    mpz_clear(m_minus_1);          // fix: the old temporaries were never cleared

    gmp_printf("m = %Zd\n", m);
    gmp_printf("g = %Zd\n", g);
    gmp_printf("k = %Zd\n", k);

    // CPU reference with GMP.
    printf("\n=== CPU-GMP 模幂计算 ===\n");
    clock_t cpu_start = clock();
    mpz_powm(cpu_result, g, k, m);
    clock_t cpu_end = clock();
    double cpu_time = ((double)(cpu_end - cpu_start)) / CLOCKS_PER_SEC * 1000.0;
    printf("计算时间: %.3f ms\n", cpu_time);
    gmp_printf("GMP:cpu_result = %Zd\n", cpu_result);

    printf("\n=== GPU 模幂计算 ===\n");
    cgbn_mem_t<BITS> h_g, h_k, h_m;
    mpz_To_cgbn_mem_t(h_g, g);
    mpz_To_cgbn_mem_t(h_k, k);
    mpz_To_cgbn_mem_t(h_m, m);

    cgbn_mem_t<BITS> *d_g = NULL, *d_k = NULL, *d_m = NULL, *d_result = NULL;
    const size_t mem_size = sizeof(cgbn_mem_t<BITS>);

    CUDA_CHECK(cudaDeviceReset());

    CUDA_CHECK(cudaMalloc(&d_g, mem_size));
    CUDA_CHECK(cudaMalloc(&d_k, mem_size));
    CUDA_CHECK(cudaMalloc(&d_m, mem_size));
    CUDA_CHECK(cudaMalloc(&d_result, mem_size));

    CUDA_CHECK(cudaMemcpy(d_g, &h_g, mem_size, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_k, &h_k, mem_size, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_m, &h_m, mem_size, cudaMemcpyHostToDevice));

    cudaEvent_t start, stop;
    CUDA_CHECK(cudaEventCreate(&start));
    CUDA_CHECK(cudaEventCreate(&stop));
    float gpu_time;

    dim3 grid(1);
    dim3 block(TPB);

    /* BASE: CGBN built-in modular power */
    printf("=== CGBN-modular_power()函数\n");
    CUDA_CHECK(cudaDeviceSynchronize());
    CUDA_CHECK(cudaEventRecord(start, 0));
    base_modexp_kernel<<<grid, block>>>(d_result, d_g, d_k, d_m);
    CUDA_CHECK(cudaGetLastError());   // fix: catch launch-configuration errors
    CUDA_CHECK(cudaEventRecord(stop, 0));
    CUDA_CHECK(cudaEventSynchronize(stop));
    CUDA_CHECK(cudaEventElapsedTime(&gpu_time, start, stop));
    printf("计算时间: %.3f ms\n", gpu_time);
    printf("GPU-计算结果：%s\n", fetch_and_check(d_result, cpu_result) ? "正确" : "不正确");

    /* fixed-window Montgomery powm */
    printf("\n=== CGBN-fix-windows-Mont-平方乘-模幂算法\n");
    CUDA_CHECK(cudaDeviceSynchronize());
    CUDA_CHECK(cudaEventRecord(start, 0));
    cgbn_fix_windows_powm_odd_kernel<<<grid, block>>>(d_result, d_g, d_k, d_m);
    CUDA_CHECK(cudaGetLastError());
    CUDA_CHECK(cudaEventRecord(stop, 0));
    CUDA_CHECK(cudaEventSynchronize(stop));
    CUDA_CHECK(cudaEventElapsedTime(&gpu_time, start, stop));
    printf("计算时间: %.3f ms\n", gpu_time);
    printf("GPU-计算结果：%s\n", fetch_and_check(d_result, cpu_result) ? "正确" : "不正确");

    /* BASE: left-to-right Montgomery square-and-multiply */
    printf("\n=== base-mont-平方乘算法\n");
    CUDA_CHECK(cudaDeviceSynchronize());
    CUDA_CHECK(cudaEventRecord(start, 0));
    base_mont_square_multiply_kernel<<<grid, block>>>(d_result, d_g, d_k, d_m);
    CUDA_CHECK(cudaGetLastError());
    CUDA_CHECK(cudaEventRecord(stop, 0));
    CUDA_CHECK(cudaEventSynchronize(stop));
    CUDA_CHECK(cudaEventElapsedTime(&gpu_time, start, stop));
    printf("计算时间: %.3f ms\n", gpu_time);
    printf("GPU-计算结果：%s\n", fetch_and_check(d_result, cpu_result) ? "正确" : "不正确");

    /* Montgomery-ladder square-and-multiply */
    printf("\n=== Mont-平方乘-模幂算法\n");
    CUDA_CHECK(cudaDeviceSynchronize());
    CUDA_CHECK(cudaEventRecord(start, 0));
    mont_square_multiply_kernel<<<grid, block>>>(d_result, d_g, d_k, d_m);
    CUDA_CHECK(cudaGetLastError());
    CUDA_CHECK(cudaEventRecord(stop, 0));
    CUDA_CHECK(cudaEventSynchronize(stop));
    CUDA_CHECK(cudaEventElapsedTime(&gpu_time, start, stop));
    printf("计算时间: %.3f ms\n", gpu_time);
    printf("GPU-计算结果：%s\n", fetch_and_check(d_result, cpu_result) ? "正确" : "不正确");

    // double_warp_square_multiply_kernel remains available but is not
    // benchmarked here, matching the original (commented-out) run:
    // double_warp_square_multiply_kernel<<<grid, block>>>(d_result, d_g, d_k, d_m);

    /* dual-warp Montgomery square-and-multiply */
    printf("\n=== double_warp-mont-平方乘-模幂算法\n");
    CUDA_CHECK(cudaDeviceSynchronize());
    CUDA_CHECK(cudaEventRecord(start, 0));
    double_warp_mont_square_multiply_kernel<<<grid, block>>>(d_result, d_g, d_k, d_m);
    CUDA_CHECK(cudaGetLastError());
    CUDA_CHECK(cudaEventRecord(stop, 0));
    CUDA_CHECK(cudaEventSynchronize(stop));
    CUDA_CHECK(cudaEventElapsedTime(&gpu_time, start, stop));
    printf("计算时间: %.3f ms\n", gpu_time);
    printf("GPU-计算结果：%s\n", fetch_and_check(d_result, cpu_result) ? "正确" : "不正确");

    // Cleanup (checked — an earlier sticky error would surface here).
    CUDA_CHECK(cudaFree(d_g));
    CUDA_CHECK(cudaFree(d_k));
    CUDA_CHECK(cudaFree(d_m));
    CUDA_CHECK(cudaFree(d_result));
    CUDA_CHECK(cudaEventDestroy(start));
    CUDA_CHECK(cudaEventDestroy(stop));

    gmp_randclear(state);
    mpz_clears(g, k, m, cpu_result, NULL);

    return 0;
}
