﻿#include<iostream>
#include<limits>
#include<iomanip>
#include<cstdint>

#include "cuda_runtime.h"
#include "device_launch_parameters.h"

using namespace std;

//Pointer-type aliases used as parameter types throughout this file.
#define argElement_x64 uint64_t*
#define argElement_x32 uint32_t*
#define argElement_fp double*
//Limb counts for an element of GF(2^221-3) in each representation:
//7 x 32-bit limbs (top limb 29 bits), 4 x 64-bit limbs, 5 x 45-bit limbs in uint64 (fp path).
#define NUM_DIGITS_221_x32 7
#define NUM_DIGITS_221_x64 4
#define NUM_DIGITS_221_x64_fp 5

//Benchmark launch geometry: BLOCK_HEIGHT is 1, so BLOCK_SIZE == blockDim.x.
#define BLOCK_WIDTH 32
#define BLOCK_HEIGHT 1
#define BLOCK_SIZE (BLOCK_WIDTH*BLOCK_HEIGHT)
//Total number of field elements processed by a benchmark launch.
#define N (56*BLOCK_SIZE)
//Repetitions of the measured operation per thread.
#define Iterate 1000000
typedef uint64_t EltBN221_x64[NUM_DIGITS_221_x64];
//5-limb (radix-2^45) element; literal 5 matches NUM_DIGITS_221_x64_fp.
typedef uint64_t EltBN221_x64_fp[5];
//Double-width product buffer (10 limbs) before reduction.
typedef uint64_t EltBN221_x64_fp_buffer[10];
//NOTE(review): name says BN521 but this file works mod 2^221-3 — likely a copy-paste leftover.
typedef double EltBN521_fp[5];

//Integer implementation: 7 x 32-bit limbs, with the top limb holding 29 bits.

//p=2^221-3
//reduction for integer implementation
//p=2^221-3
//reduction for integer implementation
//Folds a 14-limb (32-bit) product c[0..13] back into a 7-limb result r[0..6].
//Uses 2^(7*32) = 2^224 = 8*2^221 ≡ 8*3 = 24 (mod p), hence the "3 * (x << 3)" factor.
//Clobbers c[0..6] (used as scratch); c is NOT a valid output afterwards.
__device__ void intreduction(argElement_x32 r, argElement_x32 c) {
	uint64_t t = 0;
	//First pass: c[i] += 24 * c[7+i], propagating the 64-bit carry in t.
	for (int i = 0;i < 7;i++) {
		t = 3 * ((uint64_t)c[7 + i] << 3) + t + c[i];
		c[i] = (uint32_t)t;
		t = t >> 32;
	}
	//Fold bits above 2^221 once more: carry t sits at 2^224 (= t<<3 units of 2^221).
	//NOTE(review): `c[7] >> 29` reads the already-consumed high limb c[7]; the
	//symmetric construction would use c[6] >> 29 (and mask c[6]) — verify against
	//a reference implementation before relying on full reduction here.
	t = (c[7] >> 29) + (t << 3);
	t = c[0] + 3 * t;
	r[0] = (uint32_t)t;t = t >> 32;
	//Final carry propagation into the remaining limbs.
	for (int i = 1;i < 7;i++) {
		t = (uint64_t)c[i] + t;
		r[i] = (uint32_t)t;
		t = t >> 32;
	}
}
//modmul with int
//modmul with int
//Schoolbook 7x7 32-bit multiplication via PTX mad instructions, followed by
//modular reduction. Low halves are accumulated first, then high halves with
//carry chains.
//NOTE(review): each asm() statement is separate, so the CC.CF carry flag is not
//guaranteed to survive between statements — NVCC may schedule intervening
//instructions. A single multi-instruction asm block would be safer.
//NOTE(review): the inner mad.lo.u32 at the bottom loop has no .cc, so carries
//out of the low-part accumulation are silently dropped (intmul2 chains them).
__device__ void intmul(argElement_x32 r, argElement_x32 a, argElement_x32 b) {
	uint32_t c[14];
	for (int i = 0;i < 14;i++) {
		c[i] = 0;
	}

	//Row 0: low halves of a[j]*b[0].
	for (int j = 0;j < NUM_DIGITS_221_x32;j++) {
		// c[j]=0;
		asm("mad.lo.u32 %0,%1,%2,%3;":"=r"(c[j]) : "r"(a[j]), "r"(b[0]), "r"(c[j]));
	}
	//Row 0: high halves of a[j]*b[0] with a carry chain.
	asm("mad.hi.cc.u32 %0,%1,%2,%3;":"=r"(c[1]) : "r"(a[0]), "r"(b[0]), "r"(c[1]));
	// c[NUM_DIGITS_221_x32]=0;
	for (int j = 1;j < NUM_DIGITS_221_x32;j++) {
		asm("madc.hi.cc.u32 %0,%1,%2,%3;":"=r"(c[j + 1]) : "r"(a[j]), "r"(b[0]), "r"(c[j + 1]));
	}

	//Rows 1..6: same pattern shifted by i limbs.
	for (int i = 1;i < NUM_DIGITS_221_x32;i++) {
		for (int j = 0;j < NUM_DIGITS_221_x32;j++) {
			asm("mad.lo.u32 %0,%1,%2,%3;":"=r"(c[i+j]) : "r"(a[j]), "r"(b[i]), "r"(c[i+j]));
		}
		asm("addc.cc.u32 %0, %1, 0;" : "=r"(c[i + NUM_DIGITS_221_x32]): "r"(c[i + NUM_DIGITS_221_x32]));
		for (int j = 0;j < NUM_DIGITS_221_x32;j++) {
			asm("madc.hi.cc.u32 %0, %1, %2, %3;" : "=r"(c[i + j + 1]) : "r"(a[j]), "r"(b[i]),"r"(c[i+j+1]));
		}
	}
	intreduction(r, c);
}
//modmul2 with int
//modmul2 with int
//Variant of intmul that also chains carries through the low-half accumulation
//(mad.lo.cc / madc.lo.cc), avoiding the dropped carries of intmul.
//NOTE(review): carries are still expected to survive across separate asm()
//statements (e.g. the addc.u32 at c[8]); that is not guaranteed by the
//compiler — consider fusing each chain into one asm block.
__device__ void intmul2(argElement_x32 r, argElement_x32 a, argElement_x32 b) {
	uint32_t c[14];
	for (int i = 0;i < 14;i++) {
		c[i] = 0;
	}
	//Row 0 low halves.
	for (int j = 0;j < 7;j++) {
		asm("mad.lo.u32 %0,%1,%2,%3;":"=r"(c[j]) : "r"(a[j]), "r"(b[0]), "r"(c[j]));
	}
	//Row 0 high halves with carry chain.
	asm("mad.hi.cc.u32 %0,%1,%2,%3;":"=r"(c[1]) : "r"(a[0]), "r"(b[0]), "r"(c[1]));
	for (int j = 1;j < 7;j++) {
		asm("madc.hi.cc.u32 %0,%1,%2,%3;":"=r"(c[j + 1]) : "r"(a[j]), "r"(b[0]), "r"(c[j+1]));
	}
	asm("addc.u32 %0,%1,0;":"=r"(c[8]) : "r"(c[8]));//absorb final carry / set CF=0
	//Rows 1..6: low halves (carry-chained), then high halves (carry-chained).
	for (int i = 1;i < 7;i++) {
		asm("mad.lo.cc.u32 %0,%1,%2,%3;":"=r"(c[i]) : "r"(a[0]), "r"(b[i]), "r"(c[i]));
		for (int j = 1;j < 7;j++) {
			asm("madc.lo.cc.u32 %0,%1,%2,%3;":"=r"(c[i + j]) : "r"(a[j]), "r"(b[i]), "r"(c[i+j]));
		}
		asm("addc.cc.u32 %0,%1,0;":"=r"(c[7 + i]) : "r"(c[7 + i]));
		asm("mad.hi.cc.u32 %0,%1,%2,%3;":"=r"(c[i+1]) : "r"(a[0]), "r"(b[i]), "r"(c[i+1]));
		for (int j = 1;j < 7;j++) {
			asm("madc.hi.cc.u32 %0,%1,%2,%3;":"=r"(c[i + j + 1]) : "r"(a[j]), "r"(b[i]), "r"(c[i+j+1]));
		}
		asm("addc.cc.u32 %0,%1,0;":"=r"(c[8 + i]) : "r"(c[8 + i]));
	}
	intreduction(r, c);
}

//Adopts the Zheng-Fangyu multiplication method (fully unrolled single asm block).
//Fully unrolled 8x8 32-bit limb multiplication in one asm volatile block, so
//carry-flag chains cannot be broken by the compiler.
//NOTE(review): this routine works on 8 limbs and folds the upper half with
//x38 (= 2*19), which is the reduction for 2^255-19 (Curve25519), NOT for this
//file's p = 2^221-3 — it appears imported from another codebase; verify before use.
//NOTE(review): the local `ct` shadows the file-scope __constant__ double ct[2].
//Register trick: ct[2]=b[0], ct[3]=b[1] enter as "+r" operands %10/%11 and are
//consumed as multiplicands in rounds 0-1, then reused as accumulator limbs
//(zeroed by the addc instructions in rounds 2-3).
__device__ void intmul3(argElement_x32 c, argElement_x32 a, argElement_x32 b) {
	unsigned int ct[8] = { 0 };
	ct[2] = b[0];
	ct[3] = b[1];
	asm volatile
	(
			//round 0: a[*] * b[0] (%10 = b[0])
			"mul.lo.u32 		%0, %10, %16;"
			"mul.hi.u32 		%1, %10, %16;"
			"mad.lo.cc.u32 		%1, %10, %17,  %1;"
			"madc.hi.u32 		%2, %10, %17,  0;"
			"mad.lo.cc.u32 		%2, %10, %18,  %2;"
			"madc.hi.u32 		%3, %10, %18,  0;"
			"mad.lo.cc.u32 		%3, %10, %19,  %3;"
			"madc.hi.u32 		%4, %10, %19,  0;"
			"mad.lo.cc.u32 		%4, %10, %20,  %4;"
			"madc.hi.u32 		%5, %10, %20,  0;"
			"mad.lo.cc.u32 		%5, %10, %21,  %5;"
			"madc.hi.u32 		%6, %10, %21,  0;"
			"mad.lo.cc.u32 		%6, %10, %22,  %6;"
			"madc.hi.u32 		%7, %10, %22,  0;"
			"mad.lo.cc.u32 		%7, %10, %23,  %7;"
			"madc.hi.u32 		%8, %10, %23,  0;"
			//round 1: a[*] * b[1] (%11 = b[1]), even/odd limbs interleaved
			"mad.lo.cc.u32 		%1, %11, %16,  %1;"
			"madc.hi.cc.u32 	%2, %11, %16,  %2;"
			"madc.lo.cc.u32 	%3, %11, %18,  %3;"
			"madc.hi.cc.u32 	%4, %11, %18,  %4;"
			"madc.lo.cc.u32 	%5, %11, %20,  %5;"
			"madc.hi.cc.u32 	%6, %11, %20,  %6;"
			"madc.lo.cc.u32 	%7, %11, %22,  %7;"
			"madc.hi.cc.u32 	%8, %11, %22,  %8;"
			"addc.u32 			%9, 0,  0;"
			"mad.lo.cc.u32 		%2, %11, %17,   %2;"
			"madc.hi.cc.u32 	%3, %11, %17,   %3;"
			"madc.lo.cc.u32 	%4, %11, %19,   %4;"
			"madc.hi.cc.u32 	%5, %11, %19,   %5;"
			"madc.lo.cc.u32 	%6, %11, %21,   %6;"
			"madc.hi.cc.u32 	%7, %11, %21,   %7;"
			"madc.lo.cc.u32 	%8, %11, %23,   %8;"
			"madc.hi.u32 		%9, %11, %23,   %9;"
			//round 2: a[*] * b[2] (%24 = b[2]); %10 is recycled as accumulator
			"mad.lo.cc.u32 		%2, %24, %16,  %2;"
			"madc.hi.cc.u32 	%3, %24, %16,  %3;"
			"madc.lo.cc.u32 	%4, %24, %18,  %4;"
			"madc.hi.cc.u32 	%5, %24, %18,  %5;"
			"madc.lo.cc.u32 	%6, %24, %20,  %6;"
			"madc.hi.cc.u32 	%7, %24, %20,  %7;"
			"madc.lo.cc.u32 	%8, %24, %22,  %8;"
			"madc.hi.cc.u32 	%9, %24, %22,  %9;"
			"addc.u32 			%10, 0,  0;"
			"mad.lo.cc.u32 		%3, %24, %17,   %3;"
			"madc.hi.cc.u32 	%4, %24, %17,   %4;"
			"madc.lo.cc.u32 	%5, %24, %19,   %5;"
			"madc.hi.cc.u32 	%6, %24, %19,   %6;"
			"madc.lo.cc.u32 	%7, %24, %21,   %7;"
			"madc.hi.cc.u32 	%8, %24, %21,   %8;"
			"madc.lo.cc.u32 	%9, %24, %23,   %9;"
			"madc.hi.u32 		%10, %24, %23,  %10;"
			//round 3: a[*] * b[3] (%25 = b[3]); %11 recycled as accumulator
			"mad.lo.cc.u32 		%3, %25, %16,  %3;"
			"madc.hi.cc.u32 	%4, %25, %16,  %4;"
			"madc.lo.cc.u32 	%5, %25, %18,  %5;"
			"madc.hi.cc.u32 	%6, %25, %18,  %6;"
			"madc.lo.cc.u32 	%7, %25, %20,  %7;"
			"madc.hi.cc.u32 	%8, %25, %20,  %8;"
			"madc.lo.cc.u32 	%9, %25, %22,  %9;"
			"madc.hi.cc.u32 	%10, %25, %22,  %10;"
			"addc.u32 			%11, 0,  0;"
			"mad.lo.cc.u32 		%4, %25, %17,   %4;"
			"madc.hi.cc.u32 	%5, %25, %17,   %5;"
			"madc.lo.cc.u32 	%6, %25, %19,   %6;"
			"madc.hi.cc.u32 	%7, %25, %19,   %7;"
			"madc.lo.cc.u32 	%8, %25, %21,   %8;"
			"madc.hi.cc.u32 	%9, %25, %21,   %9;"
			"madc.lo.cc.u32 	%10, %25, %23,   %10;"
			"madc.hi.u32 		%11, %25, %23,   %11;"
			//round 4: a[*] * b[4] (%26 = b[4])
			"mad.lo.cc.u32 		%4, %26, %16,  %4;"
			"madc.hi.cc.u32 	%5, %26, %16,  %5;"
			"madc.lo.cc.u32 	%6, %26, %18,  %6;"
			"madc.hi.cc.u32 	%7, %26, %18,  %7;"
			"madc.lo.cc.u32 	%8, %26, %20,  %8;"
			"madc.hi.cc.u32 	%9, %26, %20,  %9;"
			"madc.lo.cc.u32 	%10, %26, %22,  %10;"
			"madc.hi.cc.u32 	%11, %26, %22,  %11;"
			"addc.u32 			%12, 0,  0;"
			"mad.lo.cc.u32 		%5, %26, %17,   %5;"
			"madc.hi.cc.u32 	%6, %26, %17,   %6;"
			"madc.lo.cc.u32 	%7, %26, %19,   %7;"
			"madc.hi.cc.u32 	%8, %26, %19,   %8;"
			"madc.lo.cc.u32 	%9, %26, %21,   %9;"
			"madc.hi.cc.u32 	%10, %26, %21,   %10;"
			"madc.lo.cc.u32 	%11, %26, %23,   %11;"
			"madc.hi.u32 		%12, %26, %23,   %12;"
			//round 5: a[*] * b[5] (%27 = b[5])
			"mad.lo.cc.u32 		%5, %27, %16,  %5;"
			"madc.hi.cc.u32 	%6, %27, %16,  %6;"
			"madc.lo.cc.u32 	%7, %27, %18,  %7;"
			"madc.hi.cc.u32 	%8, %27, %18,  %8;"
			"madc.lo.cc.u32 	%9, %27, %20,  %9;"
			"madc.hi.cc.u32 	%10, %27, %20,  %10;"
			"madc.lo.cc.u32 	%11, %27, %22,  %11;"
			"madc.hi.cc.u32 	%12, %27, %22,  %12;"
			"addc.u32 			%13, 0,  0;"
			"mad.lo.cc.u32 		%6, %27, %17,   %6;"
			"madc.hi.cc.u32 	%7, %27, %17,   %7;"
			"madc.lo.cc.u32 	%8, %27, %19,   %8;"
			"madc.hi.cc.u32 	%9, %27, %19,   %9;"
			"madc.lo.cc.u32 	%10, %27, %21,   %10;"
			"madc.hi.cc.u32 	%11, %27, %21,   %11;"
			"madc.lo.cc.u32 	%12, %27, %23,   %12;"
			"madc.hi.u32 		%13, %27, %23,   %13;"

			//round 6: a[*] * b[6] (%28 = b[6])
			"mad.lo.cc.u32 		%6, %28, %16,  %6;"
			"madc.hi.cc.u32 	%7, %28, %16,  %7;"
			"madc.lo.cc.u32 	%8, %28, %18,  %8;"
			"madc.hi.cc.u32 	%9, %28, %18,  %9;"
			"madc.lo.cc.u32 	%10, %28, %20,  %10;"
			"madc.hi.cc.u32 	%11, %28, %20,  %11;"
			"madc.lo.cc.u32 	%12, %28, %22,  %12;"
			"madc.hi.cc.u32 	%13, %28, %22,  %13;"
			"addc.u32 			%14, 0,  0;"
			"mad.lo.cc.u32 		%7, %28, %17,   %7;"
			"madc.hi.cc.u32 	%8, %28, %17,   %8;"
			"madc.lo.cc.u32 	%9, %28, %19,   %9;"
			"madc.hi.cc.u32 	%10, %28, %19,   %10;"
			"madc.lo.cc.u32 	%11, %28, %21,   %11;"
			"madc.hi.cc.u32 	%12, %28, %21,   %12;"
			"madc.lo.cc.u32 	%13, %28, %23,   %13;"
			"madc.hi.u32 		%14, %28, %23,   %14;"
			//round 7: a[*] * b[7] (%29 = b[7])
			"mad.lo.cc.u32 		%7, %29, %16,  %7;"
			"madc.hi.cc.u32 	%8, %29, %16,  %8;"
			"madc.lo.cc.u32 	%9, %29, %18,  %9;"
			"madc.hi.cc.u32 	%10, %29, %18,  %10;"
			"madc.lo.cc.u32 	%11, %29, %20,  %11;"
			"madc.hi.cc.u32 	%12, %29, %20,  %12;"
			"madc.lo.cc.u32 	%13, %29, %22,  %13;"
			"madc.hi.cc.u32 	%14, %29, %22,  %14;"
			"addc.u32 			%15, 0,  0;"
			"mad.lo.cc.u32 		%8, %29, %17,   %8;"
			"madc.hi.cc.u32 	%9, %29, %17,   %9;"
			"madc.lo.cc.u32 	%10, %29, %19,   %10;"
			"madc.hi.cc.u32 	%11, %29, %19,   %11;"
			"madc.lo.cc.u32 	%12, %29, %21,   %12;"
			"madc.hi.cc.u32 	%13, %29, %21,   %13;"
			"madc.lo.cc.u32 	%14, %29, %23,   %14;"
			"madc.hi.u32 		%15, %29, %23,   %15;"

			//fold upper 8 limbs into lower 8 with x38 (2^256 ≡ 38 mod 2^255-19)

			"mad.lo.cc.u32 		%0,		%8, 	38,		%0;"
			"madc.hi.cc.u32 	%1,		%8, 	38,		%1;"
			"madc.lo.cc.u32 	%2,		%10, 	38,		%2;"
			"madc.hi.cc.u32 	%3,		%10, 	38,		%3;"
			"madc.lo.cc.u32 	%4,		%12, 	38,		%4;"
			"madc.hi.cc.u32 	%5,		%12, 	38,		%5;"
			"madc.lo.cc.u32 	%6,		%14, 	38,		%6;"
			"madc.hi.cc.u32 	%7,		%14, 	38,		%7;"
			"addc.u32			%8, 	0, 		0;"
			"mad.lo.cc.u32 		%1,		%9, 	38,		%1;"
			"madc.hi.cc.u32 	%2,		%9, 	38,		%2;"
			"madc.lo.cc.u32 	%3,		%11, 	38,		%3;"
			"madc.hi.cc.u32 	%4,		%11, 	38,		%4;"
			"madc.lo.cc.u32 	%5,		%13, 	38,		%5;"
			"madc.hi.cc.u32 	%6,		%13, 	38,		%6;"
			"madc.lo.cc.u32 	%7,		%15, 	38,		%7;"
			"madc.hi.u32 		%8,		%15, 	38,		%8;"

			:"=r"(c[0]), "=r"(c[1]), "=r"(c[2]), "=r"(c[3]),
			"=r"(c[4]), "=r"(c[5]), "=r"(c[6]), "=r"(c[7]),
			"=r"(ct[0]), "=r"(ct[1]), "+r"(ct[2]), "+r"(ct[3]),
			"=r"(ct[4]), "=r"(ct[5]), "=r"(ct[6]), "=r"(ct[7])
			: "r"(a[0]), "r"(a[1]), "r"(a[2]), "r"(a[3]),
			"r"(a[4]), "r"(a[5]), "r"(a[6]), "r"(a[7]),
			"r"(b[2]), "r"(b[3]), "r"(b[4]), "r"(b[5]), "r"(b[6]), "r"(b[7])
			);
}

//Squaring mod p for the 7x32-bit representation: accumulates the off-diagonal
//cross products a[i]*a[j] (i<j), carries, then adds the diagonal squares
//a[i]^2 and reduces.
//NOTE(review): an explicit doubling of the cross-product half is not visible
//here (no shift/add-by-self step); confirm against a reference that each
//a[i]*a[j] (i != j) ends up counted twice.
//NOTE(review): like intmul/intmul2, carry flags are assumed to survive across
//separate asm() statements, which the compiler does not guarantee.
__device__ void intsqr(argElement_x32 r, argElement_x32 a) {
	uint32_t c[14];
	for (int i = 0;i < 14;i++) {
		c[i] = 0;
	}
	//Cross products, rows i = 0 .. (7/2 - 2): low halves then high halves,
	//with the wrap-around terms handled by the k-loops.
	for (int i = 0;i <= (NUM_DIGITS_221_x32 / 2 - 2);i++) {
		asm("mad.lo.cc.u32 %0, %1, %2, %3;" : "=r"(c[2 * i + 1]) : "r"(a[i + 1]), "r"(a[i]),"r"(c[2 * i + 1]));//
		for (int j = i + 2;j <= NUM_DIGITS_221_x32 - i - 2;j++) {
			asm("madc.lo.cc.u32 %0, %1, %2, %3;" : "=r"(c[i + j]) : "r"(a[j]), "r"(a[i]), "r"(c[i + j]));
		}
		for (int k = 0; k <= i; k++) {
			asm("madc.lo.cc.u32 %0, %1, %2, %3;" : "=r"(c[NUM_DIGITS_221_x32 + 2 * k - 1]) : "r"(a[NUM_DIGITS_221_x32 - 1 - i + k]), "r"(a[i + k]), "r"(c[NUM_DIGITS_221_x32 + 2 * k - 1]));
			asm("madc.hi.cc.u32 %0, %1, %2, %3;" : "=r"(c[NUM_DIGITS_221_x32 + 2 * k]) : "r"(a[NUM_DIGITS_221_x32 - 1 - i + k]), "r"(a[i + k]), "r"(c[NUM_DIGITS_221_x32 + 2 * k]));
		}
		asm("mad.hi.cc.u32 %0, %1, %2, %3;" : "=r"(c[2 * i + 2]) : "r"(a[i + 1]), "r"(a[i]), "r"(c[2 * i + 2]));
		for (int j = i + 2; j <= NUM_DIGITS_221_x32 - i - 2; j++) {
			asm("madc.hi.cc.u32 %0, %1, %2, %3;" : "=r"(c[i + j + 1]) : "r"(a[j]), "r"(a[i]),"r"(c[i + j + 1]));
		}
		for (int k = 0; k <= i; k++) {
			asm("madc.lo.cc.u32 %0, %1, %2, %3;" : "=r"(c[NUM_DIGITS_221_x32 + 2 * k]) : "r"(a[NUM_DIGITS_221_x32 - 1 - i + k]), "r"(a[i + k + 1]), "r"(c[NUM_DIGITS_221_x32 + 2 * k]));
			asm("madc.hi.cc.u32 %0, %1, %2, %3;" : "=r"(c[NUM_DIGITS_221_x32 + 2 * k + 1]) : "r"(a[NUM_DIGITS_221_x32 - 1 - i + k]), "r"(a[i + k + 1]), "r"(c[NUM_DIGITS_221_x32 + 2 * k + 1]));
		}
	}
	//Middle cross products a[3]*a[2], a[4]*a[3], ... (the i = 7/2-1 row).
	asm("mad.lo.cc.u32 %0, %1, %2, %3;" : "=r"(c[NUM_DIGITS_221_x32 - 1]) : "r"(a[NUM_DIGITS_221_x32 / 2]), "r"(a[NUM_DIGITS_221_x32 / 2 - 1]), "r"(c[NUM_DIGITS_221_x32 - 1]));
	asm("madc.hi.cc.u32 %0, %1, %2, %3;" : "=r"(c[NUM_DIGITS_221_x32]) : "r"(a[NUM_DIGITS_221_x32 / 2]), "r"(a[NUM_DIGITS_221_x32 / 2 - 1]), "r"(c[NUM_DIGITS_221_x32]));
	for (int k = 1; k < NUM_DIGITS_221_x32 / 2; k++) {
		asm("madc.lo.cc.u32 %0, %1, %2, %3;" : "=r"(c[NUM_DIGITS_221_x32 + 2 * k - 1]) : "r"(a[NUM_DIGITS_221_x32 / 2 + k]), "r"(a[NUM_DIGITS_221_x32 / 2 + k - 1]), "r"(c[NUM_DIGITS_221_x32 + 2 * k - 1]));
		asm("madc.hi.cc.u32 %0, %1, %2, %3;" : "=r"(c[NUM_DIGITS_221_x32 + 2 * k]) : "r"(a[NUM_DIGITS_221_x32 / 2 + k]), "r"(a[NUM_DIGITS_221_x32 / 2 + k - 1]), "r"(c[NUM_DIGITS_221_x32 + 2 * k]));
	}
	//Propagate any pending carry through c[1..13].
	asm("add.cc.u32 %0, %1, 0;" : "=r"(c[1]):"r"(c[1]));
	for (int i = 2; i < 2 * NUM_DIGITS_221_x32; i++) {
		asm("addc.cc.u32 %0, %1, 0;" : "=r"(c[i]):"r"(c[i]));
	}
	//Add the diagonal squares a[i]^2 at positions 2i / 2i+1.
	asm("mad.lo.cc.u32 %0, %1, %1, %2;" : "=r"(c[0]) : "r"(a[0]), "r"(c[0]));
	asm("madc.hi.cc.u32 %0, %1, %1, %2;" : "=r"(c[1]) : "r"(a[0]), "r"(c[1]));
#pragma unroll
	for (int i = 1; i < NUM_DIGITS_221_x32; i++) {
		asm("madc.lo.cc.u32 %0, %1, %1, %2;" : "=r"(c[2 * i]) : "r"(a[i]), "r"(c[2 * i]));
		asm("madc.hi.cc.u32 %0, %1, %1, %2;" : "=r"(c[2 * i + 1]) : "r"(a[i]), "r"(c[2 * i + 1]));
	}
	intreduction(r, c);
}

//Using Double-precision Floating point number.
//Bit-cast: return the raw IEEE-754 bit pattern of a double as a uint64_t.
//Type punning through a union (the conventional CUDA/C idiom).
__host__ __device__ uint64_t to_u64(double d) {
	union {
		double fp;
		uint64_t bits;
	} cv;
	cv.fp = d;
	return cv.bits;
}

//Bit-cast: reinterpret a uint64_t bit pattern as an IEEE-754 double.
//Inverse of to_u64; type punning through a union.
__host__ __device__ double to_double(uint64_t u64) {
	union {
		double fp;
		uint64_t bits;
	} cv;
	cv.bits = u64;
	return cv.fp;
}

//Modulus table used by sub() for the 5-limb (radix-2^45) representation.
//NOTE(review): these limb values do not match the canonical radix-45 limbs of
//p = 2^221-3 (0x1FFFFFFFFFFD, 0x1FFFFFFFFFFF, ..., 0x1FFFFFFFFFF); they may be
//a multiple of p chosen to keep limb subtraction non-negative — confirm.
static const __device__ uint64_t GLOB_Const_P[NUM_DIGITS_221_x64_fp] = { 0xFFFFFFFFFFFF,0xFFFFFFFFFFF0, 0xFFFFFFFFFFF0 ,0xFFFFFFFFFFF0 ,0x1FFFFFFFFFF0};
//Limb width is 45 bits, except the most significant limb which holds 41 bits
//(5*45 = 225 > 221 = 4*45 + 41).
#define RADIX 45
#define LAST_DIGIT 41
static const __device__ uint64_t GLOB_mask45 = 0x1FFFFFFFFFFF;
static const __device__ uint64_t GLOB_mask41 = 0x1FFFFFFFFF
F;

//FMA bias constants (t1 = 2^97, t2 = 2^97 + 2^52 bit patterns); the host must
//fill this via cudaMemcpyToSymbol before launching the fp kernels.
__constant__ double ct[2];
//__constant__ double t2;
//input length must be 5.
//Simplify to unique element in NIST P-221
//Used before mul and squaring
//Host-side canonicalisation of a 5-limb radix-2^45 element modulo p = 2^221-3:
//folds the bits of a[4] above 2^41 back into a[0] (2^221 ≡ 3 mod p), then
//propagates limb carries upward while masking each limb to 45 bits.
//Note: the input array a is used as scratch and is modified in place.
void simplification_unique_host(argElement_x64 c, argElement_x64 a) {
	const uint64_t overflow = a[4] >> 41;
	a[4] &= 0x1FFFFFFFFFF;       // keep only the low 41 bits of the top limb
	a[0] += 3 * overflow;        // 2^221 ≡ 3 (mod p)
	int k = 0;
	while (k < 4) {
		a[k + 1] += a[k] >> 45;  // push the carry into the next limb
		c[k] = a[k] & 0x1FFFFFFFFFFF;
		++k;
	}
	c[4] = a[4];
}
//Device-side canonicalisation, identical to simplification_unique_host:
//fold a[4]'s bits above 2^41 into a[0] (x3, since 2^221 ≡ 3 mod p), then
//carry-propagate and mask each limb to 45 bits. Mutates a as scratch.
__device__ void simplification_unique(argElement_x64 c, argElement_x64 a) {
	const uint64_t overflow = a[4] >> 41;
	a[4] &= 0x1FFFFFFFFFF;
	a[0] += 3 * overflow;
	for (int k = 0; k < 4; ++k) {
		a[k + 1] += a[k] >> 45;
		c[k] = a[k] & 0x1FFFFFFFFFFF;
	}
	c[4] = a[4];
}

//Just simplify no reduction.
//Carry-propagate the first `num` limbs into radix-2^45 form (mask each limb to
//45 bits, push the excess into the next limb). No modular reduction: the final
//limb keeps whatever overflow it accumulates.
__host__ __device__ void simplification_fast(argElement_x64 c, int num) {
	for (int k = 1; k < num; ++k) {
		c[k] += c[k - 1] >> 45;
		c[k - 1] &= 0x1FFFFFFFFFFF;
	}
}

//Limb-wise sub and add (parallel-friendly, no carry chains); for elements in this
//file's field (the earlier "NIST P-521" mention looks like a copy-paste leftover).
//Cannot be used inside Karatsuba multiplication.
//Limb-wise modular subtraction: r[i] = a[i] + P[i] - b[i].
//Adding the modulus table first keeps every limb difference non-negative
//(assuming b is in reduced/simplified form), so no borrow chain is needed.
//Fixed: the original const_cast away the constness of GLOB_Const_P and
//mutated the caller's input a (a[i] += p[i]); this version reads the constant
//table directly and leaves a untouched.
__device__ void sub(argElement_x64 r, argElement_x64 a, argElement_x64 b) {
	for (int i = 0; i < 5; i++) {
		r[i] = a[i] + GLOB_Const_P[i] - b[i];
	}
}

//Limb-wise addition r = a + b (no carry propagation; limbs are lazily
//normalised later by the simplification routines).
__device__ void add(argElement_x64 r, const argElement_x64 a, const argElement_x64 b)
{
	for (int k = 0; k < 5; ++k)
		r[k] = a[k] + b[k];
}
//Accumulator pre-bias constants for the FMA multiplication routines (they
//cancel the t1/t2 bias terms folded into every partial product).
//NOTE(review): these macros are never referenced — the functions below hardcode
//the same literals — and they are #undef'd after the last fp routine.
#define SUM_C0 0xbcd0000000000000
#define SUM_C1 0x33a0000000000000
#define SUM_C2 0xaa70000000000000
#define SUM_C3 0x2140000000000000
#define SUM_C4 0x9810000000000000
#define SUM_C5 0x9540000000000000
#define SUM_C6 0x1e70000000000000
#define SUM_C7 0xa7a0000000000000
#define SUM_C8 0x30d0000000000000
#define SUM_C9 0xba00000000000000
//slow and original version
//slow and original version
//5x5-limb multiplication using the double-precision FMA "split product" trick:
//for each partial product, fma.rz(a, b, t1) yields the HIGH half embedded in a
//biased double, and fma.rz(a, b, t2 - p_hi) recovers the LOW half (t1 = 2^97,
//t2 = 2^97 + 2^52 as bit patterns, so the 52-bit halves sit directly in the
//mantissa). The raw bit patterns are accumulated as integers; the magic
//initial values of c[] pre-subtract the accumulated exponent biases.
//Leaves the 10-limb biased accumulator in c — no reduction performed here.
//Requires round-toward-zero FMA; inputs must fit the fp-representable range.
__device__ void mul_fma_5_origin(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	const double t1 = to_double(0x4600000000000000);
	const double t2 = to_double(0x4600000000000080);
	double p_hi = 0, p_lo = 0, sub = 0;
	//Pre-biased accumulators (cancel the 25 t1/t2 bias contributions).
	c[0] = 0xbcd0000000000000;c[1] = 0x33a0000000000000;c[2] = 0xaa70000000000000;c[3] = 0x2140000000000000;c[4] = 0x9810000000000000;
	c[5] = 0x9540000000000000;c[6] = 0x1e70000000000000;c[7] = 0xa7a0000000000000;c[8] = 0x30d0000000000000;c[9] = 0xba00000000000000;
	for (int i = 0; i < NUM_DIGITS_221_x64_fp; i++) {
		for (int j = 0; j < NUM_DIGITS_221_x64_fp; j++) {
			//High half of a[i]*b[j], biased by t1.
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"((double)a[i]), "d"((double)b[j]), "d"(t1));
			c[i + j + 1] += to_u64(p_hi);
			//Low half: recompute the product against (t2 - p_hi).
			asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"((double)a[i]), "d"((double)b[j]), "d"(sub));
			c[i + j] += to_u64(p_lo);
		}
	}
}

//Variant of the FMA multiplication that computes the accumulator bias
//constants at runtime (from the 0x460/0x433 exponent fields) instead of using
//the hardcoded SUM_C* literals, then reduces and canonicalises the result.
__device__ void mul_fma_5_with_precompute(argElement_x64 r, argElement_x64 a, argElement_x64 b) {
	const double t1 = to_double(0x4600000000000000);
	const double t2 = to_double(0x4600000000000080);
	uint64_t c[10];
	int w2 = 2 * NUM_DIGITS_221_x64_fp - 1;
	//Derive the pre-bias for each accumulator limb: limb k receives i bias
	//terms with exponent 0x460 (high halves) and i+1 with 0x433 (low halves).
	for (int i = 0; i < NUM_DIGITS_221_x64_fp; i++) {
		c[i] = (uint64_t)0x460 * i + (uint64_t)0x433 * (i + 1);
		c[i] = -(int64_t)(c[i] & 0xFFF) << 52;
		c[w2 - i] = (uint64_t)0x460 * (i + 1) + (uint64_t)0x433 * i;
		c[w2 - i] = -(int64_t)(c[w2 - i] & 0xFFF) << 52;
	}
	//fesetround(FE_TOWARDZERO);
	double p_hi = 0, p_lo = 0, sub = 0;

	//Same hi/lo FMA split-product accumulation as mul_fma_5_origin.
	for (int i = 0; i < NUM_DIGITS_221_x64_fp; i++) {
		double a_tmp = (double)a[i];
		for (int j = 0; j < NUM_DIGITS_221_x64_fp; j++) {
			double b_tmp = (double)b[j];
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
			c[i + j + 1] += to_u64(p_hi);
			asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
			c[i + j] += to_u64(p_lo);
		}
	}
//NOTE(review): bare "#pragma" below is a no-op — "#pragma unroll" was probably intended.
#pragma
	//Fold upper limbs: 2^225 = 2^4 * 2^221 ≡ 16*3 = 48 (mod p).
	//NOTE(review): mul_fma_5_with_reduction folds with *3 instead — the two
	//cannot both be right for the same limb representation; verify which.
	for (int i = 0; i < 5; i++) {
		c[i] = c[i] + c[5 + i] * 48;
	}
	simplification_unique(r, c);
}


//with shared Memory
//with shared Memory
//FMA 5x5-limb multiplication keeping the 10-limb accumulator in shared memory,
//strided so thread `tid` owns slots c[tid + BLOCK_SIZE*k], k = 0..9.
//ct[0]/ct[1] are the __constant__ t1/t2 bias terms (host must initialise them).
//NOTE(review): the extern __shared__ declaration carries an explicit size;
//the kernel launch must still provide >= 10*BLOCK_SIZE*sizeof(uint64_t)
//dynamic shared memory.
//Fixed: the top-limb overflow extraction indexed c[tid + 10*4] instead of
//this function's own c[tid + BLOCK_SIZE*4] striding (every neighbouring
//access uses BLOCK_SIZE strides), so it read another thread's slot.
__device__ void mul_fma_5_with_shared(argElement_x64 r, argElement_x64 a, argElement_x64 b) {
	int tid = threadIdx.x + threadIdx.y * blockDim.x;
	extern __shared__ uint64_t c[10*BLOCK_SIZE];

	//Pre-biased accumulator constants (cancel the FMA bias terms added below).
	c[tid + BLOCK_SIZE * 0] = 0xbcd0000000000000;c[tid + BLOCK_SIZE * 1] = 0x33a0000000000000;c[tid + BLOCK_SIZE * 2] = 0xaa70000000000000;c[tid + BLOCK_SIZE * 3] = 0x2140000000000000;c[tid + BLOCK_SIZE * 4] = 0x9810000000000000;
	c[tid + BLOCK_SIZE * 5] = 0x9540000000000000;c[tid + BLOCK_SIZE * 6] = 0x1e70000000000000;c[tid + BLOCK_SIZE * 7] = 0xa7a0000000000000;c[tid + BLOCK_SIZE * 8] = 0x30d0000000000000;c[tid + BLOCK_SIZE * 9] = 0xba00000000000000;
	//fesetround(FE_TOWARDZERO);
	double p_hi, sub;
#pragma unroll
	for (int i = 0; i < 5; i++) {
		double a_tmp = (double)a[i];
#pragma unroll
		for (int j = 0; j < 5; j++) {
			double b_tmp = (double)b[j];
			//High half of a[i]*b[j] (biased), then low half via t2 - p_hi.
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(ct[0]));
			c[tid + BLOCK_SIZE * (i + j + 1)] += to_u64(p_hi);
			asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(ct[1]), "d"(p_hi));
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
			c[tid + BLOCK_SIZE * (i + j)] += to_u64(p_hi);
		}
	}
	//Fold the upper five limbs into the lower five (2^225 ≡ 48 mod p).
#pragma unroll
	for (int i = 0; i < 5; i++) {
		c[tid + BLOCK_SIZE * i] = c[tid + BLOCK_SIZE * i] + c[tid + BLOCK_SIZE * (5 + i)] * 48;
	}
	//Canonicalise: fold bits of limb 4 above 2^41 into limb 0 (2^221 ≡ 3),
	//then carry-propagate with 45-bit masks.
	uint32_t t = c[tid + BLOCK_SIZE * 4] >> 41;
	c[tid + BLOCK_SIZE * 4] &= 0x1FFFFFFFFFF;
	c[tid + BLOCK_SIZE * 0] += t * 3;
	for (int i = 0; i < 4; i++) {
		c[tid+BLOCK_SIZE*(i + 1)] += (c[tid + BLOCK_SIZE * i] >> 45);
		r[i] = c[tid + BLOCK_SIZE * i] & 0x1FFFFFFFFFFF;
	}
	r[4] = c[tid + BLOCK_SIZE * 4];
}

//without reduction and simplify
//without reduction and simplify  (comment is historical: this version DOES
//fold and canonicalise the result into r[0..4]).
//Register-resident FMA 5x5 multiplication; bias terms t1/t2 are loaded from
//the __constant__ array ct (host must initialise it).
__device__ void mul_fma_5(argElement_x64 r, argElement_x64 a, argElement_x64 b) {
	uint64_t c[10];
	double p_hi, sub,t1,t2;
	//Pre-biased accumulators.
	c[0] = 0xbcd0000000000000;c[1] = 0x33a0000000000000;c[2] = 0xaa70000000000000;c[3] = 0x2140000000000000;c[4] = 0x9810000000000000;
	c[5] = 0x9540000000000000;c[6] = 0x1e70000000000000;c[7] = 0xa7a0000000000000;c[8] = 0x30d0000000000000;c[9] = 0xba00000000000000;
	t1 = ct[0];
	t2 = ct[1];
#pragma unroll
	for (int i = 0; i < 5; i++) {
		double a_tmp = (double)a[i];
#pragma unroll
		for (int j = 0; j < 5; j++) {
			double b_tmp = (double)b[j];
			//hi half then lo half of a[i]*b[j] via the FMA split trick.
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
			c[i + j + 1] += to_u64(p_hi);
			asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
			c[i + j] += to_u64(p_hi);
		}
	}
	//Fold upper limbs: x48 here (2^225 ≡ 48 mod p for raw radix-45 limbs).
	//NOTE(review): mul_fma_5_with_reduction uses x3 for the same step with the
	//same bias constants — these disagree; verify which factor is correct.
#pragma unroll
	for (int i = 0; i < 5; i++) {
		c[i] = c[i] + c[(5 + i)] * 48;
	}
	//Inline canonicalisation (same as simplification_unique).
	uint32_t t = c[4] >> 41;
	c[4] &= 0x1FFFFFFFFFF;
	c[0] += (t * 3);
	for (int i = 0; i < 4; i++) {
		c[i + 1] += (c[i] >> 45);
		r[i] = c[i] & 0x1FFFFFFFFFFF;
	}
	r[4] = c[4];
}

//FMA 5x5 multiplication with reduction and canonicalisation; operands are
//converted to double inline (no caching in temporaries).
__device__ void mul_fma_5_with_reduction(argElement_x64 r, argElement_x64 a, argElement_x64 b) {
	double t1 = ct[0];
	double t2 = ct[1];
	uint64_t c[10];
	double a_tmp, b_tmp;
	double p_hi, p_lo, sub;
	//Pre-biased accumulators.
	c[0] = 0xbcd0000000000000;c[1] = 0x33a0000000000000;c[2] = 0xaa70000000000000;c[3] = 0x2140000000000000;c[4] = 0x9810000000000000;
	c[5] = 0x9540000000000000;c[6] = 0x1e70000000000000;c[7] = 0xa7a0000000000000;c[8] = 0x30d0000000000000;c[9] = 0xba00000000000000;
	for (int i = 0; i < NUM_DIGITS_221_x64_fp; i++) {
		for (int j = 0; j < NUM_DIGITS_221_x64_fp; j++) {
			//hi half then lo half of a[i]*b[j] via the FMA split trick.
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"((double)a[i]), "d"((double)b[j]), "d"(t1));
			c[i + j + 1] += to_u64(p_hi);
			asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"((double)a[i]), "d"((double)b[j]), "d"(sub));
			c[i + j] += to_u64(p_lo);
		}
	}
	//asm(".reg .b64 %tmp;");
	//for (int i = 0; i < 5; i++) {
	//	a_tmp = (double)a[i];
	//	for (int j = 0; j < 5; j++) {
	//		b_tmp = (double)b[j];
	//		asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	//		c[i + j + 1] += to_u64(p_hi);
	//		asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	//		asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	//		c[i + j] += to_u64(p_lo);
	//		/*asm("fma.rz.f64 %0, %4, %5, %6; \n\t"
	//			"mov.b64 %tmp,%0;\n\t"
	//			"add.u64 %2,%2,%tmp;\n\t"
	//			"sub.rz.f64 %1, %7, %0;\n\t"
	//			"fma.rz.f64 %0, %4, %5, %1; \n\t"
	//			"mov.b64 %tmp,%0;\n\t"
	//			"add.u64 %3,%3,%tmp;"
	//			:	"=d"(p_hi), "=d"(sub), "=l"(c[i + j + 1]), "=l"(c[i + j]) : "d"(a_tmp), "d"(b_tmp), "d"(t1), "d"(t2));*/
	//	}
	//}
	//Fold upper limbs with x3.
	//NOTE(review): mul_fma_5 / mul_fma_5_with_precompute use x48 for this same
	//step — the factors disagree; verify which matches the limb layout.
	for (int i = 0; i < 5; i++) {
		//asm("mad.lo.s64 %0, %2, 3, %1;":"=l"(c[i]) : "l"(c[i]), "l"(c[5 + i]));
		c[i] = c[i]+ c[5 + i] * 3;
	}
	simplification_unique(r, c);
}

//NOTE(review): UNFINISHED — do not use. Only the a[0]*b[0] and a[0]*b[1]
//partial products are computed; c[1..9] are read uninitialised by the fold
//and simplification below, and the second partial product OVERWRITES c[0]
//instead of contributing to c[1]. The result is garbage until the full
//product-scanning schedule is implemented.
__device__ void mul_fma_5_product_scanning_with_reduction(argElement_x64 r, argElement_x64 a, argElement_x64 b) {
	double t1 = ct[0];
	double t2 = ct[1];
	uint64_t c[10];
	double a_tmp, b_tmp;
	uint64_t c_tmp1, c_tmp2;
	double p_hi, sub;
	/*c[0] = 0xbcd0000000000000;c[1] = 0x33a0000000000000;c[2] = 0xaa70000000000000;c[3] = 0x2140000000000000;c[4] = 0x9810000000000000;
	c[5] = 0x9540000000000000;c[6] = 0x1e70000000000000;c[7] = 0xa7a0000000000000;c[8] = 0x30d0000000000000;c[9] = 0xba00000000000000;*/
	//c0 = lo(a0*b0); hi half staged in c_tmp1/c_tmp2.
	a_tmp = (double)a[0];	b_tmp = (double)b[0];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c[0] = 0xbcd0000000000000 + to_u64(p_hi);c_tmp1 = c_tmp2;c_tmp2 = 0;

	//a0*b1 — NOTE(review): clobbers c[0] again; should accumulate into c[1].
	/*a_tmp = (double)a[0];*/	b_tmp = (double)b[1];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c[0] = 0xbcd0000000000000 + to_u64(p_hi);

	//Fold and canonicalise (reads uninitialised c[1..9] as written).
	for (int i = 0; i < 5; i++) {
		c[i] = c[i] + c[5 + i] * 3;
	}
	simplification_unique(r, c);
}

//FMA squaring of a 5-limb element, product-scanning order, WITHOUT reduction:
//writes the full 10-limb biased accumulator into r[0..9] (r must have 10
//slots, e.g. EltBN221_x64_fp_buffer). Cross products a[i]*a[j] (i<j) are
//doubled via "<< 1"; diagonal squares are added once. c_tmp1/c_tmp2/c_tmp3
//stage the lo/hi halves that belong to the current and next output limbs.
__device__ void sqr_fma_5(argElement_x64 r, argElement_x64 a) {
	const double t1 = to_double(0x4600000000000000);
	const double t2 = to_double(0x4600000000000080);
	uint64_t c_tmp1, c_tmp2, c_tmp3;

	double p_hi, p_lo, sub, a_tmp, b_tmp;
	//c0 = lo(a0^2); hi half staged in c_tmp3 for c1.
	a_tmp = (double)a[0];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(a_tmp), "d"(t1));
	c_tmp3 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(a_tmp), "d"(sub));
	r[0] = 0xbcd0000000000000 + to_u64(p_lo);
	//c1 = 2*lo(a0*a1) + hi(a0^2).
	b_tmp = (double)a[1];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 = to_u64(p_lo) << 1;
	r[1] = c_tmp1 + 0x33a0000000000000 + c_tmp3;
	//c2 = 2*(hi(a0*a1) + lo(a0*a2)) + lo(a1^2).
	c_tmp1 = c_tmp2;
	b_tmp = (double)a[2];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);

	a_tmp = (double)a[1];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(a_tmp), "d"(t1));
	c_tmp3 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(a_tmp), "d"(sub));
	r[2] = (c_tmp1 << 1) + to_u64(p_lo) + 0xaa70000000000000;
	//c3 = 2*(hi(a0*a2) + lo(a1*a2) + lo(a0*a3)) + hi(a1^2).
	c_tmp1 = c_tmp2;
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);

	a_tmp = (double)a[0];
	b_tmp = (double)a[3];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 += to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);
	r[3] = (c_tmp1 << 1) + c_tmp3 + 0x2140000000000000;
	//c4 = 2*(hi terms + lo(a0*a4) + lo(a1*a3)) + lo(a2^2).
	c_tmp1 = c_tmp2;
	b_tmp = (double)a[4];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);

	a_tmp = (double)a[1];b_tmp = (double)a[3];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 += to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);

	a_tmp = (double)a[2];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(a_tmp), "d"(t1));
	c_tmp3 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(a_tmp), "d"(sub));
	r[4] = (c_tmp1 << 1) + to_u64(p_lo) + 0x9810000000000000;
	//c5 = 2*(lo(a2*a3) + lo(a1*a4) + carried hi terms) + hi(a2^2).
	c_tmp1 = c_tmp2;
	b_tmp = (double)a[3];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);

	a_tmp = (double)a[1];b_tmp = (double)a[4];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 += to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);
	r[5] = (c_tmp1 << 1) + c_tmp3 + 0x9540000000000000;
	//c6 = 2*(lo(a2*a4) + carried hi terms) + lo(a3^2).
	c_tmp1 = c_tmp2;
	a_tmp = (double)a[2];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);

	a_tmp = (double)a[3];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(a_tmp), "d"(t1));
	c_tmp3 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(a_tmp), "d"(sub));
	r[6] = (c_tmp1 << 1) + to_u64(p_lo) + 0x1e70000000000000;
	//c7 = 2*(lo(a3*a4) + carried hi terms) + hi(a3^2).
	c_tmp1 = c_tmp2;
	b_tmp = (double)a[4];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);
	r[7] = (c_tmp1 << 1) + c_tmp3 + 0xa7a0000000000000;
	//c8/c9 from a4^2 plus the doubled carried hi half of a3*a4.
	//c_tmp1 = c_tmp2;
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(b_tmp), "d"(b_tmp), "d"(t1));
	r[9] = to_u64(p_hi) + 0xba00000000000000;
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(b_tmp), "d"(b_tmp), "d"(sub));
	r[8] = (c_tmp2 << 1) + to_u64(p_lo) + 0x30d0000000000000;
}

//Same product-scanning FMA squaring as sqr_fma_5, but accumulating into a
//local 10-limb buffer, folding the upper half with x33 ((x<<5)+x), and
//canonicalising into r[0..4].
//NOTE(review): the fold factor disagrees with the x3 of
//mul_fma_5_with_reduction and the x48 of mul_fma_5 — verify which matches
//the limb layout.
__device__ void sqr_fma_5_with_reduction(argElement_x64 r, argElement_x64 a) {
	double t1, t2;
	t1 = to_double(0x4600000000000000);
	t2 = to_double(0x4600000000000080);
	uint64_t c_tmp1 = 0, c_tmp2 = 0, c_tmp3 = 0;
	uint64_t c[10];
	double p_hi, p_lo, sub, a_tmp, b_tmp;
	//c0 = lo(a0^2); hi half staged in c_tmp3.
	a_tmp = (double)a[0];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(a_tmp), "d"(t1));
	c_tmp3 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(a_tmp), "d"(sub));
	c[0] = 0xbcd0000000000000 + to_u64(p_lo);
	//c1 = 2*lo(a0*a1) + hi(a0^2).
	b_tmp = (double)a[1];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 = to_u64(p_lo) << 1;
	c[1] = c_tmp1 + 0x33a0000000000000 + c_tmp3;
	//c2 = 2*(hi(a0*a1) + lo(a0*a2)) + lo(a1^2).
	c_tmp1 = c_tmp2;
	b_tmp = (double)a[2];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);

	a_tmp = (double)a[1];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(a_tmp), "d"(t1));
	c_tmp3 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(a_tmp), "d"(sub));
	c[2] = (c_tmp1 << 1) + to_u64(p_lo) + 0xaa70000000000000;
	//c3 = 2*(hi(a0*a2) + lo(a1*a2) + lo(a0*a3)) + hi(a1^2).
	c_tmp1 = c_tmp2;
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);

	a_tmp = (double)a[0];
	b_tmp = (double)a[3];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 += to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);
	c[3] = (c_tmp1 << 1) + c_tmp3 + 0x2140000000000000;
	//c4 = 2*(lo(a0*a4) + lo(a1*a3) + carried hi terms) + lo(a2^2).
	c_tmp1 = c_tmp2;
	b_tmp = (double)a[4];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);

	a_tmp = (double)a[1];b_tmp = (double)a[3];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 += to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);

	a_tmp = (double)a[2];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(a_tmp), "d"(t1));
	c_tmp3 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(a_tmp), "d"(sub));
	c[4] = (c_tmp1 << 1) + to_u64(p_lo) + 0x9810000000000000;
	//c5 = 2*(lo(a2*a3) + lo(a1*a4) + carried hi terms) + hi(a2^2).
	c_tmp1 = c_tmp2;
	b_tmp = (double)a[3];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);

	a_tmp = (double)a[1];b_tmp = (double)a[4];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 += to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);
	c[5] = (c_tmp1 << 1) + c_tmp3 + 0x9540000000000000;
	//c6 = 2*(lo(a2*a4) + carried hi terms) + lo(a3^2).
	c_tmp1 = c_tmp2;
	a_tmp = (double)a[2];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);

	a_tmp = (double)a[3];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(a_tmp), "d"(t1));
	c_tmp3 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(a_tmp), "d"(sub));
	c[6] = (c_tmp1 << 1) + to_u64(p_lo) + 0x1e70000000000000;
	//c7 = 2*(lo(a3*a4) + carried hi terms) + hi(a3^2).
	c_tmp1 = c_tmp2;
	b_tmp = (double)a[4];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);
	c[7] = (c_tmp1 << 1) + c_tmp3 + 0xa7a0000000000000;
	//c8/c9 from a4^2 plus the doubled carried hi half of a3*a4.
	//c_tmp1 = c_tmp2;
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(b_tmp), "d"(b_tmp), "d"(t1));
	c[9] = to_u64(p_hi) + 0xba00000000000000;
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(b_tmp), "d"(b_tmp), "d"(sub));
	c[8] = (c_tmp2 << 1) + to_u64(p_lo) + 0x30d0000000000000;
	//Fold upper limbs with x33 = (x<<5)+x, then canonicalise.
	for (int i = 0; i < 5; i++) {
		uint64_t c5 = c[5 + i];
		c[i] = c[i] + (c5 << 5) + c5;
	}
	simplification_unique(r, c);
}

#undef SUM_C0 
#undef SUM_C1 
#undef SUM_C2 
#undef SUM_C3 
#undef SUM_C4 
#undef SUM_C5 
#undef SUM_C6 
#undef SUM_C7 
#undef SUM_C8 
#undef SUM_C9 
//reduction for Multiplication and squaring
// Folds the high half a[5..9] of a 10-limb product into the low half:
// c[i] = a[i] + 33 * a[5+i], the same (<<5)+1 fold used at the tail of the
// mul/sqr routines in this file.
__device__ void reduction(argElement_x64 c, argElement_x64 a) {
	for (int limb = 0; limb < NUM_DIGITS_221_x64_fp; limb++) {
		uint64_t hi = a[NUM_DIGITS_221_x64_fp + limb];
		c[limb] = a[limb] + (hi << 5) + hi;
	}
}

//modular multiplication
// Benchmark kernel: each thread owns NUM_DIGITS_221_x64_fp consecutive limbs
// of r/a/b, stages them locally, runs the multiply Iterate times (so the
// kernel time measures throughput), then writes the last result back.
// FIX: the flat thread index previously hard-coded BLOCK_SIZE and ignored
// threadIdx.y; derive it from blockDim like the sibling kernels so the kernel
// is correct for any 1-D or 2-D block shape (identical result for the
// <<<numBlocks, BLOCK_SIZE>>> launch used by mul_fma5_reductionAPI).
__global__ void mul_fma_5_reduction(argElement_x64 r, argElement_x64 a, argElement_x64 b) {
	int bid = blockIdx.x * blockDim.x * blockDim.y;
	int t = bid + (threadIdx.x + threadIdx.y * blockDim.x);
	int tid = t * NUM_DIGITS_221_x64_fp;
	uint64_t tr[NUM_DIGITS_221_x64_fp], ta[NUM_DIGITS_221_x64_fp], tb[NUM_DIGITS_221_x64_fp];
	// Stage operands locally so the timed loop does no global-memory traffic.
	for (int i = 0; i < NUM_DIGITS_221_x64_fp; i++) {
		ta[i] = a[i + tid];
		tb[i] = b[i + tid];
	}
	for (int i = 0; i < Iterate; i++) {
		mul_fma_5_with_reduction(tr, ta, tb);
	}
	for (int i = 0; i < NUM_DIGITS_221_x64_fp; i++) {
		r[i + tid] = tr[i];
	}
}

// Benchmark kernel: each thread stages its NUM_DIGITS_221_x64_fp limbs of a,
// squares them Iterate times via sqr_fma_5_with_reduction, and stores the
// last result into c at the same offset.
__global__ void sqr_fma_5_reduction(argElement_x64 c, argElement_x64 a) {
	int thread = blockIdx.x * blockDim.x * blockDim.y
		+ threadIdx.y * blockDim.x + threadIdx.x;
	int base = thread * NUM_DIGITS_221_x64_fp;
	uint64_t result[NUM_DIGITS_221_x64_fp];
	uint64_t operand[NUM_DIGITS_221_x64_fp];
	// Copy the operand into local storage so the timed loop stays off global memory.
	for (int limb = 0; limb < NUM_DIGITS_221_x64_fp; limb++) {
		operand[limb] = a[base + limb];
	}
	for (int iter = 0; iter < Iterate; iter++) {
		sqr_fma_5_with_reduction(result, operand);
	}
	for (int limb = 0; limb < NUM_DIGITS_221_x64_fp; limb++) {
		c[base + limb] = result[limb];
	}
}

//modular multiplication with karatsuba multiplication
__global__ void intmul_kernel(argElement_x32 c, argElement_x32 a, argElement_x32 b) {
	//int t = threadIdx.x + threadIdx.y * blockDim.x;
	int bid = blockIdx.x * blockDim.x * blockDim.y;
	int t = bid + (threadIdx.x + threadIdx.y * blockDim.x);
	int tid = t * NUM_DIGITS_221_x32;
	
	uint32_t tc[NUM_DIGITS_221_x32],ta[NUM_DIGITS_221_x32],tb[NUM_DIGITS_221_x32];
	for(int i=0;i<NUM_DIGITS_221_x64_fp;i++){
		ta[i]=a[i+tid];
		tb[i]=b[i+tid];
	}
	for (int i = 0;i < Iterate;i++) {
		intmul(tc, ta, tb);
	}
	for(int i=0;i<NUM_DIGITS_221_x64_fp;i++){
		c[i+tid]=tc[i];
	}
}
// Benchmark kernel for the second 32-bit integer multiplication variant
// (intmul2): same staging/iteration pattern as intmul_kernel.
__global__ void intmul2_kernel(argElement_x32 c, argElement_x32 a, argElement_x32 b) {
	int bid = blockIdx.x * blockDim.x * blockDim.y;
	int t = bid + (threadIdx.x + threadIdx.y * blockDim.x);
	int tid = t * NUM_DIGITS_221_x32;
	uint32_t tc[NUM_DIGITS_221_x32], ta[NUM_DIGITS_221_x32], tb[NUM_DIGITS_221_x32];
	// BUG FIX: the copy loops used NUM_DIGITS_221_x64_fp (5) on 7-limb x32
	// arrays, leaving ta[5..6]/tb[5..6] uninitialized and dropping tc[5..6].
	for (int i = 0; i < NUM_DIGITS_221_x32; i++) {
		ta[i] = a[i + tid];
		tb[i] = b[i + tid];
	}
	for (int i = 0; i < Iterate; i++) {
		intmul2(tc, ta, tb);
	}
	for (int i = 0; i < NUM_DIGITS_221_x32; i++) {
		c[i + tid] = tc[i];
	}
}

// Benchmark kernel for 32-bit integer squaring: each thread stages its
// NUM_DIGITS_221_x32 limbs, runs intsqr Iterate times, writes back the last result.
__global__ void intsqr_kernel(argElement_x32 c, argElement_x32 a) {
	int bid = blockIdx.x * blockDim.x * blockDim.y;
	int t = bid + (threadIdx.x + threadIdx.y * blockDim.x);
	int tid = t * NUM_DIGITS_221_x32;
	uint32_t tc[NUM_DIGITS_221_x32], ta[NUM_DIGITS_221_x32];
	// BUG FIX: the copy loops used NUM_DIGITS_221_x64_fp (5) on 7-limb x32
	// arrays, leaving ta[5..6] uninitialized and dropping tc[5..6].
	for (int i = 0; i < NUM_DIGITS_221_x32; i++) {
		ta[i] = a[i + tid];
	}
	for (int i = 0; i < Iterate; i++) {
		intsqr(tc, ta);
	}
	for (int i = 0; i < NUM_DIGITS_221_x32; i++) {
		c[i + tid] = tc[i];
	}
}

//modular multiplication with karatsuba multiplication
// FMA-based 5x5-limb modular multiplication over p = 2^221-3 using a
// "consecutive" (strided) layout: limb i of element t lives at index t + N*i,
// so adjacent threads touch adjacent words (coalesced global access).
__global__ void mul_fma_5_reduction_consecutive(argElement_x64 r, argElement_x64 a, argElement_x64 b) {
	int bid = blockIdx.x * blockDim.x * blockDim.y;
	int t = bid + (threadIdx.x + threadIdx.y * blockDim.x);

	EltBN221_x64_fp_buffer c = { 0 };
	// Exponent-bias constants for the fma.rz hi/lo double-word product trick.
	const double t1 = to_double(0x4600000000000000);
	const double t2 = to_double(0x4600000000000080);

	// Pre-subtract the accumulated floating-point exponent biases from each
	// product column: column k receives #{i+j=k-1} high parts (bias 0x460,
	// via t1) and #{i+j=k} low parts (bias 0x433), for 0<=i,j<5.
	for (int i = 0; i < NUM_DIGITS_221_x64_fp; i++) {
		c[i] = (uint64_t)0x460 * i + (uint64_t)0x433 * (i + 1);
		c[i] = -(int64_t)(c[i] & 0xFFF) << 52;
		// BUG FIX: the mirror index was 2*NUM_DIGITS_221_x64_fp - i, which at
		// i=0 wrote c[10] (out of bounds of the 10-limb buffer) and left
		// column 5 with no bias. The mirror of column i is column
		// 2*NUM_DIGITS_221_x64_fp - 1 - i (columns 9..5): column 9-i gets
		// (i+1) high parts and i low parts, matching the formula below.
		c[2 * NUM_DIGITS_221_x64_fp - 1 - i] = (uint64_t)0x460 * (i + 1) + (uint64_t)0x433 * i;
		c[2 * NUM_DIGITS_221_x64_fp - 1 - i] = -(int64_t)(c[2 * NUM_DIGITS_221_x64_fp - 1 - i] & 0xFFF) << 52;
	}
	double p_hi = 0, p_lo = 0, sub = 0;
	for (int i = 0; i < NUM_DIGITS_221_x64_fp; i++) {
		for (int j = 0; j < NUM_DIGITS_221_x64_fp; j++) {
			// p_hi: round-toward-zero fma against t1 captures the high part of
			// a_i*b_j; p_lo recovers the low part via the t2 - p_hi subtraction.
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"((double)a[t + N * i]), "d"((double)b[t + N * j]), "d"(t1));
			c[i + j + 1] += to_u64(p_hi);
			asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"((double)a[t + N * i]), "d"((double)b[t + N * j]), "d"(sub));
			c[i + j] += to_u64(p_lo);
		}
	}
	// Fold the high limbs back into the low half (x33 fold, as in reduction()).
	for (int i = 0; i < NUM_DIGITS_221_x64_fp; i++) {
		c[i] = c[i] + (c[NUM_DIGITS_221_x64_fp + i] << 5) + c[NUM_DIGITS_221_x64_fp + i];
	}
	// Top limb holds 41 bits (221 = 4*45 + 41); wrap its overflow times 3
	// (2^221 = 3 mod p) back into limb 0.
	uint64_t tmp = c[NUM_DIGITS_221_x64_fp - 1] >> 41;
	c[NUM_DIGITS_221_x64_fp - 1] &= 0x1FFFFFFFFFF;
	c[0] += tmp * 3;
	// Propagate 45-bit carries and store in the strided layout; the top limb
	// is then re-stored unmasked (lazily reduced).
	for (int i = 0; i < NUM_DIGITS_221_x64_fp; i++) {
		c[i + 1] += (c[i] >> 45);
		r[t + N * i] = c[i] & 0x1FFFFFFFFFFF;
	}
	r[t + (NUM_DIGITS_221_x64_fp - 1) * N] = c[NUM_DIGITS_221_x64_fp - 1];
}


extern "C"
// Host wrapper: uploads N elements (NUM_DIGITS_221_x64_fp limbs each) of a
// and b, times the mul_fma_5_reduction benchmark kernel with CUDA events,
// and copies the result back into c.
void mul_fma5_reductionAPI(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	argElement_x64 dev_a = NULL;
	argElement_x64 dev_b = NULL;
	argElement_x64 dev_c = NULL;

	int numBlocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
	int numThreads = numBlocks * BLOCK_SIZE;

	int deviceNum = 0;
	float delta_time1 = 0.0f;
	cudaEvent_t kernel_start1, kernel_stop1;
	struct cudaDeviceProp device_prop;

	// Select which GPU the program runs on.
	cudaSetDevice(deviceNum);
	int tmpN = NUM_DIGITS_221_x64_fp * sizeof(uint64_t);	// bytes per element

	// Allocate and zero the three device vectors for c = a * b.
	cudaMalloc((void**)&dev_c, tmpN * numThreads);
	cudaMalloc((void**)&dev_a, tmpN * numThreads);
	cudaMalloc((void**)&dev_b, tmpN * numThreads);
	cudaMemset(dev_c, 0, tmpN * numThreads);
	cudaMemset(dev_a, 0, tmpN * numThreads);
	cudaMemset(dev_b, 0, tmpN * numThreads);

	cudaMemcpy(dev_a, a, tmpN * numThreads, cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b, b, tmpN * numThreads, cudaMemcpyHostToDevice);

	cudaEventCreate(&kernel_start1);
	cudaEventCreateWithFlags(&kernel_stop1, cudaEventBlockingSync);
	cudaGetDeviceProperties(&device_prop, deviceNum);
	cudaEventRecord(kernel_start1, 0);

	// Upload the fp exponent-bias constants used by the FMA hi/lo product
	// trick into constant memory (symbol ct is declared elsewhere in this file).
	double dt[2];
	dt[0] = to_double(0x4600000000000000);
	dt[1] = to_double(0x4600000000000080);
	cudaMemcpyToSymbol(ct, &dt, 2 * sizeof(double));

	mul_fma_5_reduction << <numBlocks, BLOCK_SIZE >> > (dev_c, dev_a, dev_b);
	cudaEventRecord(kernel_stop1, 0);
	cudaEventSynchronize(kernel_stop1);
	cudaEventElapsedTime(&delta_time1, kernel_start1, kernel_stop1);
	//printf("%s:mul_fma_10 time: %.2fms. Execute %d operations.\n", device_prefix, delta_time1, N);

	cudaMemcpy(c, dev_c, tmpN * N, cudaMemcpyDeviceToHost);

	// BUG FIX: the timing events were leaked on every call.
	cudaEventDestroy(kernel_start1);
	cudaEventDestroy(kernel_stop1);

	cudaFree(dev_c);
	cudaFree(dev_a);
	cudaFree(dev_b);
}


extern "C"
// Host wrapper: uploads N elements of a, times the sqr_fma_5_reduction
// benchmark kernel with CUDA events, and copies the result back into c.
void sqr_fma5_reductionAPI(argElement_x64 c, argElement_x64 a) {
	argElement_x64 dev_a = NULL;
	argElement_x64 dev_c = NULL;

	dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
	int numBlocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
	int numThreads = numBlocks * BLOCK_SIZE;

	int deviceNum = 0;
	float delta_time1 = 0.0f;
	cudaEvent_t kernel_start1, kernel_stop1;
	struct cudaDeviceProp device_prop;

	// Select which GPU the program runs on.
	cudaSetDevice(deviceNum);
	int tmpN = NUM_DIGITS_221_x64_fp * sizeof(uint64_t);	// bytes per element

	// Allocate and zero the device vectors for c = a^2.
	cudaMalloc((void**)&dev_c, tmpN * numThreads);
	cudaMalloc((void**)&dev_a, tmpN * numThreads);
	cudaMemset(dev_c, 0, tmpN * numThreads);
	cudaMemset(dev_a, 0, tmpN * numThreads);

	cudaMemcpy(dev_a, a, tmpN * numThreads, cudaMemcpyHostToDevice);

	cudaEventCreate(&kernel_start1);
	cudaEventCreateWithFlags(&kernel_stop1, cudaEventBlockingSync);
	cudaGetDeviceProperties(&device_prop, deviceNum);
	cudaEventRecord(kernel_start1, 0);

	sqr_fma_5_reduction << <numBlocks, threadsPerBlock >> > (dev_c, dev_a);
	cudaEventRecord(kernel_stop1, 0);
	cudaEventSynchronize(kernel_stop1);
	cudaEventElapsedTime(&delta_time1, kernel_start1, kernel_stop1);
	//printf("%s:mul_fma_10 time: %.2fms. Execute %d operations.\n", device_prefix, delta_time1, N);

	cudaMemcpy(c, dev_c, tmpN * N, cudaMemcpyDeviceToHost);

	// BUG FIX: the timing events were leaked on every call.
	cudaEventDestroy(kernel_start1);
	cudaEventDestroy(kernel_stop1);

	cudaFree(dev_c);
	cudaFree(dev_a);
}

extern "C"
// Host wrapper for the 32-bit integer multiplication benchmarks: uploads a
// and b, times intmul_kernel followed by intmul2_kernel (both variants share
// one event pair), and copies the result of the last kernel back into c.
void intmulAPI(argElement_x32 c, argElement_x32 a, argElement_x32 b) {
	argElement_x32 dev_a = NULL;
	argElement_x32 dev_b = NULL;
	argElement_x32 dev_c = NULL;

	dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
	int numBlocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
	int numThreads = numBlocks * BLOCK_SIZE;

	int deviceNum = 0;
	float delta_time1 = 0.0f;
	cudaEvent_t kernel_start1, kernel_stop1;
	struct cudaDeviceProp device_prop;

	// Select which GPU the program runs on.
	cudaSetDevice(deviceNum);
	int tmpN = NUM_DIGITS_221_x32 * sizeof(uint32_t);	// bytes per element

	// Allocate and zero the three device vectors for c = a * b.
	cudaMalloc((void**)&dev_c, tmpN * numThreads);
	cudaMalloc((void**)&dev_a, tmpN * numThreads);
	cudaMalloc((void**)&dev_b, tmpN * numThreads);
	cudaMemset(dev_c, 0, tmpN * numThreads);
	cudaMemset(dev_a, 0, tmpN * numThreads);
	cudaMemset(dev_b, 0, tmpN * numThreads);

	cudaMemcpy(dev_a, a, tmpN * numThreads, cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b, b, tmpN * numThreads, cudaMemcpyHostToDevice);

	cudaEventCreate(&kernel_start1);
	cudaEventCreateWithFlags(&kernel_stop1, cudaEventBlockingSync);
	cudaGetDeviceProperties(&device_prop, deviceNum);
	cudaEventRecord(kernel_start1, 0);

	intmul_kernel << <numBlocks, threadsPerBlock >> > (dev_c, dev_a, dev_b);
	intmul2_kernel << <numBlocks, threadsPerBlock >> > (dev_c, dev_a, dev_b);
	cudaEventRecord(kernel_stop1, 0);
	cudaEventSynchronize(kernel_stop1);
	cudaEventElapsedTime(&delta_time1, kernel_start1, kernel_stop1);
	//printf("%s:mul_fma_10 time: %.2fms. Execute %d operations.\n", device_prefix, delta_time1, N);

	cudaMemcpy(c, dev_c, tmpN * N, cudaMemcpyDeviceToHost);

	// BUG FIX: the timing events were leaked on every call.
	cudaEventDestroy(kernel_start1);
	cudaEventDestroy(kernel_stop1);

	cudaFree(dev_c);
	cudaFree(dev_a);
	cudaFree(dev_b);
}

extern "C"
// Host wrapper for the 32-bit integer squaring benchmark: uploads a, times
// intsqr_kernel with CUDA events, and copies the result back into c.
void intsqrAPI(argElement_x32 c, argElement_x32 a) {
	argElement_x32 dev_a = NULL;
	argElement_x32 dev_c = NULL;

	dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
	int numBlocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
	int numThreads = numBlocks * BLOCK_SIZE;

	int deviceNum = 0;
	float delta_time1 = 0.0f;
	cudaEvent_t kernel_start1, kernel_stop1;
	struct cudaDeviceProp device_prop;

	// Select which GPU the program runs on.
	cudaSetDevice(deviceNum);
	int tmpN = NUM_DIGITS_221_x32 * sizeof(uint32_t);	// bytes per element

	// Allocate and zero the device vectors for c = a^2.
	cudaMalloc((void**)&dev_c, tmpN * numThreads);
	cudaMalloc((void**)&dev_a, tmpN * numThreads);
	cudaMemset(dev_c, 0, tmpN * numThreads);
	cudaMemset(dev_a, 0, tmpN * numThreads);

	cudaMemcpy(dev_a, a, tmpN * numThreads, cudaMemcpyHostToDevice);

	cudaEventCreate(&kernel_start1);
	cudaEventCreateWithFlags(&kernel_stop1, cudaEventBlockingSync);
	cudaGetDeviceProperties(&device_prop, deviceNum);
	cudaEventRecord(kernel_start1, 0);

	intsqr_kernel << <numBlocks, threadsPerBlock >> > (dev_c, dev_a);
	cudaEventRecord(kernel_stop1, 0);
	cudaEventSynchronize(kernel_stop1);
	cudaEventElapsedTime(&delta_time1, kernel_start1, kernel_stop1);
	//printf("%s:mul_fma_10 time: %.2fms. Execute %d operations.\n", device_prefix, delta_time1, N);

	cudaMemcpy(c, dev_c, tmpN * N, cudaMemcpyDeviceToHost);

	// BUG FIX: the timing events were leaked on every call.
	cudaEventDestroy(kernel_start1);
	cudaEventDestroy(kernel_stop1);

	cudaFree(dev_c);
	cudaFree(dev_a);
}

extern "C"
// Host wrapper: uploads a and b, times mul_fma_5_reduction_consecutive (the
// strided-layout FMA multiplication) with CUDA events, and copies the result
// back into c. Callers must lay out a/b in the consecutive layout
// (limb i of element t at index t + N*i), e.g. via AssignNfp_consecutive.
void mul_fma5_reductionAPI_consecutive(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	argElement_x64 dev_a = NULL;
	argElement_x64 dev_b = NULL;
	argElement_x64 dev_c = NULL;

	dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
	int numBlocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
	int numThreads = numBlocks * BLOCK_SIZE;

	int deviceNum = 0;
	float delta_time1 = 0.0f;
	cudaEvent_t kernel_start1, kernel_stop1;
	struct cudaDeviceProp device_prop;

	// Select which GPU the program runs on.
	cudaSetDevice(deviceNum);
	int tmpN = NUM_DIGITS_221_x64_fp * sizeof(uint64_t);	// bytes per element

	// Allocate and zero the three device vectors for c = a * b.
	cudaMalloc((void**)&dev_c, tmpN * numThreads);
	cudaMalloc((void**)&dev_a, tmpN * numThreads);
	cudaMalloc((void**)&dev_b, tmpN * numThreads);
	cudaMemset(dev_c, 0, tmpN * numThreads);
	cudaMemset(dev_a, 0, tmpN * numThreads);
	cudaMemset(dev_b, 0, tmpN * numThreads);

	cudaMemcpy(dev_a, a, tmpN * numThreads, cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b, b, tmpN * numThreads, cudaMemcpyHostToDevice);

	cudaEventCreate(&kernel_start1);
	cudaEventCreateWithFlags(&kernel_stop1, cudaEventBlockingSync);
	cudaGetDeviceProperties(&device_prop, deviceNum);
	cudaEventRecord(kernel_start1, 0);

	mul_fma_5_reduction_consecutive << <numBlocks, threadsPerBlock >> > (dev_c, dev_a, dev_b);
	cudaEventRecord(kernel_stop1, 0);
	cudaEventSynchronize(kernel_stop1);
	cudaEventElapsedTime(&delta_time1, kernel_start1, kernel_stop1);
	//printf("%s:mul_fma_10 time: %.2fms. Execute %d operations.\n", device_prefix, delta_time1, N);

	cudaMemcpy(c, dev_c, tmpN * N, cudaMemcpyDeviceToHost);

	// BUG FIX: the timing events were leaked on every call.
	cudaEventDestroy(kernel_start1);
	cudaEventDestroy(kernel_stop1);

	cudaFree(dev_c);
	cudaFree(dev_a);
	cudaFree(dev_b);
}

// Normalize c in place (unique form when num == NUM_DIGITS_221_x64_fp,
// fast/lazy form otherwise) and print it to `file` as one big-endian hex
// number, 13 hex digits per limb.
void print(FILE* file, argElement_x64 c, int num) {
	if (num == NUM_DIGITS_221_x64_fp) {
		simplification_unique_host(c, c);
	}
	else {
		simplification_fast(c, num);
	}

	// BUG FIX: output previously went to stdout via printf, ignoring the
	// `file` parameter; use fprintf so callers can redirect the output.
	fprintf(file, "0X");
	for (int i = num - 1; i >= 0; i--) {
		// Cast so the %llX conversion matches on platforms where uint64_t
		// is not unsigned long long.
		fprintf(file, "%013llX", (unsigned long long)c[i]);
	}
	fprintf(file, "\n");
}


//only for EltBN221_x64_fp
// Replicate one NUM_DIGITS_221_x64_fp-limb element n times into dst using the
// interleaved layout: copy j occupies dst[j*NUM_DIGITS_221_x64_fp ..].
void AssignNfp(argElement_x64 dst, argElement_x64 src, int n) {
	for (int copy = 0; copy < n; copy++) {
		argElement_x64 out = dst + copy * NUM_DIGITS_221_x64_fp;
		for (int limb = 0; limb < NUM_DIGITS_221_x64_fp; limb++) {
			out[limb] = src[limb];
		}
	}
}
// Replicate one NUM_DIGITS_221_x32-limb (7 x 32-bit) element n times into dst
// using the interleaved layout: copy j occupies dst[j*NUM_DIGITS_221_x32 ..].
void AssignN32(argElement_x32 dst, argElement_x32 src, int n) {
	// BUG FIX: previously used NUM_DIGITS_221_x64_fp (5) for both the limb
	// count and the stride, so limbs 5..6 were never copied and every copy
	// was packed with the wrong stride.
	for (int i = 0; i < NUM_DIGITS_221_x32; i++) {
		for (int j = 0; j < n; j++) {
			dst[i + NUM_DIGITS_221_x32 * j] = src[i];
		}
	}
}

// Replicate one NUM_DIGITS_221_x64_fp-limb element n times into dst using the
// "consecutive" (strided) layout expected by mul_fma_5_reduction_consecutive:
// limb i of copy j lands at dst[j + i*n].
void AssignNfp_consecutive(argElement_x64 dst, argElement_x64 src, int n) {
	int i, j;
	// BUG FIX: the limb loop ran to NUM_DIGITS_221_x32 (7), reading past the
	// 5-limb src and writing past the 5*n-element dst.
	for (i = 0; i < NUM_DIGITS_221_x64_fp; i++) {
		for (j = 0; j < n; j++) {
			dst[j + i * n] = src[i];
		}
	}
}


// Driver: replicates fixed test operands across N benchmark slots, runs the
// FMA-based mul/sqr APIs (printing A^2 mod p from both paths for comparison)
// and the 32-bit integer APIs, then releases all host buffers.
int main() {
	EltBN221_x64_fp A = { 0x134934453435,0x134462363435,0x131232453435,0x134925323435,0x15353535345 };
	EltBN221_x64_fp B = { 0x1F35124FFFFF,0x1F24144FFFFF,0x1FF1252FFFFF,0x1F152FFFFFFF,0x1FF4542222FF };

	uint64_t* AN = new uint64_t[NUM_DIGITS_221_x64_fp * N];
	uint64_t* BN = new uint64_t[NUM_DIGITS_221_x64_fp * N];
	uint64_t* CN = new uint64_t[NUM_DIGITS_221_x64_fp * N];

	// Fill all N slots and sanity-print the first and last copies.
	AssignNfp(AN, A, N);
	AssignNfp(BN, B, N);
	print(stdout, AN, NUM_DIGITS_221_x64_fp);
	print(stdout, BN, NUM_DIGITS_221_x64_fp);
	print(stdout, AN + (N - 1) * NUM_DIGITS_221_x64_fp, NUM_DIGITS_221_x64_fp);
	print(stdout, BN + (N - 1) * NUM_DIGITS_221_x64_fp, NUM_DIGITS_221_x64_fp);

	// Warm-up launch before the timed runs.
	sqr_fma5_reductionAPI(CN, AN);

	// A^2 via the multiplication path, then via the squaring path, so the two
	// implementations can be compared from the printed output.
	mul_fma5_reductionAPI(CN, AN, AN);
	printf("A^2  mod P mul:=\n");
	print(stdout, CN, NUM_DIGITS_221_x64_fp);
	print(stdout, CN + (N - 1) * NUM_DIGITS_221_x64_fp, NUM_DIGITS_221_x64_fp);

	sqr_fma5_reductionAPI(CN, AN);
	printf("A^2  mod P sqr:=\n");
	print(stdout, CN, NUM_DIGITS_221_x64_fp);
	print(stdout, CN + (N - 1) * NUM_DIGITS_221_x64_fp, NUM_DIGITS_221_x64_fp);

	// 32-bit integer implementation benchmarks.
	uint32_t A32[7] = { 0x1234124,0x324234,0x34242332,0x342342,0x5342423,0x34242332, 0x34242332 };
	uint32_t B32[7] = { 0x1234124,0x324234,0x34242332,0x342342,0x5342423,0x34242332, 0x34242332 };
	uint32_t* A32N = new uint32_t[NUM_DIGITS_221_x32 * N];
	uint32_t* B32N = new uint32_t[NUM_DIGITS_221_x32 * N];
	uint32_t* C32N = new uint32_t[NUM_DIGITS_221_x32 * N];
	AssignN32(A32N, A32, N);
	AssignN32(B32N, B32, N);

	intsqrAPI(C32N, A32N);
	intmulAPI(C32N, A32N, B32N);

	// BUG FIX: arrays allocated with new[] must be released with delete[]
	// (plain delete is undefined behavior here), and the three 32-bit arrays
	// were previously leaked.
	delete[] AN;
	delete[] BN;
	delete[] CN;
	delete[] A32N;
	delete[] B32N;
	delete[] C32N;
	return 0;
}