﻿#include<iostream>
#include<limits>
#include<iomanip>
#include<cstdint>

#include "cuda_runtime.h"
#include "device_launch_parameters.h"

using namespace std;

#define argElement_x32 uint32_t*
#define argElement_x64 uint64_t*
#define argElement_fp double*
#define NUM_DIGITS_251_x64 4
#define NUM_DIGITS_251_x32 8
#define NUM_DIGITS_251_x64_fp 5

#define BLOCK_WIDTH 32
#define BLOCK_HEIGHT 4
#define BLOCK_SIZE (BLOCK_WIDTH*BLOCK_HEIGHT)
#define N (56*BLOCK_SIZE)
#define Iterate 1000000

typedef uint64_t EltBN251_x64[NUM_DIGITS_251_x64];
typedef uint64_t EltBN251_x64_fp[5];
typedef uint64_t EltBN251_x64_fp_buffer[10];
typedef double EltBN521_fp[5];


//p=2^251-9
//32&27
//reduction for integer implementation
//Reduce a 16-limb (32-bit limbs) product c to an 8-limb result r,
//mod p = 2^251-9, so 2^251 == 9 and 2^256 == 9*2^5 = 288 (mod p).
//NOTE: c is modified in place (its low half is overwritten with partial sums).
__device__ void intreduction(argElement_x32 r, argElement_x32 c) {
	uint64_t t = 0;
	// Fold the upper eight limbs down: c[8+i]*2^256 contributes 9*(c[8+i]<<5)
	// at limb position i; t carries between limbs.
	for (int i = 0;i < 8;i++) {
		t = 9 * ((uint64_t)c[8 + i] << 5) + t + c[i];
		c[i] = (uint32_t)t;
		t = t >> 32;
	}
	// Collect the bits at/above 2^251: the top 5 bits of limb 7 plus the
	// final carry (which sits at 2^256 = 2^5 * 2^251, hence the << 5).
	t = (c[7] >> 27) + (t << 5);
	// Fold them in at the bottom: x*2^251 == 9*x (mod p).
	t = c[0] + 9 * t;
	r[0] = (uint32_t)t;t = t >> 32;
	// Propagate the remaining carry through limbs 1..7.
	// NOTE(review): the top 5 bits of c[7] were folded above but are not
	// masked off before r[7] is written, so the result is only partially
	// reduced (those bits appear twice unless canonicalized later) — confirm.
	for (int i = 1;i < 8;i++) {
		t = (uint64_t)c[i] + t;
		r[i] = (uint32_t)t;
		t = t >> 32;
	}
}

//Schoolbook 8x8 32-bit multiplication via PTX mad/madc into a 16-limb
//accumulator c, followed by modular reduction into r.
//NOTE(review): the carry flag (CC.CF) set in one asm() statement is consumed
//by madc/addc in a *different* asm() statement (e.g. L59's mad.hi.cc feeds
//the following madc loop). Neither PTX nor NVCC guarantees the condition
//code survives across separate asm blocks — the compiler may schedule
//CC-clobbering instructions in between. Verify the generated SASS, or merge
//the dependent instruction chains into one asm block (as intmul2 does).
__device__ void intmul(argElement_x32 r, argElement_x32 a, argElement_x32 b) {
	uint32_t c[16];
	for (int i = 0;i < 16;i++) {
		c[i] = 0;
	}
	// Low halves of a[j]*b[0]; c[j] is passed back in as input %3.
	for (int j = 0;j < NUM_DIGITS_251_x32;j++) {
		// c[j]=0;
		asm("mad.lo.u32 %0,%1,%2,%3;":"=r"(c[j]) : "r"(a[j]), "r"(b[0]), "r"(c[j]));
	}
	// High halves of a[j]*b[0], carry-chained one limb up.
	asm("mad.hi.cc.u32 %0,%1,%2,%3;":"=r"(c[1]) : "r"(a[0]), "r"(b[0]), "r"(c[1]));
	// c[NUM_DIGITS_251_x32]=0;
	for (int j = 1;j < NUM_DIGITS_251_x32;j++) {
		asm("madc.hi.cc.u32 %0,%1,%2,%3;":"=r"(c[j + 1]) : "r"(a[j]), "r"(b[0]), "r"(c[j + 1]));
	}

	// Remaining rows: accumulate a[j]*b[i] low halves, absorb the carry into
	// the top limb of the row, then accumulate the high halves.
	for (int i = 1;i < NUM_DIGITS_251_x32;i++) {
		for (int j = 0;j < NUM_DIGITS_251_x32;j++) {
			asm("mad.lo.u32 %0,%1,%2,%3;":"=r"(c[i + j]) : "r"(a[j]), "r"(b[i]), "r"(c[i + j]));
		}
		asm("addc.cc.u32 %0, %1, 0;" : "=r"(c[i + NUM_DIGITS_251_x32]) : "r"(c[i + NUM_DIGITS_251_x32]));
		for (int j = 0;j < NUM_DIGITS_251_x32;j++) {
			asm("madc.hi.cc.u32 %0, %1, %2, %3;" : "=r"(c[i + j + 1]) : "r"(a[j]), "r"(b[i]), "r"(c[i + j + 1]));
		}
	}
	intreduction(r, c);
}

//8x8 32-bit multiply-and-reduce in a single asm block (so the carry chain is
//never broken by the compiler, unlike intmul).
//Operand map:
//  %0-%7   c[0..7]   result limbs
//  %8-%9   ct[0..1]  upper-limb scratch
//  %10-%11 ct[2..3]  preloaded with b[0], b[1]; used as multiplicands in
//                    passes 0-1, then recycled as upper-limb scratch
//  %12-%15 ct[4..7]  upper-limb scratch
//  %16-%23 a[0..7], %24-%29 b[2..7]  inputs
//NOTE(review): the "add" fold multiplies the upper limbs by 96; for
//p = 2^251-9 one would expect 2^256 == 9*2^5 = 288 (mod p) as used by
//intreduction. Confirm the intended modulus/scaling before trusting this path.
//NOTE(review): the carry out of the last "addc.cc.u32 %7" is dropped.
__device__ void intmul2(argElement_x32 c, argElement_x32 a, argElement_x32 b) {
	unsigned int ct[8] = { 0 };
	ct[2] = b[0];
	ct[3] = b[1];
	asm volatile(
			//0
			"mul.lo.u32 		%0, %10, %16;\n\t"
			"mul.hi.u32 		%1, %10, %16;\n\t"
			"mad.lo.cc.u32 		%1, %10, %17,  %1;\n\t"
			"madc.hi.u32 		%2, %10, %17,  0;\n\t"
			"mad.lo.cc.u32 		%2, %10, %18,  %2;\n\t"
			"madc.hi.u32 		%3, %10, %18,  0;\n\t"
			"mad.lo.cc.u32 		%3, %10, %19,  %3;\n\t"
			"madc.hi.u32 		%4, %10, %19,  0;\n\t"
			"mad.lo.cc.u32 		%4, %10, %20,  %4;\n\t"
			"madc.hi.u32 		%5, %10, %20,  0;\n\t"
			"mad.lo.cc.u32 		%5, %10, %21,  %5;\n\t"
			"madc.hi.u32 		%6, %10, %21,  0;\n\t"
			"mad.lo.cc.u32 		%6, %10, %22,  %6;\n\t"
			"madc.hi.u32 		%7, %10, %22,  0;\n\t"
			"mad.lo.cc.u32 		%7, %10, %23,  %7;\n\t"
			"madc.hi.u32 		%8, %10, %23,  0;\n\t"
			//1
			"mad.lo.cc.u32 		%1, %11, %16,  %1;\n\t"
			"madc.hi.cc.u32 	%2, %11, %16,  %2;\n\t"
			"madc.lo.cc.u32 	%3, %11, %18,  %3;\n\t"
			"madc.hi.cc.u32 	%4, %11, %18,  %4;\n\t"
			"madc.lo.cc.u32 	%5, %11, %20,  %5;\n\t"
			"madc.hi.cc.u32 	%6, %11, %20,  %6;\n\t"
			"madc.lo.cc.u32 	%7, %11, %22,  %7;\n\t"
			"madc.hi.cc.u32 	%8, %11, %22,  %8;\n\t"
			"addc.u32 			%9, 0,  0;\n\t"
			"mad.lo.cc.u32 		%2, %11, %17,   %2;\n\t"
			"madc.hi.cc.u32 	%3, %11, %17,   %3;\n\t"
			"madc.lo.cc.u32 	%4, %11, %19,   %4;\n\t"
			"madc.hi.cc.u32 	%5, %11, %19,   %5;\n\t"
			"madc.lo.cc.u32 	%6, %11, %21,   %6;\n\t"
			"madc.hi.cc.u32 	%7, %11, %21,   %7;\n\t"
			"madc.lo.cc.u32 	%8, %11, %23,   %8;\n\t"
			"madc.hi.u32 		%9, %11, %23,   %9;\n\t"
			//2
			"mad.lo.cc.u32 		%2, %24, %16,  %2;\n\t"
			"madc.hi.cc.u32 	%3, %24, %16,  %3;\n\t"
			"madc.lo.cc.u32 	%4, %24, %18,  %4;\n\t"
			"madc.hi.cc.u32 	%5, %24, %18,  %5;\n\t"
			"madc.lo.cc.u32 	%6, %24, %20,  %6;\n\t"
			"madc.hi.cc.u32 	%7, %24, %20,  %7;\n\t"
			"madc.lo.cc.u32 	%8, %24, %22,  %8;\n\t"
			"madc.hi.cc.u32 	%9, %24, %22,  %9;\n\t"
			"addc.u32 			%10, 0,  0;\n\t"
			"mad.lo.cc.u32 		%3, %24, %17,   %3;\n\t"
			"madc.hi.cc.u32 	%4, %24, %17,   %4;\n\t"
			"madc.lo.cc.u32 	%5, %24, %19,   %5;\n\t"
			"madc.hi.cc.u32 	%6, %24, %19,   %6;\n\t"
			"madc.lo.cc.u32 	%7, %24, %21,   %7;\n\t"
			"madc.hi.cc.u32 	%8, %24, %21,   %8;\n\t"
			"madc.lo.cc.u32 	%9, %24, %23,   %9;\n\t"
			"madc.hi.u32 		%10, %24, %23,  %10;\n\t"
			//3
			"mad.lo.cc.u32 		%3, %25, %16,  %3;\n\t"
			"madc.hi.cc.u32 	%4, %25, %16,  %4;\n\t"
			"madc.lo.cc.u32 	%5, %25, %18,  %5;\n\t"
			"madc.hi.cc.u32 	%6, %25, %18,  %6;\n\t"
			"madc.lo.cc.u32 	%7, %25, %20,  %7;\n\t"
			"madc.hi.cc.u32 	%8, %25, %20,  %8;\n\t"
			"madc.lo.cc.u32 	%9, %25, %22,  %9;\n\t"
			"madc.hi.cc.u32 	%10, %25, %22,  %10;\n\t"
			"addc.u32 			%11, 0,  0;\n\t"
			"mad.lo.cc.u32 		%4, %25, %17,   %4;\n\t"
			"madc.hi.cc.u32 	%5, %25, %17,   %5;\n\t"
			"madc.lo.cc.u32 	%6, %25, %19,   %6;\n\t"
			"madc.hi.cc.u32 	%7, %25, %19,   %7;\n\t"
			"madc.lo.cc.u32 	%8, %25, %21,   %8;\n\t"
			"madc.hi.cc.u32 	%9, %25, %21,   %9;\n\t"
			"madc.lo.cc.u32 	%10, %25, %23,   %10;\n\t"
			"madc.hi.u32 		%11, %25, %23,   %11;\n\t"
			//4
			"mad.lo.cc.u32 		%4, %26, %16,  %4;\n\t"
			"madc.hi.cc.u32 	%5, %26, %16,  %5;\n\t"
			"madc.lo.cc.u32 	%6, %26, %18,  %6;\n\t"
			"madc.hi.cc.u32 	%7, %26, %18,  %7;\n\t"
			"madc.lo.cc.u32 	%8, %26, %20,  %8;\n\t"
			"madc.hi.cc.u32 	%9, %26, %20,  %9;\n\t"
			"madc.lo.cc.u32 	%10, %26, %22,  %10;\n\t"
			"madc.hi.cc.u32 	%11, %26, %22,  %11;\n\t"
			"addc.u32 			%12, 0,  0;\n\t"
			"mad.lo.cc.u32 		%5, %26, %17,   %5;\n\t"
			"madc.hi.cc.u32 	%6, %26, %17,   %6;\n\t"
			"madc.lo.cc.u32 	%7, %26, %19,   %7;\n\t"
			"madc.hi.cc.u32 	%8, %26, %19,   %8;\n\t"
			"madc.lo.cc.u32 	%9, %26, %21,   %9;\n\t"
			"madc.hi.cc.u32 	%10, %26, %21,   %10;\n\t"
			"madc.lo.cc.u32 	%11, %26, %23,   %11;\n\t"
			"madc.hi.u32 		%12, %26, %23,   %12;\n\t"
			//5
			"mad.lo.cc.u32 		%5, %27, %16,  %5;\n\t"
			"madc.hi.cc.u32 	%6, %27, %16,  %6;\n\t"
			"madc.lo.cc.u32 	%7, %27, %18,  %7;\n\t"
			"madc.hi.cc.u32 	%8, %27, %18,  %8;\n\t"
			"madc.lo.cc.u32 	%9, %27, %20,  %9;\n\t"
			"madc.hi.cc.u32 	%10, %27, %20,  %10;\n\t"
			"madc.lo.cc.u32 	%11, %27, %22,  %11;\n\t"
			"madc.hi.cc.u32 	%12, %27, %22,  %12;\n\t"
			"addc.u32 			%13, 0,  0;"
			"mad.lo.cc.u32 		%6, %27, %17,   %6;\n\t"
			"madc.hi.cc.u32 	%7, %27, %17,   %7;\n\t"
			"madc.lo.cc.u32 	%8, %27, %19,   %8;\n\t"
			"madc.hi.cc.u32 	%9, %27, %19,   %9;\n\t"
			"madc.lo.cc.u32 	%10, %27, %21,   %10;\n\t"
			"madc.hi.cc.u32 	%11, %27, %21,   %11;\n\t"
			"madc.lo.cc.u32 	%12, %27, %23,   %12;\n\t"
			"madc.hi.u32 		%13, %27, %23,   %13;\n\t"

			//6
			"mad.lo.cc.u32 		%6, %28, %16,  %6;\n\t"
			"madc.hi.cc.u32 	%7, %28, %16,  %7;\n\t"
			"madc.lo.cc.u32 	%8, %28, %18,  %8;\n\t"
			"madc.hi.cc.u32 	%9, %28, %18,  %9;\n\t"
			"madc.lo.cc.u32 	%10, %28, %20,  %10;\n\t"
			"madc.hi.cc.u32 	%11, %28, %20,  %11;\n\t"
			"madc.lo.cc.u32 	%12, %28, %22,  %12;\n\t"
			"madc.hi.cc.u32 	%13, %28, %22,  %13;\n\t"
			"addc.u32 			%14, 0,  0;\n\t"
			"mad.lo.cc.u32 		%7, %28, %17,   %7;\n\t"
			"madc.hi.cc.u32 	%8, %28, %17,   %8;\n\t"
			"madc.lo.cc.u32 	%9, %28, %19,   %9;\n\t"
			"madc.hi.cc.u32 	%10, %28, %19,   %10;\n\t"
			"madc.lo.cc.u32 	%11, %28, %21,   %11;\n\t"
			"madc.hi.cc.u32 	%12, %28, %21,   %12;\n\t"
			"madc.lo.cc.u32 	%13, %28, %23,   %13;\n\t"
			"madc.hi.u32 		%14, %28, %23,   %14;\n\t"
			//7
			"mad.lo.cc.u32 		%7, %29, %16,  %7;\n\t"
			"madc.hi.cc.u32 	%8, %29, %16,  %8;\n\t"
			"madc.lo.cc.u32 	%9, %29, %18,  %9;\n\t"
			"madc.hi.cc.u32 	%10, %29, %18,  %10;\n\t"
			"madc.lo.cc.u32 	%11, %29, %20,  %11;\n\t"
			"madc.hi.cc.u32 	%12, %29, %20,  %12;\n\t"
			"madc.lo.cc.u32 	%13, %29, %22,  %13;\n\t"
			"madc.hi.cc.u32 	%14, %29, %22,  %14;\n\t"
			"addc.u32 			%15, 0,  0;\n\t"
			"mad.lo.cc.u32 		%8, %29, %17,   %8;\n\t"
			"madc.hi.cc.u32 	%9, %29, %17,   %9;\n\t"
			"madc.lo.cc.u32 	%10, %29, %19,   %10;\n\t"
			"madc.hi.cc.u32 	%11, %29, %19,   %11;\n\t"
			"madc.lo.cc.u32 	%12, %29, %21,   %12;\n\t"
			"madc.hi.cc.u32 	%13, %29, %21,   %13;\n\t"
			"madc.lo.cc.u32 	%14, %29, %23,   %14;\n\t"
			"madc.hi.u32 		%15, %29, %23,   %15;\n\t"

			//add

			"mad.lo.cc.u32 		%0,		%8, 	96,		%0;\n\t"
			"madc.hi.cc.u32 	%1,		%8, 	96,		%1;\n\t"
			"madc.lo.cc.u32 	%2,		%10, 	96,		%2;\n\t"
			"madc.hi.cc.u32 	%3,		%10, 	96,		%3;\n\t"
			"madc.lo.cc.u32 	%4,		%12, 	96,		%4;\n\t"
			"madc.hi.cc.u32 	%5,		%12, 	96,		%5;\n\t"
			"madc.lo.cc.u32 	%6,		%14, 	96,		%6;\n\t"
			"madc.hi.cc.u32 	%7,		%14, 	96,		%7;\n\t"
			"addc.u32			%8, 	0, 		0;\n\t"
			"mad.lo.cc.u32 		%1,		%9, 	96,		%1;\n\t"
			"madc.hi.cc.u32 	%2,		%9, 	96,		%2;\n\t"
			"madc.lo.cc.u32 	%3,		%11, 	96,		%3;\n\t"
			"madc.hi.cc.u32 	%4,		%11, 	96,		%4;\n\t"
			"madc.lo.cc.u32 	%5,		%13, 	96,		%5;\n\t"
			"madc.hi.cc.u32 	%6,		%13, 	96,		%6;\n\t"
			"madc.lo.cc.u32 	%7,		%15, 	96,		%7;\n\t"
			"madc.hi.u32 		%8,		%15, 	96,		%8;\n\t"
			"shl.b32			%8,		%8,		5;\n\t"
			"shr.b32			%9,		%7,		27;\n\t"
			"or.b32				%8,		%8,		%9;\n\t"
			"and.b32			%7,		%7,		0x7FFFFFF;\n\t"
			"mul.lo.u32			%8,		%8,		9;\n\t"
			"add.cc.u32		%0, 	%0,		%8;"
			"addc.cc.u32	%1, 	%1,		0;"
			"addc.cc.u32	%2, 	%2,		0;"
			"addc.cc.u32	%3, 	%3,		0;"
			"addc.cc.u32	%4, 	%4,		0;"
			"addc.cc.u32	%5, 	%5,		0;"
			"addc.cc.u32	%6, 	%6,		0;"
			"addc.cc.u32	%7, 	%7,		0;"
			:"=r"(c[0]), "=r"(c[1]), "=r"(c[2]), "=r"(c[3]),
			"=r"(c[4]), "=r"(c[5]), "=r"(c[6]), "=r"(c[7]),
			"=r"(ct[0]), "=r"(ct[1]), "+r"(ct[2]), "+r"(ct[3]),
			"=r"(ct[4]), "=r"(ct[5]), "=r"(ct[6]), "=r"(ct[7])
			: "r"(a[0]), "r"(a[1]), "r"(a[2]), "r"(a[3]),
			"r"(a[4]), "r"(a[5]), "r"(a[6]), "r"(a[7]),
			"r"(b[2]), "r"(b[3]), "r"(b[4]), "r"(b[5]), "r"(b[6]), "r"(b[7])
			);	
}

//Squaring: computes the off-diagonal partial products a[i]*a[j] (i<j) once,
//doubles the accumulator, then adds the diagonal terms a[i]^2, and reduces.
//NOTE(review): many operands here are declared write-only ("=r") while the
//instruction also *reads* %0 (e.g. "mad.lo.cc.u32 %0,%1,%2,%0") — reading a
//write-only asm operand is undefined; these should be "+r". It may happen to
//work if the compiler keeps c[k] in the same register, but that is not
//guaranteed.
//NOTE(review): as in intmul, the CC carry flag is chained across separate
//asm() statements, which PTX/NVCC does not guarantee to preserve.
__device__ void intsqr(argElement_x32 r, argElement_x32 a) {
	uint32_t c[16];
	for (int i = 0;i < 16;i++) {
		c[i] = 0;
	}
	// Off-diagonal partial products, accumulated with carry chains.
	for (int i = 0;i <= (NUM_DIGITS_251_x32 / 2 - 2);i++) {
		asm("mad.lo.cc.u32 %0, %1, %2, %0;" : "=r"(c[2 * i + 1]) : "r"(a[i + 1]), "r"(a[i]));//
		for (int j = i + 2;j <= NUM_DIGITS_251_x32 - i - 2;j++) {
			asm("madc.lo.cc.u32 %0, %1, %2, %0;" : "=r"(c[i + j]) : "r"(a[j]), "r"(a[i]));
		}
		for (int k = 0; k <= i; k++) {
			asm("madc.lo.cc.u32 %0, %1, %2, %0;" : "=r"(c[NUM_DIGITS_251_x32 + 2 * k - 1]) : "r"(a[NUM_DIGITS_251_x32 - 1 - i + k]), "r"(a[i + k]));
			asm("madc.hi.cc.u32 %0, %1, %2, %0;" : "=r"(c[NUM_DIGITS_251_x32 + 2 * k]) : "r"(a[NUM_DIGITS_251_x32 - 1 - i + k]), "r"(a[i + k]));
		}
		asm("mad.hi.cc.u32 %0, %1, %2, %0;" : "=r"(c[2 * i + 2]) : "r"(a[i + 1]), "r"(a[i]));
		for (int j = i + 2; j <= NUM_DIGITS_251_x32 - i - 2; j++) {
			asm("madc.hi.cc.u32 %0, %1, %2, %0;" : "=r"(c[i + j + 1]) : "r"(a[j]), "r"(a[i]));
		}
		for (int k = 0; k <= i; k++) {
			asm("madc.lo.cc.u32 %0, %1, %2, %0;" : "=r"(c[NUM_DIGITS_251_x32 + 2 * k]) : "r"(a[NUM_DIGITS_251_x32 - 1 - i + k]), "r"(a[i + k + 1]));
			asm("madc.hi.cc.u32 %0, %1, %2, %0;" : "=r"(c[NUM_DIGITS_251_x32 + 2 * k + 1]) : "r"(a[NUM_DIGITS_251_x32 - 1 - i + k]), "r"(a[i + k + 1]));
		}
	}
	// Middle cross products a[n/2]*a[n/2-1], a[n/2+k]*a[n/2+k-1].
	asm("mad.lo.cc.u32 %0, %1, %2, %0;" : "=r"(c[NUM_DIGITS_251_x32 - 1]) : "r"(a[NUM_DIGITS_251_x32 / 2]), "r"(a[NUM_DIGITS_251_x32 / 2 - 1]));
	asm("madc.hi.cc.u32 %0, %1, %2, %0;" : "=r"(c[NUM_DIGITS_251_x32]) : "r"(a[NUM_DIGITS_251_x32 / 2]), "r"(a[NUM_DIGITS_251_x32 / 2 - 1]));
	for (int k = 1; k < NUM_DIGITS_251_x32 / 2; k++) {
		asm("madc.lo.cc.u32 %0, %1, %2, %0;" : "=r"(c[NUM_DIGITS_251_x32 + 2 * k - 1]) : "r"(a[NUM_DIGITS_251_x32 / 2 + k]), "r"(a[NUM_DIGITS_251_x32 / 2 + k - 1]));
		asm("madc.hi.cc.u32 %0, %1, %2, %0;" : "=r"(c[NUM_DIGITS_251_x32 + 2 * k]) : "r"(a[NUM_DIGITS_251_x32 / 2 + k]), "r"(a[NUM_DIGITS_251_x32 / 2 + k - 1]));
	}
	// Double the off-diagonal sum (c[0] is still 0 at this point).
	asm("add.cc.u32 %0, %0, %0;" : "=r"(c[1]));
	for (int i = 2; i < 2 * NUM_DIGITS_251_x32; i++) {
		asm("addc.cc.u32 %0, %0, %0;" : "=r"(c[i]));
	}
	// Add the squared diagonal terms a[i]^2.
	asm("mad.lo.cc.u32 %0, %1, %1, %0;" : "=r"(c[0]) : "r"(a[0]));
	asm("madc.hi.cc.u32 %0, %1, %1, %0;" : "=r"(c[1]) : "r"(a[0]));
#pragma unroll
	for (int i = 1; i < NUM_DIGITS_251_x32; i++) {
		asm("madc.lo.cc.u32 %0, %1, %1, %0;" : "=r"(c[2 * i]) : "r"(a[i]));
		asm("madc.hi.cc.u32 %0, %1, %1, %0;" : "=r"(c[2 * i + 1]) : "r"(a[i]));
	}
	intreduction(r, c);
}

//Benchmark kernel: each thread loads one 8-limb operand pair into registers,
//repeats intmul Iterate times, and writes the last product back.
//Expects a 2D block (BLOCK_WIDTH x BLOCK_HEIGHT) and a 1D grid.
__global__ void intmul_kernel(argElement_x32 c, argElement_x32 a, argElement_x32 b) {
	const int thread_id = blockIdx.x * (blockDim.x * blockDim.y)
	                    + threadIdx.y * blockDim.x + threadIdx.x;
	const int base = thread_id * NUM_DIGITS_251_x32;
	uint32_t product[NUM_DIGITS_251_x32];
	uint32_t lhs[NUM_DIGITS_251_x32];
	uint32_t rhs[NUM_DIGITS_251_x32];
	for (int d = 0; d < NUM_DIGITS_251_x32; ++d) {
		lhs[d] = a[base + d];
		rhs[d] = b[base + d];
	}
	for (int iter = 0; iter < Iterate; ++iter) {
		intmul(product, lhs, rhs);
	}
	for (int d = 0; d < NUM_DIGITS_251_x32; ++d) {
		c[base + d] = product[d];
	}
}

//Benchmark kernel: same layout as intmul_kernel, but exercises the
//single-asm-block variant intmul2.
__global__ void intmul2_kernel(argElement_x32 c, argElement_x32 a, argElement_x32 b) {
	const int thread_id = blockIdx.x * (blockDim.x * blockDim.y)
	                    + threadIdx.y * blockDim.x + threadIdx.x;
	const int base = thread_id * NUM_DIGITS_251_x32;
	uint32_t product[NUM_DIGITS_251_x32];
	uint32_t lhs[NUM_DIGITS_251_x32];
	uint32_t rhs[NUM_DIGITS_251_x32];
	for (int d = 0; d < NUM_DIGITS_251_x32; ++d) {
		lhs[d] = a[base + d];
		rhs[d] = b[base + d];
	}
	for (int iter = 0; iter < Iterate; ++iter) {
		intmul2(product, lhs, rhs);
	}
	for (int d = 0; d < NUM_DIGITS_251_x32; ++d) {
		c[base + d] = product[d];
	}
}

//Benchmark kernel: each thread loads one 8-limb operand into registers,
//repeats intsqr Iterate times, and writes the last result back.
__global__ void intsqr_kernel(argElement_x32 c, argElement_x32 a) {
	const int thread_id = blockIdx.x * (blockDim.x * blockDim.y)
	                    + threadIdx.y * blockDim.x + threadIdx.x;
	const int base = thread_id * NUM_DIGITS_251_x32;
	uint32_t square[NUM_DIGITS_251_x32];
	uint32_t operand[NUM_DIGITS_251_x32];
	for (int d = 0; d < NUM_DIGITS_251_x32; ++d) {
		operand[d] = a[base + d];
	}
	for (int iter = 0; iter < Iterate; ++iter) {
		intsqr(square, operand);
	}
	for (int d = 0; d < NUM_DIGITS_251_x32; ++d) {
		c[base + d] = square[d];
	}
}

//Reinterpret the raw 64 bits of an IEEE-754 double as a uint64_t
//(type-punning through a union; no numeric conversion).
__device__ uint64_t to_u64(double d) {
	union Bits {
		double fp;
		uint64_t raw;
	};
	Bits pun;
	pun.fp = d;
	return pun.raw;
}

//Reinterpret a uint64_t bit pattern as an IEEE-754 double
//(type-punning through a union; no numeric conversion).
__device__ double to_double(uint64_t u64) {
	union Bits {
		double fp;
		uint64_t raw;
	};
	Bits pun;
	pun.raw = u64;
	return pun.fp;
}

// Constant added digit-wise by sub() so that per-digit differences cannot go
// negative. NOTE(review): these limbs do not match the canonical radix-2^51
// limbs of p = 2^251-9 (which would be 0x7FFFFFFFFFFF7, 0x7FFFFFFFFFFFF x3,
// 0x7FFFFFFFFFFF) — presumably a scaled multiple of p; confirm against the
// representation sub() operates on.
static const __device__ uint64_t GLOB_Const_P[5] = { 0xFFFFFFFFFFFF,0xFFFFFFFFFFF0, 0xFFFFFFFFFFF0 ,0xFFFFFFFFFFF0 ,0x1FFFFFFFFFF0 };
// Radix-2^51 representation: limbs 0..3 hold 51 bits, the top limb holds 47
// bits (4*51 + 47 = 251).
#define RADIX 51
#define LAST_DIGIT 47
static const __device__ uint64_t GLOB_mask51 = ((uint64_t)1 << RADIX) - 1;      // low-51-bit mask
static const __device__ uint64_t GLOB_mask47 = ((uint64_t)1 << LAST_DIGIT) - 1; // low-47-bit mask


//input length must be 5.
//Reduce a radix-2^51 element to the unique representative mod p = 2^251-9.
//Used before mul and squaring.
//NOTE: 'a' is modified in place (carries are propagated through it).
__host__ __device__ void simplification_unique(argElement_x64 c,argElement_x64 a) {
	// Fold the bits above 2^251 back in: x*2^251 == 9*x (mod p).
	uint64_t t = a[4] >> LAST_DIGIT;
	a[4] = a[4] & GLOB_mask47;
	a[0] = a[0] + t * 9;
	// Propagate carries limb by limb and store the masked limbs into c.
	// Fix: the output was previously only AND-masked (c[i] &= GLOB_mask51)
	// without ever being assigned from a[i], so the result depended on c's
	// prior — in every caller uninitialized — contents.
	for (int i = 0; i < 4; i++) {
		a[i + 1] += (a[i] >> RADIX);
		c[i] = a[i] & GLOB_mask51;
	}
	c[4] = a[4];
}

//Propagate carries between adjacent limbs (no modular reduction); the final
//limb keeps whatever overflow it accumulates.
void simplification_fast(argElement_x64 c, int num) {
	int i = 0;
	while (i + 1 < num) {
		uint64_t carry = c[i] >> RADIX;
		c[i] &= GLOB_mask51;
		c[i + 1] += carry;
		++i;
	}
}

//Digit-wise r = a + GLOB_Const_P - b, with no carry handling: adding the
//constant keeps every per-digit difference non-negative so sub and add can
//run digit-parallel. Cannot be used in Karatsuba multiplication.
//NOTE: 'a' is modified in place (a[i] += p[i]); callers must tolerate this.
__device__ void sub(argElement_x64 r, argElement_x64 a, argElement_x64 b) {
	// Read the modulus constant through a const pointer; the previous
	// const_cast stripped constness from a __device__ const object for no
	// reason (it was only ever read) and invited accidental UB.
	const uint64_t* p = GLOB_Const_P;
	for (int i = 0; i < 5; i++) {
		a[i] += p[i];
		r[i] = a[i] - b[i];
	}
}

//Digit-wise sum r = a + b; no carry propagation between limbs
//(callers rely on limb headroom in the radix-2^51 representation).
__device__ void add(argElement_x64 r, const argElement_x64 a, const argElement_x64 b)
{
	for (int idx = 0; idx != 5; ++idx) {
		r[idx] = a[idx] + b[idx];
	}
}
// Pre-bias constants for the FMA-based multipliers below: the accumulators
// c[0..9] start from these values so that, when the raw bit patterns of the
// fma.rz hi/lo partial products are summed in as integers, the embedded
// exponent fields cancel out and the accumulator holds the true integer sum.
// NOTE(review): assumed to be matched to the 5x5 product count and the
// t1/t2 = 0x4660... (2^103) constants — confirm if either changes.
#define SUM_C0 0xbcd0000000000000
#define SUM_C1 0x3340000000000000
#define SUM_C2 0xa9b0000000000000
#define SUM_C3 0x2020000000000000
#define SUM_C4 0x9690000000000000
#define SUM_C5 0x9360000000000000
#define SUM_C6 0x1cf0000000000000
#define SUM_C7 0xa680000000000000
#define SUM_C8 0x3010000000000000
#define SUM_C9 0xb9a0000000000000

//__device__ void mul_fma_5(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
//
//	//printf("0x%016llx, 0x%016llx \n", (double)a[0], (uint64_t)b[0]);
//	//const double dt1 = powf(2, 103);
//	//const double dt2 = powf(2, 103) + powf(2, 52);
//
//	const double t1 = to_double(0x4660000000000000);
//	const double t2 = to_double(0x4660000000000002);
//
//	c[0] = SUM_C0;c[1] = SUM_C1;c[2] = SUM_C2;c[3] = SUM_C3;c[4] = SUM_C4;
//	c[5] = SUM_C5;c[6] = SUM_C6;c[7] = SUM_C7;c[8] = SUM_C8;c[9] = SUM_C9;
//	/*for (int i = 0; i < 5; i++) {
//		c[i] = (uint64_t)0x466 * i + (uint64_t)0x433 * (i + 1);
//		c[i] = -(int64_t)(c[i] & 0xFFF) << 52;
//		c[9 - i] = (uint64_t)0x466 * (i + 1) + (uint64_t)0x433 * i;
//		c[9 - i] = -(int64_t)(c[9 - i] & 0xFFF) << 52;
//	}*/
//	double p_hi = 0, p_lo = 0, sub = 0;
//	for (int i = 0; i < 5; i++) {
//		for (int j = 0; j < 5; j++) {
//			double a_tmp = (double)a[i], b_tmp = (double)b[i];
//			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
//			c[i + j + 1] += to_u64(p_hi);
//			asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
//			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
//			c[i + j] += to_u64(p_lo);
//		}
//	}
//}


//Radix-2^51 modular multiplication using the double-precision FMA trick:
//t1 encodes 2^103 (exponent 0x466), so fma.rz(a,b,t1) yields the *upper*
//half of the 102-bit product embedded in a double; fma.rz(a,b,t2-p_hi)
//recovers the lower half. The raw bit patterns are accumulated as integers
//into c[], which is pre-biased with SUM_C0..SUM_C9 so the exponent fields
//cancel. Finally the upper five limbs are folded (2^255 == 16*9 = 144 mod
//2^251-9) and the result is canonicalized.
//Assumes a[i], b[j] fit exactly in a double (<= 51 bits each) — the rz
//rounding and the hi/lo split rely on the product being <= 102 bits.
__device__ void mul_fma_5_with_reduction(argElement_x64 r, argElement_x64 a, argElement_x64 b) {
	const double t1 = to_double(0x4660000000000000);
	const double t2 = to_double(0x4660000000000002);
	uint64_t c[10];
	double p_hi = 0, p_lo = 0, sub = 0, a_tmp, b_tmp;
	// Pre-biased accumulators (see SUM_C* comment).
	c[0] = SUM_C0;c[1] = SUM_C1;c[2] = SUM_C2;c[3] = SUM_C3;c[4] = SUM_C4;
	c[5] = SUM_C5;c[6] = SUM_C6;c[7] = SUM_C7;c[8] = SUM_C8;c[9] = SUM_C9;
	for (int i = 0; i < 5; i++) {
		a_tmp = (double)a[i];
		for (int j = 0; j < 5; j++) {
			b_tmp = (double)b[j];
			// High half of a[i]*b[j] (biased by t1 = 2^103).
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
			c[i + j + 1] += to_u64(p_hi);
			// Low half: fma against (t2 - p_hi) cancels the high part.
			asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
			c[i + j] += to_u64(p_lo);
		}
	}
	//2^251-9
	// Fold the upper limbs: limb 5+i sits at 2^(255+51i); 2^255 == 144 (mod p).
	for (int i = 0; i < 5; i++) {
		c[i] = c[i] + (c[5 + i]*144);
	}
	simplification_unique(r, c);
}

//Same algorithm as mul_fma_5_with_reduction, fully unrolled in one asm block
//(25 hi/lo FMA partial products, fold by 144, then carry/mask canonicalize).
//Operand map:
//  %0-%4   c[0..4]  result limbs (start pre-biased with SUM_C0..C4)
//  %5-%9   r[0..4]  upper limbs  (start pre-biased with SUM_C5..C9,
//                    recycled as scratch during the final simplify)
//  %10 p_hi, %11 sub, %12 t1 (2^103), %13 t2, %14 tmp (bit-cast scratch)
//  %15-%19 (double)a[0..4], %20-%24 (double)b[0..4]
//NOTE(review): %14 is bound as an *input* operand ("l"(tmp)) yet is written
//by every "mov.b64 %14, %10" — writing an input operand is undefined
//behavior in inline asm, and tmp is passed in uninitialized; it should be a
//scratch output ("=l"). Works only if the compiler happens not to reuse the
//register — verify the SASS or fix the constraint.
__device__ void mul_fma_5_with_reduction2(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	const double t1 = to_double(0x4660000000000000);
	const double t2 = to_double(0x4660000000000002);
	uint64_t r[5];
	double p_hi, sub;
	uint64_t tmp;
	asm volatile(
		//a[0]
		"mov.b64		%0,		0xbcd0000000000000;\n\t"
		"mov.b64		%1,		0x3340000000000000;\n\t"
		"mov.b64		%2,		0xa9b0000000000000;\n\t"
		"mov.b64		%3,		0x2020000000000000;\n\t"
		"mov.b64		%4,		0x9690000000000000;\n\t"
		"mov.b64		%5,		0x9360000000000000;\n\t"
		"mov.b64		%6,		0x1cf0000000000000;\n\t"
		"mov.b64		%7,		0xa680000000000000;\n\t"
		"mov.b64		%8,		0x3010000000000000;\n\t"
		"mov.b64		%9,		0xb9a0000000000000;\n\t"

		"fma.rz.f64		%10,	%15,	%20,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%1,		%1,		%14;\n\t"
		"fma.rz.f64		%10,	%15,	%20,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%0,		%0,		%14;\n\t"
		"fma.rz.f64		%10,	%15,	%21,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%2,		%2,		%14;\n\t"
		"fma.rz.f64		%10,	%15,	%21,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%1,		%1,		%14;\n\t"
		"fma.rz.f64		%10,	%15,	%22,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%3,		%3,		%14;\n\t"
		"fma.rz.f64		%10,	%15,	%22,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%2,		%2,		%14;\n\t"
		"fma.rz.f64		%10,	%15,	%23,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%4,		%4,		%14;\n\t"
		"fma.rz.f64		%10,	%15,	%23,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%3,		%3,		%14;\n\t"
		"fma.rz.f64		%10,	%15,	%24,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%5,		%5,		%14;\n\t"
		"fma.rz.f64		%10,	%15,	%24,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%4,		%4,		%14;\n\t"

		//a[1]
		"fma.rz.f64		%10,	%16,	%20,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%2,		%2,		%14;\n\t"
		"fma.rz.f64		%10,	%16,	%20,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%1,		%1,		%14;\n\t"
		"fma.rz.f64		%10,	%16,	%21,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%3,		%3,		%14;\n\t"
		"fma.rz.f64		%10,	%16,	%21,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%2,		%2,		%14;\n\t"
		"fma.rz.f64		%10,	%16,	%22,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%4,		%4,		%14;\n\t"
		"fma.rz.f64		%10,	%16,	%22,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%3,		%3,		%14;\n\t"
		"fma.rz.f64		%10,	%16,	%23,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%5,		%5,		%14;\n\t"
		"fma.rz.f64		%10,	%16,	%23,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%4,		%4,		%14;\n\t"
		"fma.rz.f64		%10,	%16,	%24,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%6,		%6,		%14;\n\t"
		"fma.rz.f64		%10,	%16,	%24,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%5,		%5,		%14;\n\t"

		//a[2]
		"fma.rz.f64		%10,	%17,	%20,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%3,		%3,		%14;\n\t"
		"fma.rz.f64		%10,	%17,	%20,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%2,		%2,		%14;\n\t"
		"fma.rz.f64		%10,	%17,	%21,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%4,		%4,		%14;\n\t"
		"fma.rz.f64		%10,	%17,	%21,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%3,		%3,		%14;\n\t"
		"fma.rz.f64		%10,	%17,	%22,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%5,		%5,		%14;\n\t"
		"fma.rz.f64		%10,	%17,	%22,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%4,		%4,		%14;\n\t"
		"fma.rz.f64		%10,	%17,	%23,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%6,		%6,		%14;\n\t"
		"fma.rz.f64		%10,	%17,	%23,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%5,		%5,		%14;\n\t"
		"fma.rz.f64		%10,	%17,	%24,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%7,		%7,		%14;\n\t"
		"fma.rz.f64		%10,	%17,	%24,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%6,		%6,		%14;\n\t"

		//a[3]
		"fma.rz.f64		%10,	%18,	%20,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%4,		%4,		%14;\n\t"
		"fma.rz.f64		%10,	%18,	%20,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%3,		%3,		%14;\n\t"
		"fma.rz.f64		%10,	%18,	%21,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%5,		%5,		%14;\n\t"
		"fma.rz.f64		%10,	%18,	%21,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%4,		%4,		%14;\n\t"
		"fma.rz.f64		%10,	%18,	%22,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%6,		%6,		%14;\n\t"
		"fma.rz.f64		%10,	%18,	%22,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%5,		%5,		%14;\n\t"
		"fma.rz.f64		%10,	%18,	%23,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%7,		%7,		%14;\n\t"
		"fma.rz.f64		%10,	%18,	%23,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%6,		%6,		%14;\n\t"
		"fma.rz.f64		%10,	%18,	%24,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%8,		%8,		%14;\n\t"
		"fma.rz.f64		%10,	%18,	%24,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%7,		%7,		%14;\n\t"

		//a[4]
		"fma.rz.f64		%10,	%19,	%20,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%5,		%5,		%14;\n\t"
		"fma.rz.f64		%10,	%19,	%20,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%4,		%4,		%14;\n\t"
		"fma.rz.f64		%10,	%19,	%21,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%6,		%6,		%14;\n\t"
		"fma.rz.f64		%10,	%19,	%21,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%5,		%5,		%14;\n\t"
		"fma.rz.f64		%10,	%19,	%22,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%7,		%7,		%14;\n\t"
		"fma.rz.f64		%10,	%19,	%22,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%6,		%6,		%14;\n\t"
		"fma.rz.f64		%10,	%19,	%23,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%8,		%8,		%14;\n\t"
		"fma.rz.f64		%10,	%19,	%23,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%7,		%7,		%14;\n\t"
		"fma.rz.f64		%10,	%19,	%24,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%9,		%9,		%14;\n\t"
		"fma.rz.f64		%10,	%19,	%24,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%8,		%8,		%14;\n\t"
		//reduction
		"mad.lo.u64 	%0,		%5, 	144,	%0;\n\t"
		"mad.lo.u64 	%1,		%6, 	144,	%1;\n\t"
		"mad.lo.u64 	%2,		%7, 	144,	%2;\n\t"
		"mad.lo.u64 	%3,		%8, 	144,	%3;\n\t"
		"mad.lo.u64 	%4,		%9, 	144,	%4;\n\t"
		//simplify
		
		"shr.b64		%5,		%4,		47;\n\t"
		"and.b64		%4,		%4,		0x7FFFFFFFFFFF;\n\t"
		"mad.lo.u64		%0,		%5,		9,		%0;\n\t"

		"shl.b64		%5,		%0,		51;\n\t"
		"add.s64		%1,		%1,		%5;\n\t"
		"and.b64		%0,		%0,		0x7FFFFFFFFFFFF;\n\t"

		"shl.b64		%5,		%1,		51;\n\t"
		"add.s64		%2,		%2,		%5;\n\t"
		"and.b64		%1,		%1,		0x7FFFFFFFFFFFF;\n\t"
		
		"shl.b64		%5,		%2,		51;\n\t"
		"add.s64		%3,		%3,		%5;\n\t"
		"and.b64		%2,		%2,		0x7FFFFFFFFFFFF;\n\t"

		"shl.b64		%5,		%3,		51;\n\t"
		"add.s64		%4,		%4,		%5;\n\t"
		"and.b64		%3,		%3,		0x7FFFFFFFFFFFF;\n\t"

		:"=l"(c[0]),"=l"(c[1]), "=l"(c[2]), "=l"(c[3]),	"=l"(c[4]),//0
		"=l"(r[0]), "=l"(r[1]), "=l"(r[2]), "=l"(r[3]), "=l"(r[4]),//5
		"=d"(p_hi), "=d"(sub): "d"(t1), "d"(t2),"l"(tmp),//10
		"d"(double(a[0])), "d"(double(a[1])), "d"(double(a[2])), "d"(double(a[3])), "d"(double(a[4])), //15
		"d"(double(b[0])), "d"(double(b[1])), "d"(double(b[2])), "d"(double(b[3])), "d"(double(b[4]))//20
		);
		//2^251-9

		/*uint64_t t = a[4] >> LAST_DIGIT;
		a[4] = a[4] & GLOB_mask47;
		a[0] = a[0] + t * 9;
		for (int i = 0; i < 4; i++) {
			a[i + 1] += (a[i] >> RADIX);
			c[i] &= GLOB_mask51;
		}
		c[4] = a[4];*/
}

//FMA-based squaring in radix 2^51 using the same hi/lo product-extraction
//trick as mul_fma_5_with_reduction: symmetric cross products a[i]*a[j] (i<j)
//are computed once and doubled (<< 1); diagonal terms a[i]^2 are added once.
//c_tmp1/c_tmp2/c_tmp3 carry partial sums between limb columns.
//NOTE(review): the final fold adds (c5 << 5) + c5 = 33 * c[5+i] per limb,
//whereas reduction() and mul_fma_5_with_reduction fold the upper limbs with
//a factor of 144 (2^255 == 144 mod 2^251-9). Confirm which factor is correct
//for this limb layout — the two paths disagree.
__device__ void sqr_fma_5(argElement_x64 r, argElement_x64 a) {
	const double t1 = to_double(0x4660000000000000);
	const double t2 = to_double(0x4660000000000002);
	uint64_t c_tmp1 = 0, c_tmp2 = 0, c_tmp3 = 0;
	uint64_t c[10];
	double p_hi, p_lo, sub, a_tmp, b_tmp;
	//c0
	a_tmp = (double)a[0];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(a_tmp), "d"(t1));
	c_tmp3 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(a_tmp), "d"(sub));
	c[0] = SUM_C0 + to_u64(p_lo);
	//c1
	b_tmp = (double)a[1];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 = to_u64(p_lo) << 1;
	c[1] = c_tmp1 + SUM_C1 + c_tmp3;
	//c2
	c_tmp1 = c_tmp2;
	b_tmp = (double)a[2];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);

	a_tmp = (double)a[1];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(a_tmp), "d"(t1));
	c_tmp3 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(a_tmp), "d"(sub));
	c[2] = (c_tmp1 << 1) + to_u64(p_lo) + SUM_C2;
	//c3:a1b2
	c_tmp1 = c_tmp2;
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);

	a_tmp = (double)a[0];
	b_tmp = (double)a[3];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 += to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);
	c[3] = (c_tmp1 << 1) + c_tmp3 + SUM_C3;
	//c4:a0b4
	c_tmp1 = c_tmp2;
	b_tmp = (double)a[4];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);

	a_tmp = (double)a[1];b_tmp = (double)a[3];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 += to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);

	a_tmp = (double)a[2];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(a_tmp), "d"(t1));
	c_tmp3 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(a_tmp), "d"(sub));
	c[4] = (c_tmp1 << 1) + to_u64(p_lo) + SUM_C4;
	//c5:a2b3
	c_tmp1 = c_tmp2;
	b_tmp = (double)a[3];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);

	a_tmp = (double)a[1];b_tmp = (double)a[4];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 += to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);
	c[5] = (c_tmp1 << 1) + c_tmp3 + SUM_C5;
	//c6:a2b4
	c_tmp1 = c_tmp2;
	a_tmp = (double)a[2];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);

	a_tmp = (double)a[3];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(a_tmp), "d"(t1));
	c_tmp3 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(a_tmp), "d"(sub));
	c[6] = (c_tmp1 << 1) + to_u64(p_lo) + SUM_C6;
	//c7:a3b4
	c_tmp1 = c_tmp2;
	b_tmp = (double)a[4];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);
	c[7] = (c_tmp1 << 1) + c_tmp3 + SUM_C7;
	//c8:a4b4
	//c_tmp1 = c_tmp2;
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(b_tmp), "d"(b_tmp), "d"(t1));
	c[9] = to_u64(p_hi) + SUM_C9;
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(b_tmp), "d"(b_tmp), "d"(sub));
	c[8] = (c_tmp2 << 1) + to_u64(p_lo) + SUM_C8;
	// Fold the upper limbs: c[i] += 33 * c[5+i] (see NOTE above re 33 vs 144).
	for (int i = 0; i < 5; i++) {
		uint64_t c5 = c[5 + i];
		c[i] = c[i] + (c5 << 5) + c5;
	}
	simplification_unique(r, c);
}

#undef SUM_C0 
#undef SUM_C1 
#undef SUM_C2 
#undef SUM_C3 
#undef SUM_C4 
#undef SUM_C5 
#undef SUM_C6 
#undef SUM_C7 
#undef SUM_C8 
#undef SUM_C9 

//Reduction for multiplication and squaring: fold the upper five limbs into
//the lower five, using 2^255 == 16*9 = 144 (mod 2^251-9). Safe when c == a.
__device__ void reduction(argElement_x64 c, argElement_x64 a) {
	int limb = 0;
	while (limb < 5) {
		c[limb] = a[limb] + 144 * a[limb + 5];
		++limb;
	}
}

//modular multiplication with karatsuba multiplication
// Benchmark kernel, strided element layout: element t occupies indices
// [5*t, 5*t + 5) of a, b and c. Each thread loads one operand pair into
// registers, repeats the modular multiplication Iterate times (for timing),
// and writes the last result back.
__global__ void mul_fma_5_reduction(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	// Flat global thread id for a 2D block / 1D grid launch.
	int gtid = blockIdx.x * blockDim.x * blockDim.y
	         + threadIdx.y * blockDim.x + threadIdx.x;
	int base = 5 * gtid;
	uint64_t tc[NUM_DIGITS_251_x64_fp];
	uint64_t ta[NUM_DIGITS_251_x64_fp];
	uint64_t tb[NUM_DIGITS_251_x64_fp];
	for (int limb = 0; limb < NUM_DIGITS_251_x64_fp; limb++) {
		ta[limb] = a[base + limb];
		tb[limb] = b[base + limb];
	}
	for (int iter = 0; iter < Iterate; iter++) {
		mul_fma_5_with_reduction2(tc, ta, tb);
	}
	for (int limb = 0; limb < NUM_DIGITS_251_x64_fp; limb++) {
		c[base + limb] = tc[limb];
	}
}

// Benchmark kernel for modular squaring, strided element layout (element t at
// indices [5*t, 5*t + 5)). Each thread squares its element Iterate times:
// raw 10-limb square, fold of the upper limbs (in place), then canonical
// simplification. The 10-limb scratch buffer is deliberately reused across
// iterations without re-zeroing, matching the original benchmark behavior.
__global__ void sqr_fma_5_reduction(argElement_x64 c, argElement_x64 a) {
	// Flat global thread id for a 2D block / 1D grid launch.
	int gtid = blockIdx.x * blockDim.x * blockDim.y
	         + threadIdx.y * blockDim.x + threadIdx.x;
	int base = 5 * gtid;
	EltBN251_x64_fp_buffer scratch = { 0 };
	EltBN251_x64_fp tc, ta;
	for (int limb = 0; limb < NUM_DIGITS_251_x64_fp; limb++) {
		ta[limb] = a[base + limb];
	}
	for (int iter = 0; iter < Iterate; iter++) {
		sqr_fma_5(scratch, ta);
		reduction(scratch, scratch);        // fold limbs 5..9 into 0..4 in place
		simplification_unique(tc, scratch);
	}
	for (int limb = 0; limb < NUM_DIGITS_251_x64_fp; limb++) {
		c[base + limb] = tc[limb];
	}
}

//Modular multiplication kernel, consecutive (structure-of-arrays) layout:
//limb i of element t lives at index t + N*i, so each limb load is coalesced
//across a warp. Despite the comment on the strided variant, this is a plain
//5x5 schoolbook product built from the biased fma.rz hi/lo split.
__global__ void mul_fma_5_reduction_consecutive(argElement_x64 r, argElement_x64 a, argElement_x64 b) {
	int bid = blockIdx.x * blockDim.x * blockDim.y;
	int t = bid + (threadIdx.x + threadIdx.y * blockDim.x);

	EltBN251_x64_fp_buffer c = { 0 };
	// Splitting constants for the fma.rz hi/lo product extraction.
	const double t1 = to_double(0x4660000000000000);
	const double t2 = to_double(0x4660000000000002);

	// Pre-bias each accumulator column so the biased hi/lo FMA results
	// (which carry the 0x466/0x433 exponent offsets) sum to the true value.
	for (int i = 0; i < 5; i++) {
		c[i] = (uint64_t)0x466 * i + (uint64_t)0x433 * (i + 1);
		c[i] = -(int64_t)(c[i] & 0xFFF) << 52;
		c[9 - i] = (uint64_t)0x466 * (i + 1) + (uint64_t)0x433 * i;
		c[9 - i] = -(int64_t)(c[9 - i] & 0xFFF) << 52;
	}
	double p_hi = 0, p_lo = 0, sub = 0;
	for (int i = 0; i < 5; i++) {
		for (int j = 0; j < 5; j++) {
			// p_hi = high half of a_i*b_j (biased by t1); p_lo = the remainder.
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"((double)a[t + N * i]), "d"((double)b[t + N * j]), "d"(t1));
			c[i + j + 1] += to_u64(p_hi);
			asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"((double)a[t + N * i]), "d"((double)b[t + N * j]), "d"(sub));
			c[i + j] += to_u64(p_lo);
		}
	}
	// Fold the upper columns back into the lower five (weight per this
	// representation: (x << 5) + x; see the fp routines above).
	for (int i = 0; i < 5; i++) {
		c[i] = c[i] + (c[5 + i] << 5) + c[5 + i];
	}
	uint64_t tmp = c[4] >> LAST_DIGIT;
	c[4] = c[4] & GLOB_mask47;
	c[0] = c[0] + tmp * 9;   // 2^251 ≡ 9 (mod p = 2^251 - 9)
	for (int i = 0; i < 5; i++) {
		c[i + 1] += (c[i] >> RADIX);
		// Bug fix: this used to execute `r[t + N * i] &= GLOB_mask51;`, which
		// only masked the zero-initialized output and never stored the
		// computed limb. Store the masked limb instead.
		r[t + N * i] = c[i] & GLOB_mask51;
	}
	// Top limb stored unmasked, matching the original's final write.
	r[t + 4 * N] = c[4];
}

extern "C"
// Host wrapper: copies N field elements (5 x 64-bit limbs each, strided
// layout) for a and b to the device, times the mul_fma_5_reduction kernel,
// and copies the products back into c. a, b and c must each hold 5*N uint64_t.
void mul_fma5_reductionAPI(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	argElement_x64 dev_a = NULL;
	argElement_x64 dev_b = NULL;
	argElement_x64 dev_c = NULL;

	dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT); // BLOCK_SIZE threads per block
	int numBlocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
	int numThreads = numBlocks * BLOCK_SIZE;         // == N (N is a multiple of BLOCK_SIZE)

	int deviceNum = 0;
	float delta_time1 = 0.0f;
	cudaEvent_t kernel_start1, kernel_stop1;
	struct cudaDeviceProp device_prop;
	char device_prefix[261];

	// Select which GPU this run executes on.
	cudaSetDevice(deviceNum);
	size_t bytesPerElt = 5 * sizeof(uint64_t);       // one field element
	size_t totalBytes = bytesPerElt * numThreads;
	cudaMalloc((void**)&dev_c, totalBytes);
	cudaMalloc((void**)&dev_a, totalBytes);
	cudaMalloc((void**)&dev_b, totalBytes);

	// Zero device buffers, then upload the operand vectors for c = a * b.
	cudaMemset(dev_c, 0, totalBytes);
	cudaMemset(dev_a, 0, totalBytes);
	cudaMemset(dev_b, 0, totalBytes);

	cudaMemcpy(dev_a, a, totalBytes, cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b, b, totalBytes, cudaMemcpyHostToDevice);

	cudaEventCreate(&kernel_start1);
	cudaEventCreateWithFlags(&kernel_stop1, cudaEventBlockingSync);
	cudaGetDeviceProperties(&device_prop, deviceNum);
	// Bug fix: build the prefix before printing it (it was read uninitialized).
	snprintf(device_prefix, sizeof(device_prefix), "ID:%d %s:", deviceNum, device_prop.name);
	cudaEventRecord(kernel_start1, 0);

	mul_fma_5_reduction << <numBlocks, threadsPerBlock >> > (dev_c, dev_a, dev_b);
	// Kernel launches return nothing; surface launch-configuration errors here.
	cudaError_t launchErr = cudaGetLastError();
	if (launchErr != cudaSuccess) {
		fprintf(stderr, "mul_fma_5_reduction launch failed: %s\n", cudaGetErrorString(launchErr));
	}
	cudaEventRecord(kernel_stop1, 0);
	cudaEventSynchronize(kernel_stop1);
	cudaEventElapsedTime(&delta_time1, kernel_start1, kernel_stop1);
	printf("%s:mul_fma_10 time: %.2fms. Execute %d operations.\n", device_prefix, delta_time1, N);

	cudaMemcpy(c, dev_c, totalBytes, cudaMemcpyDeviceToHost); // numThreads == N, same size as before

	// Previously leaked: timing events were never destroyed.
	cudaEventDestroy(kernel_start1);
	cudaEventDestroy(kernel_stop1);
	cudaFree(dev_c);
	cudaFree(dev_a);
	cudaFree(dev_b);
}

extern "C"
// Host wrapper: copies N field elements (5 x 64-bit limbs each, strided
// layout) to the device, runs the sqr_fma_5_reduction kernel and copies the
// squares back into c. a and c must each hold 5*N uint64_t.
void sqr_fma5_reductionAPI(argElement_x64 c, argElement_x64 a) {
	argElement_x64 dev_a = NULL;
	argElement_x64 dev_c = NULL;

	dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT); // BLOCK_SIZE threads per block
	int numBlocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
	int numThreads = numBlocks * BLOCK_SIZE;         // == N (N is a multiple of BLOCK_SIZE)

	int deviceNum = 0;
	float delta_time1 = 0.0f;
	cudaEvent_t kernel_start1, kernel_stop1;

	// Select which GPU this run executes on.
	cudaSetDevice(deviceNum);
	size_t totalBytes = 5 * sizeof(uint64_t) * numThreads;
	cudaMalloc((void**)&dev_c, totalBytes);
	cudaMalloc((void**)&dev_a, totalBytes);

	// Zero device buffers, then upload the operand vector.
	cudaMemset(dev_c, 0, totalBytes);
	cudaMemset(dev_a, 0, totalBytes);

	cudaMemcpy(dev_a, a, totalBytes, cudaMemcpyHostToDevice);

	cudaEventCreate(&kernel_start1);
	cudaEventCreateWithFlags(&kernel_stop1, cudaEventBlockingSync);
	cudaEventRecord(kernel_start1, 0);

	sqr_fma_5_reduction << <numBlocks, threadsPerBlock >> > (dev_c, dev_a);
	// Kernel launches return nothing; surface launch-configuration errors here.
	cudaError_t launchErr = cudaGetLastError();
	if (launchErr != cudaSuccess) {
		fprintf(stderr, "sqr_fma_5_reduction launch failed: %s\n", cudaGetErrorString(launchErr));
	}
	cudaEventRecord(kernel_stop1, 0);
	cudaEventSynchronize(kernel_stop1);
	cudaEventElapsedTime(&delta_time1, kernel_start1, kernel_stop1);
	// Timing retained in delta_time1; the report printf stays disabled as before.

	cudaMemcpy(c, dev_c, totalBytes, cudaMemcpyDeviceToHost); // numThreads == N, same size as before

	// Previously leaked: timing events were never destroyed.
	cudaEventDestroy(kernel_start1);
	cudaEventDestroy(kernel_stop1);
	cudaFree(dev_c);
	cudaFree(dev_a);
}

extern "C"
// Host wrapper for the structure-of-arrays kernel: a, b and c must be laid
// out consecutively (limb i of element t at index t + N*i — see
// AssignNfp_consecutive), each holding 5*N uint64_t.
void mul_fma5_reductionAPI_consecutive(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	argElement_x64 dev_a = NULL;
	argElement_x64 dev_b = NULL;
	argElement_x64 dev_c = NULL;

	dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT); // BLOCK_SIZE threads per block
	int numBlocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
	int numThreads = numBlocks * BLOCK_SIZE;         // == N (N is a multiple of BLOCK_SIZE)

	int deviceNum = 0;
	float delta_time1 = 0.0f;
	cudaEvent_t kernel_start1, kernel_stop1;

	// Select which GPU this run executes on.
	cudaSetDevice(deviceNum);
	size_t totalBytes = 5 * sizeof(uint64_t) * numThreads;
	cudaMalloc((void**)&dev_c, totalBytes);
	cudaMalloc((void**)&dev_a, totalBytes);
	cudaMalloc((void**)&dev_b, totalBytes);

	// Zero device buffers, then upload the operand vectors for c = a * b.
	cudaMemset(dev_c, 0, totalBytes);
	cudaMemset(dev_a, 0, totalBytes);
	cudaMemset(dev_b, 0, totalBytes);

	cudaMemcpy(dev_a, a, totalBytes, cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b, b, totalBytes, cudaMemcpyHostToDevice);

	cudaEventCreate(&kernel_start1);
	cudaEventCreateWithFlags(&kernel_stop1, cudaEventBlockingSync);
	cudaEventRecord(kernel_start1, 0);

	mul_fma_5_reduction_consecutive << <numBlocks, threadsPerBlock >> > (dev_c, dev_a, dev_b);
	// Kernel launches return nothing; surface launch-configuration errors here.
	cudaError_t launchErr = cudaGetLastError();
	if (launchErr != cudaSuccess) {
		fprintf(stderr, "mul_fma_5_reduction_consecutive launch failed: %s\n", cudaGetErrorString(launchErr));
	}
	cudaEventRecord(kernel_stop1, 0);
	cudaEventSynchronize(kernel_stop1);
	cudaEventElapsedTime(&delta_time1, kernel_start1, kernel_stop1);
	// Timing retained in delta_time1; the report printf stays disabled as before.

	cudaMemcpy(c, dev_c, totalBytes, cudaMemcpyDeviceToHost); // numThreads == N, same size as before

	// Previously leaked: timing events were never destroyed.
	cudaEventDestroy(kernel_start1);
	cudaEventDestroy(kernel_stop1);
	cudaFree(dev_c);
	cudaFree(dev_a);
	cudaFree(dev_b);
}

extern "C"
// Host wrapper for the 32-bit integer multiplication benchmark: copies N
// elements (NUM_DIGITS_251_x32 x 32-bit limbs each, strided layout) to the
// device, times intmul2_kernel and copies the products back into c.
void intmulAPI(argElement_x32 c, argElement_x32 a, argElement_x32 b) {
	argElement_x32 dev_a = NULL;
	argElement_x32 dev_b = NULL;
	argElement_x32 dev_c = NULL;

	dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT); // BLOCK_SIZE threads per block
	int numBlocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
	int numThreads = numBlocks * BLOCK_SIZE;         // == N (N is a multiple of BLOCK_SIZE)

	int deviceNum = 0;
	float delta_time1 = 0.0f;
	cudaEvent_t kernel_start1, kernel_stop1;
	struct cudaDeviceProp device_prop;
	char device_prefix[261];

	// Select which GPU this run executes on.
	cudaSetDevice(deviceNum);
	size_t totalBytes = NUM_DIGITS_251_x32 * sizeof(uint32_t) * numThreads;
	cudaMalloc((void**)&dev_c, totalBytes);
	cudaMalloc((void**)&dev_a, totalBytes);
	cudaMalloc((void**)&dev_b, totalBytes);

	// Zero device buffers, then upload the operand vectors for c = a * b.
	cudaMemset(dev_c, 0, totalBytes);
	cudaMemset(dev_a, 0, totalBytes);
	cudaMemset(dev_b, 0, totalBytes);

	cudaMemcpy(dev_a, a, totalBytes, cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b, b, totalBytes, cudaMemcpyHostToDevice);

	cudaEventCreate(&kernel_start1);
	cudaEventCreateWithFlags(&kernel_stop1, cudaEventBlockingSync);
	cudaGetDeviceProperties(&device_prop, deviceNum);
	// Bug fix: build the prefix before printing it (it was read uninitialized).
	snprintf(device_prefix, sizeof(device_prefix), "ID:%d %s:", deviceNum, device_prop.name);
	cudaEventRecord(kernel_start1, 0);

	//intmul_kernel << <numBlocks, threadsPerBlock >> > (dev_c, dev_a, dev_b);
	intmul2_kernel << <numBlocks, threadsPerBlock >> > (dev_c, dev_a, dev_b);
	// Kernel launches return nothing; surface launch-configuration errors here.
	cudaError_t launchErr = cudaGetLastError();
	if (launchErr != cudaSuccess) {
		fprintf(stderr, "intmul2_kernel launch failed: %s\n", cudaGetErrorString(launchErr));
	}
	cudaEventRecord(kernel_stop1, 0);
	cudaEventSynchronize(kernel_stop1);
	cudaEventElapsedTime(&delta_time1, kernel_start1, kernel_stop1);
	printf("%s:intmul time: %.2fms. Execute %d operations.\n", device_prefix, delta_time1, N);

	cudaMemcpy(c, dev_c, totalBytes, cudaMemcpyDeviceToHost); // numThreads == N, same size as before

	// Previously leaked: timing events were never destroyed.
	cudaEventDestroy(kernel_start1);
	cudaEventDestroy(kernel_stop1);
	cudaFree(dev_c);
	cudaFree(dev_a);
	cudaFree(dev_b);
}

extern "C"
// Host wrapper for the 32-bit integer squaring benchmark: copies N elements
// (NUM_DIGITS_251_x32 x 32-bit limbs each, strided layout) to the device,
// runs intsqr_kernel and copies the squares back into c.
void intsqrAPI(argElement_x32 c, argElement_x32 a) {
	argElement_x32 dev_a = NULL;
	argElement_x32 dev_c = NULL;

	dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT); // BLOCK_SIZE threads per block
	int numBlocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
	int numThreads = numBlocks * BLOCK_SIZE;         // == N (N is a multiple of BLOCK_SIZE)

	int deviceNum = 0;
	float delta_time1 = 0.0f;
	cudaEvent_t kernel_start1, kernel_stop1;

	// Select which GPU this run executes on.
	cudaSetDevice(deviceNum);
	size_t totalBytes = NUM_DIGITS_251_x32 * sizeof(uint32_t) * numThreads;
	cudaMalloc((void**)&dev_c, totalBytes);
	cudaMalloc((void**)&dev_a, totalBytes);

	// Zero device buffers, then upload the operand vector.
	cudaMemset(dev_c, 0, totalBytes);
	cudaMemset(dev_a, 0, totalBytes);

	cudaMemcpy(dev_a, a, totalBytes, cudaMemcpyHostToDevice);

	cudaEventCreate(&kernel_start1);
	cudaEventCreateWithFlags(&kernel_stop1, cudaEventBlockingSync);
	cudaEventRecord(kernel_start1, 0);

	intsqr_kernel << <numBlocks, threadsPerBlock >> > (dev_c, dev_a);
	// Kernel launches return nothing; surface launch-configuration errors here.
	cudaError_t launchErr = cudaGetLastError();
	if (launchErr != cudaSuccess) {
		fprintf(stderr, "intsqr_kernel launch failed: %s\n", cudaGetErrorString(launchErr));
	}
	cudaEventRecord(kernel_stop1, 0);
	cudaEventSynchronize(kernel_stop1);
	cudaEventElapsedTime(&delta_time1, kernel_start1, kernel_stop1);
	// Timing retained in delta_time1; the report printf stays disabled as before.

	cudaMemcpy(c, dev_c, totalBytes, cudaMemcpyDeviceToHost); // numThreads == N, same size as before

	// Previously leaked: timing events were never destroyed.
	cudaEventDestroy(kernel_start1);
	cudaEventDestroy(kernel_stop1);
	cudaFree(dev_c);
	cudaFree(dev_a);
}


// Prints a field element as one big hexadecimal number ("0X" + num limbs of
// 13 hex digits each, most significant limb first) followed by a newline.
// Side effect: normalizes c in place via simplification before printing.
// num == 5 selects the canonical (unique) simplification; any other value
// uses the fast variant on num limbs.
void print(FILE* file, argElement_x64 c, int num) {
	if (num == 5) {
		simplification_unique(c,c);
	}
	else {
		simplification_fast(c, num);
	}

	// Bug fix: output previously went to stdout via printf, ignoring `file`.
	fprintf(file, "0X");
	for (int i = num - 1; i >= 0; i--) {
		// Cast so %llX is correct even where uint64_t is unsigned long.
		fprintf(file, "%013llX", (unsigned long long)c[i]);
	}
	fprintf(file, "\n");
}


//only for EltBN251_x64_fp
// Replicates one 5-limb element into n strided copies: copy j occupies
// dst[5*j .. 5*j + 4]. dst must hold at least 5*n uint64_t and must not
// overlap src.
void AssignNfp(argElement_x64 dst, argElement_x64 src, int n) {
	for (int limb = 0; limb < 5; limb++) {
		uint64_t v = src[limb];
		for (int copy = 0; copy < n; copy++) {
			dst[5 * copy + limb] = v;
		}
	}
}

// Replicates one 5-limb element into n copies in structure-of-arrays order:
// limb i of copy j lands at dst[j + i*n], matching the layout expected by the
// *_consecutive kernels. dst must hold at least 5*n uint64_t and must not
// overlap src.
void AssignNfp_consecutive(argElement_x64 dst, argElement_x64 src, int n) {
	for (int limb = 0; limb < 5; limb++) {
		uint64_t v = src[limb];
		uint64_t* row = dst + limb * n;
		for (int copy = 0; copy < n; copy++) {
			row[copy] = v;
		}
	}
}

// Replicates one NUM_DIGITS_251_x32-limb 32-bit element into n strided
// copies: copy j occupies dst[NUM_DIGITS_251_x32*j ..]. dst must hold at
// least NUM_DIGITS_251_x32*n uint32_t and must not overlap src.
void AssignN32(argElement_x32 dst, argElement_x32 src, int n) {
	for (int limb = 0; limb < NUM_DIGITS_251_x32; limb++) {
		uint32_t v = src[limb];
		for (int copy = 0; copy < n; copy++) {
			dst[NUM_DIGITS_251_x32 * copy + limb] = v;
		}
	}
}

// Benchmark driver: builds N copies of fixed test vectors in both the strided
// and consecutive layouts, runs each fp and integer kernel wrapper once, and
// prints the first and last element of selected results for spot-checking.
int main() {
	EltBN251_x64_fp A = { 0x134934453435,0x134462363435,0x131232453435,0x134925323435,0x15353535345 };
	EltBN251_x64_fp B = { 0x1F35124FFFFF,0x1F24144FFFFF,0x1FF1252FFFFF,0x1F152FFFFFFF,0x1FF4542222FF };
	uint64_t* AN = new uint64_t[5 * N];
	uint64_t* BN = new uint64_t[5 * N];
	uint64_t* CN = new uint64_t[5 * N];

	//print(stdout, A, 10);
	//print(stdout, B, 10);
	//This result is correct.

	// Strided layout: print first and last replicated elements as a sanity check.
	AssignNfp(AN, A, N);
	AssignNfp(BN, B, N);
	print(stdout, AN, 5);
	print(stdout, BN, 5);
	print(stdout, AN + (N - 1) * 5, 5);
	print(stdout, BN + (N - 1) * 5, 5);
	mul_fma5_reductionAPI(CN, AN, BN);

	mul_fma5_reductionAPI(CN, AN, AN);
	printf("A^2  mod P mul:=\n");
	print(stdout, CN, 5);
	print(stdout, CN + (N - 1) * 5, 5);

	sqr_fma5_reductionAPI(CN, AN);
	printf("A^2  mod P sqr:=\n");
	print(stdout, CN, 5);
	print(stdout, CN + (N - 1) * 5, 5);

	/*mul_fma5_reductionAPI(CN, AN, BN);
	printf("AB mod P:=\n");
	print(stdout, CN, 5);
	print(stdout, CN + (N - 1) * 5, 5);

	mul_fma5_reductionAPI(CN, AN, AN);
	printf("A^2 mod P:=\n");
	print(stdout, CN, 5);
	print(stdout, CN + (N - 1) * 5, 5);
	mul_fma5_reductionAPI(CN, BN, BN);
	printf("B^2 mod P:=\n");
	print(stdout, CN, 5);
	print(stdout, CN + (N - 1) * 5, 5);*/

	// Consecutive (structure-of-arrays) layout run.
	AssignNfp_consecutive(AN, A, N);
	AssignNfp_consecutive(BN, B, N);
	print(stdout, AN, 5);
	print(stdout, BN, 5);
	print(stdout, AN + (N - 1) * 5, 5);
	print(stdout, BN + (N - 1) * 5, 5);
	mul_fma5_reductionAPI_consecutive(CN, AN, BN);

	// Only 7 initializers for 8 limbs: the last limb is value-initialized to 0.
	uint32_t A32[8] = { 0x1234124,0x324234,0x34242332,0x342342,0x5342423,0x34242332, 0x34242332 };
	uint32_t B32[8] = { 0x1234124,0x324234,0x34242332,0x342342,0x5342423,0x34242332, 0x34242332 };
	uint32_t* A32N = new uint32_t[NUM_DIGITS_251_x32 * N];
	uint32_t* B32N = new uint32_t[NUM_DIGITS_251_x32 * N];
	uint32_t* C32N = new uint32_t[NUM_DIGITS_251_x32 * N];
	AssignN32(A32N, A32, N);
	AssignN32(B32N, B32, N);
	intsqrAPI(C32N, A32N);

	intmulAPI(C32N, A32N, B32N);

	// Bug fix: arrays allocated with new[] must be released with delete[]
	// (plain delete here was undefined behavior), and the 32-bit buffers
	// were previously never freed.
	delete[] AN;
	delete[] BN;
	delete[] CN;
	delete[] A32N;
	delete[] B32N;
	delete[] C32N;
	return 0;
}