﻿#include<iostream>
#include<limits>
#include<iomanip>
#include<cstdint>

#include "cuda_runtime.h"
#include "device_launch_parameters.h"

using namespace std;


#define argElement_x32 uint32_t*
#define argElement_x64 uint64_t*
#define argElement_fp double*
#define NUM_DIGITS_521_x32 17
#define NUM_DIGITS_521_x64 9
#define NUM_DIGITS_521_x64_fp 10

#define BLOCK_WIDTH 32
#define BLOCK_HEIGHT 2
#define BLOCK_SIZE (BLOCK_WIDTH*BLOCK_HEIGHT)

typedef uint64_t EltBN521_x64[NUM_DIGITS_521_x64];
typedef uint64_t EltBN521_x64_fp[10];
typedef uint64_t EltBN521_x64_fp_buffer[20];
typedef double EltBN521_fp[10];

#define N (56*BLOCK_SIZE)
#define Iterate 1000000

// Integer (32-bit limb) implementation: 17 limbs per element.
// NOTE(review): the comments originally here claimed "17 58-bit" digits and
// "p=2^221-3", which does not match the 32-bit limbs and the 521-naming used
// throughout this file -- they look copy-pasted from another field's code.

// Reduction for the integer implementation: folds the upper half of the
// 34-limb product c[17..33] back into the low 17 limbs using the fold
// constants 384 and 187 (presumably derived from the prime), then writes the
// near-reduced result to r.
// NOTE(review): this function assumes the PTX condition-code carry survives
// between separate asm() statements and across loop iterations; the compiler
// is not obliged to guarantee that.
__device__ void intreduction(argElement_x32 r, argElement_x32 c) {
	uint64_t t = 0;
	// first fold: c[i] += c[17+i] * 384, with a 64-bit running carry in t
	for (int i = 0;i < NUM_DIGITS_521_x32 - 1;i++) {
		t = (uint64_t)c[NUM_DIGITS_521_x32 + i] * 384 + c[i] + t;
		c[i] = (uint32_t)t;
		t = (t >> 32);
	}
	// NOTE(review): c[11] looks wrong for a 17-limb layout -- the last limb is
	// c[16] (= c[NUM_DIGITS_521_x32 - 1]); c[11]/the 12-limb pattern appears
	// ported from a smaller field. Verify against the original implementation.
	t = (uint64_t)c[33] * 384 + c[11];
	c[11] = t & 0x7FFFFFFF;
	t = (t >> 31);
	// NOTE(review): the carry computed on the previous line is immediately
	// overwritten here -- dead code or a bug; verify intended behaviour.
	t = (c[NUM_DIGITS_521_x32 - 1] >> 31);
	//second round: fold the single overflow bit back in, scaled by 187
	t = c[0] + t * 187;
	r[0] = (uint32_t)t;
	t = t >> 32;
	asm("addc.cc.u32 %0,%1,%2;":"=r"(r[1]) : "r"((uint32_t)t), "r"(c[1]));
	// propagate the carry chain through the remaining limbs via PTX addc
	for (int i = 2;i < NUM_DIGITS_521_x32;i++) {
		asm("addc.cc.u32 %0,0,%1;":"=r"(r[i]) : "r"(c[i]));
	}
}

// Schoolbook 17x17-limb multiplication (operand scanning).
// Accumulates the 34-limb product into c via PTX mad.lo / madc.hi carry
// chains, then reduces modulo the prime with intreduction.
// NOTE(review): correctness relies on the PTX carry flag surviving between
// separate asm() statements and across loop iterations.
__device__ void intmul(argElement_x32 r, argElement_x32 a, argElement_x32 b) {
	uint32_t c[34];	// double-length accumulator
	for (int i = 0;i < 34;i++) {
		c[i] = 0;
	}

	// row 0: low halves of a[j] * b[0] (no carries possible yet)
	for (int j = 0;j < NUM_DIGITS_521_x32;j++) {
		// c[j]=0;
		asm("mad.lo.u32 %0,%1,%2,%3;":"=r"(c[j]) : "r"(a[j]), "r"(b[0]), "r"(c[j]));
	}
	// row 0: high halves, shifted one limb up, with a carry chain
	asm("mad.hi.cc.u32 %0,%1,%2,%3;":"=r"(c[1]) : "r"(a[0]), "r"(b[0]), "r"(c[1]));
	// c[NUM_DIGITS_521_x32]=0;
	for (int j = 1;j < NUM_DIGITS_521_x32;j++) {
		asm("madc.hi.cc.u32 %0,%1,%2,%3;":"=r"(c[j + 1]) : "r"(a[j]), "r"(b[0]), "r"(c[j + 1]));
	}

	// rows 1..16: low-half pass, absorb the trailing carry, high-half pass
	for (int i = 1;i < NUM_DIGITS_521_x32;i++) {
		for (int j = 0;j < NUM_DIGITS_521_x32;j++) {
			asm("mad.lo.cc.u32 %0,%1,%2,%3;":"=r"(c[i + j]) : "r"(a[j]), "r"(b[i]), "r"(c[i + j]));
		}
		// absorb the final low-half carry into the limb above the row
		asm("addc.cc.u32 %0, %1, 0;" : "=r"(c[i + NUM_DIGITS_521_x32]) : "r"(c[i + NUM_DIGITS_521_x32]));
		for (int j = 0;j < NUM_DIGITS_521_x32;j++) {
			asm("madc.hi.cc.u32 %0, %1, %2, %3;" : "=r"(c[i + j + 1]) : "r"(a[j]), "r"(b[i]), "r"(c[i + j + 1]));
		}
	}
	intreduction(r, c);
}

// Variant of intmul with a slightly different carry-absorption schedule:
// every row (including row 0's low pass) explicitly flushes its trailing
// carry into the limb above via addc, rather than folding it into the next
// pass. Same result as intmul; kept for benchmarking the two schedules.
// NOTE(review): as with intmul, the PTX carry flag is assumed to survive
// between separate asm() statements.
__device__ void intmul2(argElement_x32 r, argElement_x32 a, argElement_x32 b) {
	uint32_t c[34];	// double-length accumulator
	for (int i = 0;i < 34;i++) {
		c[i] = 0;
	}
	// row 0 low halves
	asm("mad.lo.u32 %0,%1,%2,%3;":"=r"(c[0]) : "r"(a[0]), "r"(b[0]), "r"(c[0]));
	for (int j = 1;j < NUM_DIGITS_521_x32;j++) {
		asm("mad.lo.u32 %0,%1,%2,%3;":"=r"(c[j]) : "r"(a[j]), "r"(b[0]), "r"(c[j]));
	}
	// row 0 high halves with carry chain, flushed into c[18]
	asm("mad.hi.cc.u32 %0,%1,%2,%3;":"=r"(c[1]) : "r"(a[0]), "r"(b[0]), "r"(c[1]));
	for (int j = 1;j < NUM_DIGITS_521_x32;j++) {
		asm("madc.hi.cc.u32 %0,%1,%2,%3;":"=r"(c[j + 1]) : "r"(a[j]), "r"(b[0]), "r"(c[j + 1]));
	}
	asm("addc.u32 %0,%1,0;":"=r"(c[NUM_DIGITS_521_x32 + 1]) : "r"(c[NUM_DIGITS_521_x32 + 1]));
	// rows 1..16: low pass + carry flush, then high pass + carry flush
	for (int i = 1;i < NUM_DIGITS_521_x32;i++) {
		asm("mad.lo.cc.u32 %0,%1,%2,%3;":"=r"(c[i]) : "r"(a[0]), "r"(b[i]), "r"(c[i]));
		for (int j = 1;j < NUM_DIGITS_521_x32;j++) {
			asm("madc.lo.cc.u32 %0,%1,%2,%3;":"=r"(c[i + j]) : "r"(a[j]), "r"(b[i]), "r"(c[i + j]));
		}
		asm("addc.cc.u32 %0,%1,0;":"=r"(c[NUM_DIGITS_521_x32 + i]) : "r"(c[NUM_DIGITS_521_x32 + i]));
		asm("mad.hi.cc.u32 %0,%1,%2,%3;":"=r"(c[i + 1]) : "r"(a[0]), "r"(b[i]), "r"(c[i + 1]));
		for (int j = 1;j < NUM_DIGITS_521_x32;j++) {
			asm("madc.hi.cc.u32 %0,%1,%2,%3;":"=r"(c[i + j + 1]) : "r"(a[j]), "r"(b[i]), "r"(c[i + j + 1]));
		}
		asm("addc.cc.u32 %0,%1,0;":"=r"(c[NUM_DIGITS_521_x32 + 1 + i]) : "r"(c[NUM_DIGITS_521_x32 + 1 + i]));
	}
	intreduction(r, c);
}
// Squaring for the 17x32-bit representation: accumulates the off-diagonal
// partial products once, doubles the whole accumulator with an add-with-carry
// pass, adds the diagonal terms a[i]^2, then reduces with intreduction.
// NOTE(review): like intmul, relies on the PTX carry flag surviving between
// separate asm() statements and across loop iterations.
__device__ void intsqr(argElement_x32 r, argElement_x32 a) {
	// BUG FIX: the accumulator must hold the full 34-limb square. The original
	// declared uint32_t c[24], but the doubling loop below (i < 2*17) and the
	// diagonal loop (c[2*i+1], i up to 16) index up to c[33] -- a stack buffer
	// overrun. Sized and zeroed to 34 limbs.
	uint32_t c[34];
	for (int i = 0;i < 34;i++) {
		c[i] = 0;
	}
	// off-diagonal products a[j]*a[i], j > i, accumulated once (doubled later)
	for (int i = 0;i <= (NUM_DIGITS_521_x32 / 2 - 2);i++) {
		//asm("mad.lo.cc.u32 %0, %1, %2, %3;" : "=r"(c[2 * i + 1]) : "r"(a[i + 1]), "r"(a[i]), "r"(c[2 * i + 1]));//
		for (int j = i + 1;j <= NUM_DIGITS_521_x32 - i - 2;j++) {
			asm("madc.lo.cc.u32 %0, %1, %2, %3;" : "=r"(c[i + j]) : "r"(a[j]), "r"(a[i]), "r"(c[i + j]));
		}
		// wrap-around column entries for this anti-diagonal
		for (int k = 0; k <= i; k++) {
			asm("madc.lo.cc.u32 %0, %1, %2, %3;" : "=r"(c[NUM_DIGITS_521_x32 + 2 * k - 1]) : "r"(a[NUM_DIGITS_521_x32 - 1 - i + k]), "r"(a[i + k]), "r"(c[NUM_DIGITS_521_x32 + 2 * k - 1]));
			asm("madc.hi.cc.u32 %0, %1, %2, %3;" : "=r"(c[NUM_DIGITS_521_x32 + 2 * k]) : "r"(a[NUM_DIGITS_521_x32 - 1 - i + k]), "r"(a[i + k]), "r"(c[NUM_DIGITS_521_x32 + 2 * k]));
		}
		// restart the carry chain (flush any pending carry through c[0])
		asm("add.cc.u32 %0,%1,0;":"=r"(c[0]) : "r"(c[0]));
		//asm("mad.hi.cc.u32 %0, %1, %2, %3;" : "=r"(c[2 * i + 2]) : "r"(a[i + 1]), "r"(a[i]), "r"(c[2 * i + 2]));
		for (int j = i + 1; j <= NUM_DIGITS_521_x32 - i - 2; j++) {
			asm("madc.hi.cc.u32 %0, %1, %2, %3;" : "=r"(c[i + j + 1]) : "r"(a[j]), "r"(a[i]), "r"(c[i + j + 1]));
		}
		for (int k = 0; k <= i; k++) {
			asm("madc.lo.cc.u32 %0, %1, %2, %3;" : "=r"(c[NUM_DIGITS_521_x32 + 2 * k]) : "r"(a[NUM_DIGITS_521_x32 - 1 - i + k]), "r"(a[i + k + 1]), "r"(c[NUM_DIGITS_521_x32 + 2 * k]));
			asm("madc.hi.cc.u32 %0, %1, %2, %3;" : "=r"(c[NUM_DIGITS_521_x32 + 2 * k + 1]) : "r"(a[NUM_DIGITS_521_x32 - 1 - i + k]), "r"(a[i + k + 1]), "r"(c[NUM_DIGITS_521_x32 + 2 * k + 1]));
		}
	}
	asm("add.cc.u32 %0,%1,0;":"=r"(c[0]) : "r"(c[0]));
	//asm("mad.lo.cc.u32 %0, %1, %2, %3;" : "=r"(c[NUM_DIGITS_521_x32 - 1]) : "r"(a[NUM_DIGITS_521_x32 / 2]), "r"(a[NUM_DIGITS_521_x32 / 2 - 1]), "r"(c[NUM_DIGITS_521_x32 - 1]));
	//asm("madc.hi.cc.u32 %0, %1, %2, %3;" : "=r"(c[NUM_DIGITS_521_x32]) : "r"(a[NUM_DIGITS_521_x32 / 2]), "r"(a[NUM_DIGITS_521_x32 / 2 - 1]), "r"(c[NUM_DIGITS_521_x32]));
	// middle anti-diagonal products
	for (int k = 0; k < NUM_DIGITS_521_x32 / 2; k++) {
		asm("madc.lo.cc.u32 %0, %1, %2, %3;" : "=r"(c[NUM_DIGITS_521_x32 + 2 * k - 1]) : "r"(a[NUM_DIGITS_521_x32 / 2 + k]), "r"(a[NUM_DIGITS_521_x32 / 2 + k - 1]), "r"(c[NUM_DIGITS_521_x32 + 2 * k - 1]));
		asm("madc.hi.cc.u32 %0, %1, %2, %3;" : "=r"(c[NUM_DIGITS_521_x32 + 2 * k]) : "r"(a[NUM_DIGITS_521_x32 / 2 + k]), "r"(a[NUM_DIGITS_521_x32 / 2 + k - 1]), "r"(c[NUM_DIGITS_521_x32 + 2 * k]));
	}
	// double the whole accumulator (off-diagonal terms appear twice in a^2)
	asm("add.cc.u32 %0, %1, %1;" : "=r"(c[1]) : "r"(c[1]));
	for (int i = 2; i < 2 * NUM_DIGITS_521_x32; i++) {
		asm("addc.cc.u32 %0, %1, %1;" : "=r"(c[i]) : "r"(c[i]));
	}
	asm("add.cc.u32 %0, %1, 0;" : "=r"(c[0]) : "r"(c[0]));
	/*asm("mad.lo.cc.u32 %0, %1, %1, %2;" : "=r"(c[0]) : "r"(a[0]), "r"(c[0]));
	asm("madc.hi.cc.u32 %0, %1, %1, %2;" : "=r"(c[1]) : "r"(a[0]), "r"(c[1]));*/
	// add the diagonal terms a[i]^2
	for (int i = 0; i < NUM_DIGITS_521_x32; i++) {
		asm("madc.lo.cc.u32 %0, %1, %1, %2;" : "=r"(c[2 * i]) : "r"(a[i]), "r"(c[2 * i]));
		asm("madc.hi.cc.u32 %0, %1, %1, %2;" : "=r"(c[2 * i + 1]) : "r"(a[i]), "r"(c[2 * i + 1]));
	}
	intreduction(r, c);
}

// Benchmark kernel for the schoolbook integer modular multiplication
// (intmul). Each thread stages its own 17-limb operands into local arrays,
// repeats the same multiplication Iterate times, and writes back the last
// result. Grid layout: 2-D blocks, flattened to one element per thread.
__global__ void intmul_kernel(argElement_x32 c, argElement_x32 a, argElement_x32 b) {
	// flat global thread index -> limb offset of this thread's element
	int thread = blockIdx.x * blockDim.x * blockDim.y
	           + threadIdx.y * blockDim.x + threadIdx.x;
	int base = thread * NUM_DIGITS_521_x32;

	uint32_t la[NUM_DIGITS_521_x32];
	uint32_t lb[NUM_DIGITS_521_x32];
	uint32_t lr[NUM_DIGITS_521_x32];

	// load operands once; the timing loop then works entirely out of
	// registers / local memory
	for (int k = 0; k < NUM_DIGITS_521_x32; ++k) {
		la[k] = a[base + k];
		lb[k] = b[base + k];
	}
	for (int rep = 0; rep < Iterate; ++rep) {
		intmul(lr, la, lb);
	}
	for (int k = 0; k < NUM_DIGITS_521_x32; ++k) {
		c[base + k] = lr[k];
	}
}

// Benchmark kernel for the intmul2 variant (alternate carry schedule).
// Identical structure to intmul_kernel: stage operands, repeat the same
// multiplication Iterate times, write back the last result.
__global__ void intmul2_kernel(argElement_x32 c, argElement_x32 a, argElement_x32 b) {
	// flat global thread index -> limb offset of this thread's element
	int thread = blockIdx.x * blockDim.x * blockDim.y
	           + threadIdx.y * blockDim.x + threadIdx.x;
	int base = thread * NUM_DIGITS_521_x32;

	uint32_t la[NUM_DIGITS_521_x32];
	uint32_t lb[NUM_DIGITS_521_x32];
	uint32_t lr[NUM_DIGITS_521_x32];

	for (int k = 0; k < NUM_DIGITS_521_x32; ++k) {
		la[k] = a[base + k];
		lb[k] = b[base + k];
	}
	for (int rep = 0; rep < Iterate; ++rep) {
		intmul2(lr, la, lb);
	}
	for (int k = 0; k < NUM_DIGITS_521_x32; ++k) {
		c[base + k] = lr[k];
	}
}

// Benchmark kernel for integer squaring (intsqr): one 17-limb element per
// thread, squared Iterate times, last result written back.
__global__ void intsqr_kernel(argElement_x32 c, argElement_x32 a) {
	// flat global thread index -> limb offset of this thread's element
	int thread = blockIdx.x * blockDim.x * blockDim.y
	           + threadIdx.y * blockDim.x + threadIdx.x;
	int base = thread * NUM_DIGITS_521_x32;

	uint32_t la[NUM_DIGITS_521_x32];
	uint32_t lr[NUM_DIGITS_521_x32];

	for (int k = 0; k < NUM_DIGITS_521_x32; ++k) {
		la[k] = a[base + k];
	}
	for (int rep = 0; rep < Iterate; ++rep) {
		intsqr(lr, la);
	}
	for (int k = 0; k < NUM_DIGITS_521_x32; ++k) {
		c[base + k] = lr[k];
	}
}
extern "C"
// Host wrapper: allocates device buffers, uploads a and b, times the
// intmul/intmul2 benchmark kernels with CUDA events, and copies the result
// back into c. The second launch overwrites dev_c, so only intmul2's output
// is returned. a, b and c must each hold numThreads elements of 17 limbs.
void intmulAPI(argElement_x32 c, argElement_x32 a, argElement_x32 b) {
	argElement_x32 dev_a = NULL;
	argElement_x32 dev_b = NULL;
	argElement_x32 dev_c = NULL;

	dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT);	// BLOCK_SIZE threads per block
	int numBlocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;	// ceil-div
	int numThreads = numBlocks * BLOCK_SIZE;

	int deviceNum = 0;
	float delta_time1 = 0.0f;
	cudaEvent_t kernel_start1, kernel_stop1;
	struct cudaDeviceProp device_prop;

	// select which GPU the program runs on
	cudaSetDevice(deviceNum);
	// bytes per 17-limb element
	int tmpN = NUM_DIGITS_521_x32 * sizeof(uint32_t);
	cudaMalloc((void**)& dev_c, tmpN * numThreads);
	cudaMalloc((void**)& dev_a, tmpN * numThreads);
	cudaMalloc((void**)& dev_b, tmpN * numThreads);

	// zero the device buffers for c = a * b, then upload the operands
	cudaMemset(dev_c, 0, tmpN * numThreads);
	cudaMemset(dev_a, 0, tmpN * numThreads);
	cudaMemset(dev_b, 0, tmpN * numThreads);

	cudaMemcpy(dev_a, a, tmpN * numThreads, cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b, b, tmpN * numThreads, cudaMemcpyHostToDevice);

	cudaEventCreate(&kernel_start1);
	cudaEventCreateWithFlags(&kernel_stop1, cudaEventBlockingSync);
	cudaGetDeviceProperties(&device_prop, deviceNum);
	cudaEventRecord(kernel_start1, 0);

	intmul_kernel << <numBlocks, threadsPerBlock >> > (dev_c, dev_a, dev_b);
	intmul2_kernel << <numBlocks, threadsPerBlock >> > (dev_c, dev_a, dev_b);
	// kernel launches do not return errors directly; surface them here
	cudaError_t err = cudaGetLastError();
	if (err != cudaSuccess) {
		cerr << "intmulAPI: kernel launch failed: " << cudaGetErrorString(err) << endl;
	}
	cudaEventRecord(kernel_stop1, 0);
	cudaEventSynchronize(kernel_stop1);
	cudaEventElapsedTime(&delta_time1, kernel_start1, kernel_stop1);
	(void)delta_time1;	// timing retained for the (commented-out) report

	// copy back the same element count that was launched (numThreads;
	// identical to N here since N is a multiple of BLOCK_SIZE)
	cudaMemcpy(c, dev_c, tmpN * numThreads, cudaMemcpyDeviceToHost);

	// BUG FIX: the events were created but never destroyed (resource leak)
	cudaEventDestroy(kernel_start1);
	cudaEventDestroy(kernel_stop1);
	cudaFree(dev_c);
	cudaFree(dev_a);
	cudaFree(dev_b);
}
extern "C"
// Host wrapper for the integer squaring benchmark: allocates device buffers,
// uploads a, times intsqr_kernel with CUDA events, and copies the result
// back into c. a and c must each hold numThreads elements of 17 limbs.
void intsqrAPI(argElement_x32 c, argElement_x32 a) {
	argElement_x32 dev_a = NULL;
	argElement_x32 dev_c = NULL;

	dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT);	// BLOCK_SIZE threads per block
	int numBlocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;	// ceil-div
	int numThreads = numBlocks * BLOCK_SIZE;

	int deviceNum = 0;
	float delta_time1 = 0.0f;
	cudaEvent_t kernel_start1, kernel_stop1;
	struct cudaDeviceProp device_prop;

	// select which GPU the program runs on
	cudaSetDevice(deviceNum);
	// bytes per 17-limb element
	int tmpN = NUM_DIGITS_521_x32 * sizeof(uint32_t);
	cudaMalloc((void**)& dev_c, tmpN * numThreads);
	cudaMalloc((void**)& dev_a, tmpN * numThreads);

	// zero the device buffers for c = a^2, then upload the operand
	cudaMemset(dev_c, 0, tmpN * numThreads);
	cudaMemset(dev_a, 0, tmpN * numThreads);

	cudaMemcpy(dev_a, a, tmpN * numThreads, cudaMemcpyHostToDevice);

	cudaEventCreate(&kernel_start1);
	cudaEventCreateWithFlags(&kernel_stop1, cudaEventBlockingSync);
	cudaGetDeviceProperties(&device_prop, deviceNum);
	cudaEventRecord(kernel_start1, 0);

	intsqr_kernel << <numBlocks, threadsPerBlock >> > (dev_c, dev_a);
	// kernel launches do not return errors directly; surface them here
	cudaError_t err = cudaGetLastError();
	if (err != cudaSuccess) {
		cerr << "intsqrAPI: kernel launch failed: " << cudaGetErrorString(err) << endl;
	}
	cudaEventRecord(kernel_stop1, 0);
	cudaEventSynchronize(kernel_stop1);
	cudaEventElapsedTime(&delta_time1, kernel_start1, kernel_stop1);
	(void)delta_time1;	// timing retained for the (commented-out) report

	// copy back the same element count that was launched (numThreads;
	// identical to N here since N is a multiple of BLOCK_SIZE)
	cudaMemcpy(c, dev_c, tmpN * numThreads, cudaMemcpyDeviceToHost);

	// BUG FIX: the events were created but never destroyed (resource leak)
	cudaEventDestroy(kernel_start1);
	cudaEventDestroy(kernel_stop1);
	cudaFree(dev_c);
	cudaFree(dev_a);
}

// Bit-cast a double to its raw 64-bit IEEE-754 representation.
__device__ uint64_t to_u64(double d) {
	union {
		uint64_t bits;
		double   fp;
	} cv;
	cv.fp = d;
	return cv.bits;
}

// Bit-cast a raw 64-bit pattern to the double it encodes.
__device__ double to_double(uint64_t u64) {
	union {
		uint64_t bits;
		double   fp;
	} cv;
	cv.bits = u64;
	return cv.fp;
}

// Host-side constants for the radix-2^52 representation (nine 52-bit limbs
// plus one 53-bit top limb = 521 bits).
// GLOB_Const_P appears to be 16*p limb-wise (each limb is 16 * an all-ones
// limb, i.e. p = 2^521 - 1) -- added before a subtraction to keep limbs
// non-negative. NOTE(review): the device function sub() keeps its own local
// duplicate because this host static is not visible from __device__ code.
static const uint64_t GLOB_Const_P[10] = { 0xFFFFFFFFFFFFF0,0xFFFFFFFFFFFFF0, 0xFFFFFFFFFFFFF0 ,0xFFFFFFFFFFFFF0 ,0xFFFFFFFFFFFFF0 ,0xFFFFFFFFFFFFF0 ,0xFFFFFFFFFFFFF0 ,0xFFFFFFFFFFFFF0,0xFFFFFFFFFFFFF0,0x1FFFFFFFFFFFFF0 };
static const uint64_t GLOB_mask52 = 0xFFFFFFFFFFFFF;	// low-52-bit mask
static const uint64_t GLOB_mask53 = 0x1FFFFFFFFFFFFF;	// low-53-bit mask


//input length must be 10.
//Simplify to unique element in NIST P-521
//Used before mul and squaring
// Two carry passes over the radix-2^52 (53-bit top limb) representation:
// the overflow of the top limb is folded back into limb 0 (the wrap step),
// carries are propagated, and the canonical limbs are written to c.
// NOTE: a is modified in place as a side effect (both passes carry in a).
__host__ __device__ void simplification_unique(argElement_x64 c,argElement_x64 a) {
	// wrap: bits of a[9] above 2^53 fold into limb 0
	uint64_t t = a[9] >> 53;
	a[9] &= 0x1FFFFFFFFFFFFF;
	a[0] += t;
	// first carry pass over the nine 52-bit limbs
	for (int i = 0; i < 9; i++) {
		a[i + 1] += (a[i] >> 52);
		a[i] &= 0xFFFFFFFFFFFFF;
	}
	// second wrap + carry pass, masking the result into c
	t = a[9] >> 53;
	a[9] &= 0x1FFFFFFFFFFFFF;
	a[0] += t;
	for (int i = 0; i < 9; i++) {
		// BUG FIX: carry from a[i] (the limb just produced), not from the
		// stale contents of the output buffer c[i] -- matching the first pass.
		a[i + 1] += (a[i] >> 52);
		c[i] = a[i] & 0xFFFFFFFFFFFFF;
	}
	c[9] = a[9];
}

// Carry propagation only (no modular wrap): push each limb's bits above
// 2^52 into the next limb. After the call, limbs 0..num-2 are 52-bit;
// the last limb absorbs the final carry unmasked.
void simplification_fast(argElement_x64 c, int num) {
	int last = num - 1;
	for (int k = 0; k < last; ++k) {
		uint64_t carry = c[k] >> 52;
		c[k] &= 0xFFFFFFFFFFFFF;
		c[k + 1] += carry;
	}
}

//new sub and add can be parallel, only used for element of NIST P-521, cannot be used in Karatsuba multiplication
// r = (a + 16*p) - b limb-wise, with no carry propagation (lazy reduction);
// adding 16*p first keeps every limb non-negative.
// NOTE: a is modified in place (a[i] += 16*p limb), preserving the original
// behavior -- callers appear to rely on later normalization.
__device__ void sub(argElement_x64 r, argElement_x64 a, argElement_x64 b) {
	// device-local copy of 16*p: the host-side GLOB_Const_P static is not
	// accessible from device code. (The original const_cast through a
	// pointer to this non-const local array was pointless and is removed.)
	const uint64_t p16[10] = { 0xFFFFFFFFFFFFF0,0xFFFFFFFFFFFFF0, 0xFFFFFFFFFFFFF0 ,0xFFFFFFFFFFFFF0 ,0xFFFFFFFFFFFFF0 ,0xFFFFFFFFFFFFF0 ,0xFFFFFFFFFFFFF0 ,0xFFFFFFFFFFFFF0,0xFFFFFFFFFFFFF0,0x1FFFFFFFFFFFFF0 };
	for (int i = 0; i < 10; i++) {
		a[i] += p16[i];
		r[i] = a[i] - b[i];
	}
}

// r = a + b limb-wise, no carry propagation (lazy reduction; limbs have
// headroom above 52 bits).
__device__ void add(argElement_x64 r, const argElement_x64 a, const argElement_x64 b)
{
	for (int k = 0; k < 10; ++k) {
		r[k] = a[k] + b[k];
	}
}
//reduction for Multiplication and squaring
// Folds the high half a[10..19] of a 20-limb product into the low 10 limbs:
// each high limb contributes its bits above the bottom one position down and
// its low bit one position up (weights presumably follow from the prime's
// wrap relation). Safe whether or not c aliases a.
__device__ void reduction(argElement_x64 c, argElement_x64 a) {
	for (int i = 0; i < 9; i++) {
		c[i] = a[i] + (a[10 + i] >> 1);
		c[i] = c[i] + ((uint64_t)(a[11 + i] & 1) << 51);
	}
	// BUG FIX: start from a[9], not c[9]. Reading c[9] was only correct when
	// the output aliased the input (as all current callers happen to do);
	// with a distinct output buffer it read uninitialized/stale memory.
	c[9] = a[9] + (a[19] >> 1) + ((uint64_t)(a[10] & 1) << 52);
}


/*
Using FMA as for 521-bit digit multiplication
School book multiplication
Used to test Karatsuba multiplication
*/
// Schoolbook 10x10 limb product using the double-precision truncated-FMA
// trick: fma.rz(a,b,t1) yields the high part of the 104-bit product riding
// on the bias constant t1, and a second FMA against (t2 - hi) recovers the
// low part. The c[] pre-initialization cancels the exponent-bias constants
// that ride along in the raw double bit patterns.
// The top (53rd) bit of a[9]/b[9] is split off into a10/b10, applied via
// masks, and restored into the inputs before returning (a and b are
// temporarily modified).
__device__ void mul_fma_10(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	uint32_t a10, b10;
	a10 = a[9] >> 52;
	a[9] &= 0xFFFFFFFFFFFFF;
	b10 = b[9] >> 52;	
	b[9] &= 0xFFFFFFFFFFFFF;
	// bias constants as raw bit patterns; t2 is t1 plus one ulp
	const double t1 = to_double(0x4670000000000000);
	const double t2 = to_double(0x4670000000000001);

	// seed c[] with the negated accumulated bias for each column
	for (int i = 0; i < 10; i++) {
		c[i] = (uint64_t)0x467 * i + (uint64_t)0x433 * (i + 1);
		c[i] = -(int64_t)(c[i] & 0xFFF) << 52;
		c[19 - i] = (uint64_t)0x467 * (i + 1) + (uint64_t)0x433 * i;
		c[19 - i] = -(int64_t)(c[19 - i] & 0xFFF) << 52;
	}

	// NOTE(review): the local 'sub' shadows the device function sub() above
	double p_hi = 0, p_lo = 0, sub = 0, a_tmp,b_tmp;

	for (int i = 0; i < 10; i++) {
		a_tmp = (double)a[i];
		for (int j = 0; j < 10; j++) {
			b_tmp = (double)b[j];
			// hi part: truncated FMA onto the bias constant
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
			c[i + j + 1] += to_u64(p_hi);
			// lo part: second FMA against (t2 - hi)
			asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
			c[i + j] += to_u64(p_lo);
		}
	}
	// cross terms with the split-off top bits: a10*b[i] and b10*a[i]
	uint64_t maska = -(int64_t)a10;
	uint64_t maskb = -(int64_t)b10;
	for (int i = 0; i < 10; i++) {
		c[10 + i] = c[10 + i] + (a[i] & maskb) + (b[i] & maska);
	}
	c[19] += (uint64_t)(a10 & b10) << 52;

	// restore the callers' a[9] and b[9]
	a[9] |= ((uint64_t)a10 << 52);
	b[9] |= ((uint64_t)b10 << 52);
}

// Fused version of mul_fma_10 + reduction + simplification_unique: the
// 20-limb product is built in a local buffer, folded modulo the prime, and
// normalized directly into r. a and b are temporarily modified (top bits
// split off) and restored before returning.
__device__ void mul_fma_10_with_reduction(argElement_x64 r, argElement_x64 a, argElement_x64 b) {
	uint32_t a10, b10;
	uint64_t c[20];
	a10 = a[9] >> 52;
	b10 = b[9] >> 52;
	a[9] &= 0xFFFFFFFFFFFFF;
	b[9] &= 0xFFFFFFFFFFFFF;
	// bias constants as raw bit patterns; t2 is t1 plus one ulp
	const double t1 = to_double(0x4670000000000000);
	const double t2 = to_double(0x4670000000000001);

	// seed c[] with the negated accumulated bias for each column
	for (int i = 0; i < 10; i++) {
		c[i] = (uint64_t)0x467 * i + (uint64_t)0x433 * (i + 1);
		c[i] = -(int64_t)(c[i] & 0xFFF) << 52;
		c[19 - i] = (uint64_t)0x467 * (i + 1) + (uint64_t)0x433 * i;
		c[19 - i] = -(int64_t)(c[19 - i] & 0xFFF) << 52;
	}

	double p_hi = 0, p_lo = 0, sub = 0, a_tmp, b_tmp;

	// schoolbook 10x10 product via the truncated-FMA hi/lo split
	for (int i = 0; i < 10; i++) {
		a_tmp = (double)a[i];
		for (int j = 0; j < 10; j++) {
			b_tmp = (double)b[j];
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
			c[i + j + 1] += to_u64(p_hi);
			asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
			c[i + j] += to_u64(p_lo);
		}
	}
	// cross terms with the split-off top bits
	uint64_t maska = -(int64_t)a10;
	uint64_t maskb = -(int64_t)b10;
	for (int i = 0; i < 10; i++) {
		c[10 + i] = c[10 + i] + (a[i] & maskb) + (b[i] & maska);
	}
	c[19] += (uint64_t)(a10 & b10) << 52;
	// inline reduction: fold c[10..19] into c[0..9] (c[9] was initialized by
	// the seed loop above, so reading it here is safe)
	for (int i = 0; i < 9; i++) {
		c[i] = c[i] + (c[10 + i] >> 1);
		c[i] = c[i] + ((uint64_t)(c[11 + i] & 1) << 51);
	}
	c[9] = c[9] + (c[19] >> 1) + ((uint64_t)(c[10] & 1) << 52);
	simplification_unique(r, c);
	// restore the callers' a[9] and b[9]
	a[9] |= ((uint64_t)a10 << 52);
	b[9] |= ((uint64_t)b10 << 52);
}

#define SUM_C0 0xbcd0000000000000
#define SUM_C1 0x3330000000000000
#define SUM_C2 0xa990000000000000
#define SUM_C3 0x1ff0000000000000
#define SUM_C4 0x9650000000000000
#define SUM_C5 0x9310000000000000
#define SUM_C6 0x1cb0000000000000
#define SUM_C7 0xa650000000000000
#define SUM_C8 0x2ff0000000000000
#define SUM_C9 0xb990000000000000

//Below are the Functions used in Karatsuba multiplication
///*
//Karatsuba sub routine multiplication for 5 52-bit digit number.
//*/
// Karatsuba sub-routine: schoolbook 5x5 product of 52-bit limbs using the
// truncated-FMA hi/lo trick (t2 is t1 plus one ulp, so t2 - hi recovers the
// low part). c[] starts from the per-column bias-cancellation constants
// (the SUM_C* values) rather than zero. Writes the 10-limb result to r.
__device__ void mul_sub_routine(argElement_x64 r, argElement_x64 a, argElement_x64 b) {
	const double t1 = to_double(0x4670000000000000);
	const double t2 = to_double(0x4670000000000001);
	uint64_t c[10];
	c[0] = 0xbcd0000000000000;c[1] = 0x3330000000000000;c[2] = 0xa990000000000000;c[3] = 0x1ff0000000000000;c[4] = 0x9650000000000000;
	c[5] = 0x9310000000000000;c[6] = 0x1cb0000000000000;c[7] = 0xa650000000000000;c[8] = 0x2ff0000000000000;c[9] = 0xb990000000000000;

	double p_hi = 0, p_lo = 0, sub = 0, a_tmp,b_tmp;
	for (int i = 0; i < 5; i++) {
		a_tmp = (double)a[i];
		for (int j = 0; j < 5; j++) {
			// BUG FIX: load b[j], the inner-loop limb. The original read b[i],
			// which computed a[i]*b[i] five times per row instead of the full
			// cross products (compare the same loop in mul_fma_10).
			b_tmp = (double)b[j];
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
			c[i + j + 1] += to_u64(p_hi);
			asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
			c[i + j] += to_u64(p_lo);
		}
	}
	for (int i = 0;i < 10;i++) {
		r[i] = c[i];
	}
}

// Hand-scheduled PTX version of mul_sub_routine: a full 5x5 limb product in
// which the ten accumulators live directly in asm output registers (%0-%9),
// initialized to the bias-cancellation constants by the leading mov.b64
// instructions. Each partial product is four instructions: truncated FMA for
// the high part, t2 - hi, a second FMA for the low part, and integer adds
// into the neighboring columns.
// NOTE(review): the bias constants here (0x466...) differ from the 0x467...
// pair in mul_sub_routine -- verify which limb bound each variant assumes.
// NOTE(review): %14 ("l"(tmp)) is declared as an *input* operand but is the
// destination of every "mov.b64 %14, %10" -- writing an input operand is
// outside the inline-asm contract, and tmp is never initialized. It should
// be declared as a "=l" scratch output.
__device__ void mul_sub_routine2(argElement_x64 r, argElement_x64 a, argElement_x64 b) {
	const double t1 = to_double(0x4660000000000000);
	const double t2 = to_double(0x4660000000000002);
	double p_hi, sub;
	uint64_t tmp;
	asm volatile(
		//a[0]
		"mov.b64		%0,		0xbcd0000000000000;\n\t"
		"mov.b64		%1,		0x3330000000000000;\n\t"
		"mov.b64		%2,		0xa990000000000000;\n\t"
		"mov.b64		%3,		0x1ff0000000000000;\n\t"
		"mov.b64		%4,		0x9650000000000000;\n\t"
		"mov.b64		%5,		0x9310000000000000;\n\t"
		"mov.b64		%6,		0x1cb0000000000000;\n\t"
		"mov.b64		%7,		0xa650000000000000;\n\t"
		"mov.b64		%8,		0x2ff0000000000000;\n\t"
		"mov.b64		%9,		0xb990000000000000;\n\t"

		"fma.rz.f64		%10,	%15,	%20,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%1,		%1,		%14;\n\t"
		"fma.rz.f64		%10,	%15,	%20,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%0,		%0,		%14;\n\t"
		"fma.rz.f64		%10,	%15,	%21,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%2,		%2,		%14;\n\t"
		"fma.rz.f64		%10,	%15,	%21,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%1,		%1,		%14;\n\t"
		"fma.rz.f64		%10,	%15,	%22,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%3,		%3,		%14;\n\t"
		"fma.rz.f64		%10,	%15,	%22,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%2,		%2,		%14;\n\t"
		"fma.rz.f64		%10,	%15,	%23,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%4,		%4,		%14;\n\t"
		"fma.rz.f64		%10,	%15,	%23,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%3,		%3,		%14;\n\t"
		"fma.rz.f64		%10,	%15,	%24,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%5,		%5,		%14;\n\t"
		"fma.rz.f64		%10,	%15,	%24,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%4,		%4,		%14;\n\t"

		//a[1]
		"fma.rz.f64		%10,	%16,	%20,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%2,		%2,		%14;\n\t"
		"fma.rz.f64		%10,	%16,	%20,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%1,		%1,		%14;\n\t"
		"fma.rz.f64		%10,	%16,	%21,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%3,		%3,		%14;\n\t"
		"fma.rz.f64		%10,	%16,	%21,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%2,		%2,		%14;\n\t"
		"fma.rz.f64		%10,	%16,	%22,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%4,		%4,		%14;\n\t"
		"fma.rz.f64		%10,	%16,	%22,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%3,		%3,		%14;\n\t"
		"fma.rz.f64		%10,	%16,	%23,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%5,		%5,		%14;\n\t"
		"fma.rz.f64		%10,	%16,	%23,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%4,		%4,		%14;\n\t"
		"fma.rz.f64		%10,	%16,	%24,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%6,		%6,		%14;\n\t"
		"fma.rz.f64		%10,	%16,	%24,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%5,		%5,		%14;\n\t"

		//a[2]
		"fma.rz.f64		%10,	%17,	%20,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%3,		%3,		%14;\n\t"
		"fma.rz.f64		%10,	%17,	%20,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%2,		%2,		%14;\n\t"
		"fma.rz.f64		%10,	%17,	%21,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%4,		%4,		%14;\n\t"
		"fma.rz.f64		%10,	%17,	%21,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%3,		%3,		%14;\n\t"
		"fma.rz.f64		%10,	%17,	%22,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%5,		%5,		%14;\n\t"
		"fma.rz.f64		%10,	%17,	%22,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%4,		%4,		%14;\n\t"
		"fma.rz.f64		%10,	%17,	%23,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%6,		%6,		%14;\n\t"
		"fma.rz.f64		%10,	%17,	%23,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%5,		%5,		%14;\n\t"
		"fma.rz.f64		%10,	%17,	%24,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%7,		%7,		%14;\n\t"
		"fma.rz.f64		%10,	%17,	%24,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%6,		%6,		%14;\n\t"

		//a[3]
		"fma.rz.f64		%10,	%18,	%20,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%4,		%4,		%14;\n\t"
		"fma.rz.f64		%10,	%18,	%20,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%3,		%3,		%14;\n\t"
		"fma.rz.f64		%10,	%18,	%21,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%5,		%5,		%14;\n\t"
		"fma.rz.f64		%10,	%18,	%21,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%4,		%4,		%14;\n\t"
		"fma.rz.f64		%10,	%18,	%22,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%6,		%6,		%14;\n\t"
		"fma.rz.f64		%10,	%18,	%22,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%5,		%5,		%14;\n\t"
		"fma.rz.f64		%10,	%18,	%23,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%7,		%7,		%14;\n\t"
		"fma.rz.f64		%10,	%18,	%23,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%6,		%6,		%14;\n\t"
		"fma.rz.f64		%10,	%18,	%24,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%8,		%8,		%14;\n\t"
		"fma.rz.f64		%10,	%18,	%24,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%7,		%7,		%14;\n\t"

		//a[4]
		"fma.rz.f64		%10,	%19,	%20,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%5,		%5,		%14;\n\t"
		"fma.rz.f64		%10,	%19,	%20,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%4,		%4,		%14;\n\t"
		"fma.rz.f64		%10,	%19,	%21,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%6,		%6,		%14;\n\t"
		"fma.rz.f64		%10,	%19,	%21,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%5,		%5,		%14;\n\t"
		"fma.rz.f64		%10,	%19,	%22,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%7,		%7,		%14;\n\t"
		"fma.rz.f64		%10,	%19,	%22,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%6,		%6,		%14;\n\t"
		"fma.rz.f64		%10,	%19,	%23,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%8,		%8,		%14;\n\t"
		"fma.rz.f64		%10,	%19,	%23,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%7,		%7,		%14;\n\t"
		"fma.rz.f64		%10,	%19,	%24,	%12;\n\t"
		"sub.rz.f64		%11,	%10,	%13;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%9,		%9,		%14;\n\t"
		"fma.rz.f64		%10,	%19,	%24,	%11;\n\t"
		"mov.b64		%14,	%10;\n\t"
		"add.s64 		%8,		%8,		%14;\n\t"
		:"=l"(r[0]), "=l"(r[1]), "=l"(r[2]), "=l"(r[3]), "=l"(r[4]),//0
		"=l"(r[5]), "=l"(r[6]), "=l"(r[7]), "=l"(r[8]), "=l"(r[9]),//5
		"=d"(p_hi), "=d"(sub) : "d"(t1), "d"(t2), "l"(tmp),//10
		"d"(double(a[0])), "d"(double(a[1])), "d"(double(a[2])), "d"(double(a[3])), "d"(double(a[4])), //15
		"d"(double(b[0])), "d"(double(b[1])), "d"(double(b[2])), "d"(double(b[3])), "d"(double(b[4]))//20
		);
}

// One-level Karatsuba multiplication for the 10-limb radix-2^52
// representation: splits each operand into 5-limb halves, computes
// low*low, high*high and (low+high)*(low+high), then assembles the middle
// term by subtraction. The top (53rd) bit of a[9]/b[9] is split off into
// a10/b10, applied via masks at the end, and the inputs are restored.
// Writes the 20-limb un-reduced product into c[0..19].
// NOTE(review): the three mul_sub_routine calls compute results that are
// immediately overwritten by the mul_sub_routine2 calls on the same output
// buffers -- redundant work, probably left over from benchmarking the two
// variants; one set should be removed.
__device__ void kara_mul(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	uint64_t a_sum[5], b_sum[5], r_sum[10];
	uint32_t a10, b10;
	a10 = a[9] >> 52;
	b10 = b[9] >> 52;
	a[9] &= 0xFFFFFFFFFFFFF;
	b[9] &= 0xFFFFFFFFFFFFF;

	// a_sum/b_sum = low half + high half with carry propagation; the final
	// overflow bit becomes an all-ones mask in carry_a/carry_b
	uint64_t carry_a, carry_b = 0;
	a_sum[0] = 0;
	b_sum[0] = 0;
	for (int i = 0; i < 4; i++) {
		a_sum[i] = a_sum[i] + a[i] + a[5 + i];//<2^53
		b_sum[i] = b_sum[i] + b[i] + b[5 + i];//<2^53
		a_sum[i + 1] = (a_sum[i] >> 52);
		b_sum[i + 1] = (b_sum[i] >> 52);
		a_sum[i] &= 0xFFFFFFFFFFFFF;
		b_sum[i] &= 0xFFFFFFFFFFFFF;
	}
	a_sum[4] = a_sum[4] + a[4] + a[9];
	b_sum[4] = b_sum[4] + b[4] + b[9];
	carry_a = -(int64_t)(a_sum[4] >> 52);
	carry_b = -(int64_t)(b_sum[4] >> 52);
	a_sum[4] &= 0xFFFFFFFFFFFFF;
	b_sum[4] &= 0xFFFFFFFFFFFFF;
	mul_sub_routine(c, a, b);//<(2^52)*(2^4)=2^56
	mul_sub_routine(c + 10, a + 5, b + 5);//<2^56
	mul_sub_routine(r_sum, a_sum, b_sum);//<2^56
	mul_sub_routine2(c, a, b);//<(2^52)*(2^4)=2^56
	mul_sub_routine2(c + 10, a + 5, b + 5);//<2^56
	mul_sub_routine2(r_sum, a_sum, b_sum);//<2^56
	//due with a_sum, b_sum's carries
	for (int i = 0; i < 5; i++) {
		r_sum[i + 5] = r_sum[i + 5] + (carry_a & b_sum[i]) + (carry_b & a_sum[i]);//<2^58
	}
	carry_a = (-(int64_t)carry_a) & (-(int64_t)carry_b);//add to c[15]

	// subtraction guard: set bit 58 on each limb so the borrows below cannot
	// underflow, pre-subtracting the compensation 2^6 from the limb above
	for (int i = 0; i < 9; i++) {
		r_sum[i] += c[5 + i];
		r_sum[i] |= ((uint64_t)1 << 58);
		r_sum[i + 1] -= (1 << 6);
	}
	r_sum[9] += c[14];//r_sum[9]<c[9]+c[19]? Assume not, if not c[15]-?
	r_sum[9] |= ((uint64_t)1 << 58);
	carry_a -= (1 << 6);

	// middle Karatsuba term: (sum)^2-style subtraction of low and high parts
	for (int i = 0; i < 10; i++) {
		r_sum[i] -= c[i];
	}
	for (int i = 0; i < 10; i++) {
		c[5 + i] = r_sum[i] - c[10 + i];
	}
	c[15] += carry_a;

	// re-apply the split-off top bits: a10*b[i] and b10*a[i] cross terms
	uint64_t maska = -(int64_t)a10;
	uint64_t maskb = -(int64_t)b10;
	uint64_t tmp;	// NOTE(review): unused
	for (int i = 0; i < 10; i++) {
		c[10 + i] = c[10 + i] + (a[i] & maskb) + (b[i] & maska);
	}
	c[19] += (uint64_t)(a10 & b10) << 52;
	/*c[20] = a10 & b10;*/

	// restore the callers' a[9] and b[9]
	a[9] |= ((uint64_t)a10 << 52);
	b[9] |= ((uint64_t)b10 << 52);
}

// Product-scanning squaring of a 5-limb operand using the truncated-FMA
// hi/lo split (bias constants t1, and t2 = t1 plus a small offset so that
// t2 - hi recovers the low part). Columns c0..c9 are assembled in order
// into r[0..9]; off-diagonal partial products are doubled with << 1 and
// each column adds its hex bias-cancellation constant (the SUM_C* values).
// c_tmp1/2/3 carry the high parts between consecutive columns.
// NOTE(review): the bias constants (0x463...) differ from both
// mul_sub_routine variants -- presumably tuned to this routine's smaller
// accumulation depth; verify the limb bounds.
__device__ void sqr_sub_routine_product_scanning(argElement_x64 r, argElement_x64 a) {
	double t1 = to_double(0x4630000000000000);
	double t2 = to_double(0x4630000000000010);
	uint64_t c_tmp1 = 0, c_tmp2 = 0, c_tmp3 = 0;
	//uint64_t tmp = 0;
	double p_hi = 0, p_lo = 0, sub = 0, a_tmp = 0, b_tmp = 0;
	//c0: a0*a0 (diagonal, not doubled)
	a_tmp = (double)a[0];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(a_tmp), "d"(t1));
	c_tmp3 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(a_tmp), "d"(sub));
	r[0] = 0xbcd0000000000000 + to_u64(p_lo);
	//c1: 2*a0*a1
	b_tmp = (double)a[1];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 = to_u64(p_lo) << 1;
	r[1] = c_tmp1 + 0x3330000000000000 + c_tmp3;
	//c2: 2*a0*a2 + a1*a1
	c_tmp1 = c_tmp2;
	b_tmp = (double)a[2];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);

	a_tmp = (double)a[1];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(a_tmp), "d"(t1));
	c_tmp3 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(a_tmp), "d"(sub));
	r[2] = (c_tmp1 << 1) + to_u64(p_lo) + 0xa990000000000000;
	//c3:a1b2
	c_tmp1 = c_tmp2;
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);

	a_tmp = (double)a[0];
	b_tmp = (double)a[3];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 += to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);
	r[3] = (c_tmp1 << 1) + c_tmp3 + 0x1ff0000000000000;
	//c4:a0b4
	c_tmp1 = c_tmp2;
	b_tmp = (double)a[4];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);

	a_tmp = (double)a[1];b_tmp = (double)a[3];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 += to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);

	a_tmp = (double)a[2];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(a_tmp), "d"(t1));
	c_tmp3 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(a_tmp), "d"(sub));
	r[4] = (c_tmp1 << 1) + to_u64(p_lo) + 0x9650000000000000;
	//c5:a2b3
	c_tmp1 = c_tmp2;
	b_tmp = (double)a[3];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);

	a_tmp = (double)a[1];b_tmp = (double)a[4];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 += to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);
	r[5] = (c_tmp1 << 1) + c_tmp3 + 0x9310000000000000;
	//c6:a2b4
	c_tmp1 = c_tmp2;
	a_tmp = (double)a[2];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);

	a_tmp = (double)a[3];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(a_tmp), "d"(t1));
	c_tmp3 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(a_tmp), "d"(sub));
	r[6] = (c_tmp1 << 1) + to_u64(p_lo) + 0x1cb0000000000000;
	//c7:a3b4
	c_tmp1 = c_tmp2;
	b_tmp = (double)a[4];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);
	r[7] = (c_tmp1 << 1) + c_tmp3 + 0xa650000000000000;
	//c8:a4b4 -- diagonal a4*a4; its hi part goes straight into r[9]
	//c_tmp1 = c_tmp2;
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(b_tmp), "d"(b_tmp), "d"(t1));
	r[9] = to_u64(p_hi) + 0xb990000000000000;
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(b_tmp), "d"(b_tmp), "d"(sub));
	r[8] = (c_tmp2 << 1) + to_u64(p_lo) +0x2ff0000000000000;
}

// One-level Karatsuba squaring for the 10-limb radix-2^52 representation
// (mirrors kara_mul with b == a). The top (53rd) bit of a[9] is split off
// into a10, handled via masks, and restored before returning. Writes the
// 20-limb un-reduced square into c[0..19].
__device__ void kara_sqr(argElement_x64 c, argElement_x64 a) {
	uint64_t a_sum[5], r_sum[10];
	uint32_t a10;
	uint64_t carry_a = 0;
	a10 = a[9] >> 52;
	// BUG FIX: strip the top bit, as kara_mul does; it is re-added via maska
	// below and restored into a[9] at the end. Leaving it in place
	// double-counted it in a_sum and the high-half square.
	a[9] &= 0xFFFFFFFFFFFFF;
	// a_sum = low half + high half with carry propagation.
	// BUG FIX: loop to 4, not 5 -- the original wrote a_sum[5] (one past the
	// end of the array) and then added a[4] + a[9] a second time; the loop
	// body also discarded the incoming carry. Now matches kara_mul.
	a_sum[0] = 0;
	for (int i = 0; i < 4; i++) {
		a_sum[i] = a_sum[i] + a[i] + a[5 + i];//<2^53
		a_sum[i + 1] = (a_sum[i] >> 52);
		a_sum[i] &= 0xFFFFFFFFFFFFF;
	}
	a_sum[4] = a_sum[4] + a[4] + a[9];
	carry_a = -(int64_t)(a_sum[4] >> 52);	// all-ones mask if the sum overflowed
	a_sum[4] &= 0xFFFFFFFFFFFFF;
	sqr_sub_routine_product_scanning(c, a);//low half squared
	sqr_sub_routine_product_scanning(c + 10, a + 5);//high half squared
	sqr_sub_routine_product_scanning(r_sum, a_sum);//(low+high) squared
	// fold a_sum's overflow bit back in; the cross term appears twice
	for (int i = 0; i < 5; i++) {
		// BUG FIX: parenthesized the shift -- '+' binds tighter than '<<' in
		// C, so the original shifted the entire accumulated sum, not just the
		// doubled cross term.
		r_sum[i + 5] = r_sum[i + 5] + ((carry_a & a_sum[i]) << 1);//<2^58
	}
	carry_a = (-(int64_t)carry_a);//overflow bit squared; added to c[15]

	// subtraction guard: set bit 58 so the borrows below cannot underflow,
	// pre-subtracting the compensation 2^6 from the limb above
	for (int i = 0; i < 9; i++) {
		r_sum[i] += c[5 + i];
		r_sum[i] |= ((uint64_t)1 << 58);
		r_sum[i + 1] -= (1 << 6);
	}
	r_sum[9] += c[14];
	r_sum[9] |= ((uint64_t)1 << 58);
	carry_a -= (1 << 6);

	// middle Karatsuba term: (low+high)^2 - low^2 - high^2
	for (int i = 0; i < 10; i++) {
		r_sum[i] -= c[i];
	}
	for (int i = 0; i < 10; i++) {
		c[5 + i] = r_sum[i] - c[10 + i];
	}
	c[15] += carry_a;

	// re-apply the split-off top bit: the cross terms 2 * a10 * a[i]
	uint64_t maska = -(int64_t)a10;
	for (int i = 0; i < 10; i++) {
		c[10 + i] = c[10 + i] + ((a[i] & maska) << 1);
	}
	c[19] += ((uint64_t)a10 << 52);	// a10 * a10 == a10 (it is 0 or 1)

	// restore the caller's a[9]
	a[9] |= ((uint64_t)a10 << 52);
}

#undef SUM_C0 
#undef SUM_C1 
#undef SUM_C2 
#undef SUM_C3 
#undef SUM_C4 
#undef SUM_C5 
#undef SUM_C6 
#undef SUM_C7 
#undef SUM_C8 
#undef SUM_C9 
//modular multiplication, schoolbook (FMA-based) core
__device__ void mul_1(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	// c = a*b mod p: full 20-limb product, then reduction and
	// canonicalisation back into 10 limbs.
	EltBN521_x64_fp_buffer product;
	mul_fma_10(product, a, b);
	reduction(product, product);
	simplification_unique(c, product);
}

//modular multiplication, Karatsuba core
__device__ void mul_2(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	// c = a*b mod p using the Karatsuba multiplier, then reduction and
	// canonicalisation back into 10 limbs.
	EltBN521_x64_fp_buffer product;
	kara_mul(product, a, b);
	reduction(product, product);
	simplification_unique(c, product);
}

__device__ void sqr_1(argElement_x64 c, argElement_x64 a) {
	// c = a^2 mod p via the schoolbook multiplier applied to (a, a).
	EltBN521_x64_fp_buffer product;
	mul_fma_10(product, a, a);
	reduction(product, product);
	simplification_unique(c, product);
}

__device__ void sqr_2(argElement_x64 c, argElement_x64 a) {
	// c = a^2 mod p via the Karatsuba multiplier applied to (a, a).
	EltBN521_x64_fp_buffer product;
	kara_mul(product, a, a);
	reduction(product, product);
	simplification_unique(c, product);
}


__host__ __device__ void copy(uint64_t* c, uint64_t* a) {
	// Limb-wise copy of a 10-limb field element: c = a.
	for (int i = 0; i < 10; i++) {
		c[i] = a[i];
	}
}

// Modular inversion c = x^(-1) using the schoolbook (mul_1/sqr_1) field
// arithmetic. Fermat's little theorem: the fixed addition chain below
// computes x^(p-2) by building x^(2^k - 1) windows (x^3, x^7, x^63, x^127,
// x^255, ...) and repeatedly doubling/merging them.
// NOTE(review): the exponent is implied by the chain structure — confirm it
// equals p-2 for the field prime used by this implementation.
__device__ void inv_1(argElement_x64 c, argElement_x64 x) {
	// x127 holds the x^127 window needed again at the very end;
	// w/t/z are ping-pong temporaries for the square-and-multiply ladder.
	EltBN521_x64_fp x127, w, t, z;
	sqr_1(x127, x);//x127=a^2
	mul_1(t, x, x127);//t=a^3
	sqr_1(x127, t);//t3=a^6
	mul_1(w, x127, x);//w=a^7
	sqr_1(x127, w);//a^14
	sqr_1(t, x127);//28
	sqr_1(x127, t);//x127=x^56
	copy(t, x127);
	mul_1(x127, w, t);//x^63
	sqr_1(t, x127);//x^126
	mul_1(x127, t, x);//x^127

	sqr_1(t, x127);
	mul_1(z, t, x);//z=x^255
	copy(w, z);
	// 8 squarings then merge: doubles the all-ones window from 8 to 16 bits.
	for (int i = 0; i < 4; i++) {
		sqr_1(t, z);
		sqr_1(z, t);
	}
	mul_1(t, z, w);//t=z16

	copy(w, t);
	// 16 squarings then merge: 16-bit window -> 32-bit window.
	for (int i = 0; i < 8; i++)
	{
		sqr_1(z, t);
		sqr_1(t, z);
	}
	mul_1(z, t, w);        // z=z32      

	copy(w, z);
	// 32 squarings then merge: 32 -> 64.
	for (int i = 0; i < 16; i++)
	{
		sqr_1(t, z);
		sqr_1(z, t);
	}
	mul_1(t, z, w);        // t=z64      

	copy(w, t);
	// 64 squarings then merge: 64 -> 128.
	for (int i = 0; i < 32; i++)
	{
		sqr_1(z, t);
		sqr_1(t, z);
	}
	mul_1(z, t, w);        // z=z128     

	copy(w, z);
	// 128 squarings then merge: 128 -> 256.
	for (int i = 0; i < 64; i++)
	{
		sqr_1(t, z);
		sqr_1(z, t);
	}
	mul_1(t, z, w);        // z=z256       

	copy(w, t);
	// 256 squarings then merge: 256 -> 512.
	for (int i = 0; i < 128; i++)
	{
		sqr_1(z, t);
		sqr_1(t, z);
	}
	mul_1(z, t, w);      // z=z512        

	// Tail of the chain: shift in the x^127 window and a final x factor.
	sqr_1(t, z);
	sqr_1(z, t);
	sqr_1(t, z);
	sqr_1(z, t);
	sqr_1(t, z);
	sqr_1(z, t);
	sqr_1(t, z);
	mul_1(z, t, x127);
	sqr_1(t, z);
	sqr_1(z, t);
	mul_1(t, z, x);
	copy(c, t);
}

// Modular inversion c = x^(-1) using the Karatsuba (mul_2/sqr_2) field
// arithmetic. Identical addition chain to inv_1; only the underlying
// multiplier differs, so the two can be benchmarked against each other.
__device__ void inv_2(argElement_x64 c, argElement_x64 x) {
	// x127 caches the x^127 window reused at the end of the chain.
	EltBN521_x64_fp x127, w, t, z;
	sqr_2(x127, x);//x127=a^2
	mul_2(t, x, x127);//t=a^3
	sqr_2(x127, t);//t3=a^6
	mul_2(w, x127, x);//w=a^7
	sqr_2(x127, w);//a^14
	sqr_2(t, x127);//28
	sqr_2(x127, t);//x127=x^56
	copy(t, x127);
	mul_2(x127, w, t);//x^63
	sqr_2(t, x127);//x^126
	mul_2(x127, t, x);//x^127

	sqr_2(t, x127);
	mul_2(z, t, x);//z=x^255
	copy(w, z);
	// 8 squarings then merge: 8-bit all-ones window -> 16-bit.
	for (int i = 0; i < 4; i++) {
		sqr_2(t, z);
		sqr_2(z, t);
	}
	mul_2(t, z, w);//t=z16

	copy(w, t);
	// 16 squarings then merge: 16 -> 32.
	for (int i = 0; i < 8; i++)
	{
		sqr_2(z, t);
		sqr_2(t, z);
	}
	mul_2(z, t, w);        // z=z32      

	copy(w, z);
	// 32 squarings then merge: 32 -> 64.
	for (int i = 0; i < 16; i++)
	{
		sqr_2(t, z);
		sqr_2(z, t);
	}
	mul_2(t, z, w);        // t=z64      

	copy(w, t);
	// 64 squarings then merge: 64 -> 128.
	for (int i = 0; i < 32; i++)
	{
		sqr_2(z, t);
		sqr_2(t, z);
	}
	mul_2(z, t, w);        // z=z128     

	copy(w, z);
	// 128 squarings then merge: 128 -> 256.
	for (int i = 0; i < 64; i++)
	{
		sqr_2(t, z);
		sqr_2(z, t);
	}
	mul_2(t, z, w);        // z=z256       

	copy(w, t);
	// 256 squarings then merge: 256 -> 512.
	for (int i = 0; i < 128; i++)
	{
		sqr_2(z, t);
		sqr_2(t, z);
	}
	mul_2(z, t, w);      // z=z512        

	// Tail: shift in the x^127 window and the final x factor.
	sqr_2(t, z);
	sqr_2(z, t);
	sqr_2(t, z);
	sqr_2(z, t);
	sqr_2(t, z);
	sqr_2(z, t);
	sqr_2(t, z);
	mul_2(z, t, x127);
	sqr_2(t, z);
	sqr_2(z, t);
	mul_2(t, z, x);
	copy(c, t);
}


//benchmark kernel: repeated FMA-based modular multiplication,
//element-major layout (one 10-limb element per thread at offset t*10)
__global__ void mul_fma_10_reduction(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	int thread_id = blockIdx.x * blockDim.x * blockDim.y
		+ threadIdx.y * blockDim.x + threadIdx.x;
	int base = thread_id * 10;
	EltBN521_x64_fp res, lhs, rhs;
	// Stage this thread's operands out of global memory.
	for (int k = 0; k < NUM_DIGITS_521_x64_fp; k++) {
		lhs[k] = a[base + k];
		rhs[k] = b[base + k];
	}
	// Repeat the same operation Iterate times purely for timing.
	for (int k = 0; k < Iterate; k++) {
		mul_fma_10_with_reduction(res, lhs, rhs);
	}
	for (int k = 0; k < NUM_DIGITS_521_x64_fp; k++) {
		c[base + k] = res[k];
	}
}

//benchmark kernel: repeated mul_1 (schoolbook multiply + reduction),
//element-major layout
__global__ void mul_1_kernel(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	int thread_id = blockIdx.x * blockDim.x * blockDim.y
		+ threadIdx.y * blockDim.x + threadIdx.x;
	int base = thread_id * 10;
	EltBN521_x64_fp res, lhs, rhs;
	// Stage this thread's operands out of global memory.
	for (int k = 0; k < NUM_DIGITS_521_x64_fp; k++) {
		lhs[k] = a[base + k];
		rhs[k] = b[base + k];
	}
	// Repeat the same operation Iterate times purely for timing.
	for (int k = 0; k < Iterate; k++) {
		mul_1(res, lhs, rhs);
	}
	for (int k = 0; k < NUM_DIGITS_521_x64_fp; k++) {
		c[base + k] = res[k];
	}
}

//modular multiplication (FMA schoolbook) fused with reduction, limb-major
//("consecutive") layout: limb i of thread t is stored at x[t + i*N] so a
//warp's accesses to the same limb index are coalesced.
__global__ void mul_fma_10_reduction_consecutive(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	//int t = threadIdx.x + threadIdx.y * blockDim.x;
	int bid = blockIdx.x * blockDim.x * blockDim.y;
	int t = bid + (threadIdx.x + threadIdx.y * blockDim.x);
	EltBN521_x64_fp_buffer c = { 0 };
	uint32_t a10;
	uint32_t b10;
	// The top limb may hold 53 bits; peel off bit 52 of each operand and
	// handle it separately after the core 52x52-bit product.
	a10 = a[t + 9 * N] >> 52;
	b10 = b[t + 9 * N] >> 52;
	a[t + 9 * N] &= 0xFFFFFFFFFFFFF;
	b[t + 9 * N] &= 0xFFFFFFFFFFFFF;
	// Bit patterns of doubles with exponent 2^104 (0x467...); used to split
	// each 104-bit FMA product into high/low 52-bit parts (to_double
	// presumably reinterprets the raw bits — see its definition).
	const double t1 = to_double(0x4670000000000000);
	const double t2 = to_double(0x4670000000000001);

	// Pre-bias each accumulator limb so the exponent fields (0x467 for the
	// high parts, 0x433 for the low parts) accumulated by the FMA trick
	// cancel out of the final integer result.
	for (int i = 0; i < 10; i++) {
		c[i] = (uint64_t)0x467 * i + (uint64_t)0x433 * (i + 1);
		c[i] = -(int64_t)(c[i] & 0xFFF) << 52;
		c[19 - i] = (uint64_t)0x467 * (i + 1) + (uint64_t)0x433 * i;
		c[19 - i] = -(int64_t)(c[19 - i] & 0xFFF) << 52;
	}

	double p_hi = 0, p_lo = 0, sub = 0;

	// Schoolbook product: for each limb pair, fma with bias t1 yields the
	// high part; t2 - p_hi recovers the rounding residue so a second fma
	// yields the exact low part. Both land in 64-bit integer accumulators.
	for (int i = 0; i < 10; i++) {
		for (int j = 0; j < 10; j++) {
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"((double)a[t + i * N]), "d"((double)b[t + j * N]), "d"(t1));
			c[i + j + 1] += to_u64(p_hi);
			asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"((double)a[t + i * N]), "d"((double)b[t + j * N]), "d"(sub));
			c[i + j] += to_u64(p_lo);
		}
	}
	// Cross terms for the peeled-off top bits: a10*2^52*b + b10*2^52*a,
	// plus a10*b10*2^104 on the highest limb.
	uint64_t maska = -(int64_t)a10;
	uint64_t maskb = -(int64_t)b10;
	for (int i = 0; i < 10; i++) {
		c[10 + i] = c[10 + i] + (a[t+i*N] & maskb) + (b[t+i*N] & maska);
	}
	c[19] += (uint64_t)(a10 & b10) << 52;

	// Restore the callers' top limbs, which were masked above.
	a[t+9*N] |= ((uint64_t)a10 << 52);
	b[t+9*N] |= ((uint64_t)b10 << 52);

	// Modular reduction: fold the high half c[10..19] into c[0..9].
	// NOTE(review): the >>1 / <<51 split suggests the modulus has a
	// half-limb offset — confirm against the prime's definition.
	for (int i = 0; i < 9; i++) {
		c[i] = c[i] + (c[10 + i] >> 1);
		c[i] = c[i] + ((uint64_t)(c[11 + i] & 1) << 51);
	}
	c[9] = c[9] + (c[19] >> 1) + ((uint64_t)(c[10] & 1) << 52);

	// Two carry-propagation passes bring every limb back under 52 bits
	// (53 for the top limb).
	uint64_t tmp = c[9] >> 53;
	c[9] &= 0x1FFFFFFFFFFFFF;
	c[0] += tmp;
	for (int i = 0; i < 9; i++) {
		c[i + 1] += (c[i] >> 52);
		c[i] &= 0xFFFFFFFFFFFFF;
	}
	tmp = c[9] >> 53;
	c[9] &= 0x1FFFFFFFFFFFFF;
	c[0] += tmp;
	for (int i = 0; i < 9; i++) {
		c[i + 1] += (c[i] >> 52);
		r[t+N*i] = c[i] & 0xFFFFFFFFFFFFF;
	}
	r[t + N * (9)] = c[9];
}


//benchmark kernel: repeated Karatsuba multiply + reduction, element-major layout
__global__ void kara_mul_reduction(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	int thread_id = blockIdx.x * blockDim.x * blockDim.y
		+ threadIdx.y * blockDim.x + threadIdx.x;
	int base = thread_id * 10;

	EltBN521_x64_fp_buffer product = { 0 };
	EltBN521_x64_fp res, lhs, rhs;
	// Stage this thread's operands out of global memory.
	for (int k = 0; k < NUM_DIGITS_521_x64_fp; k++) {
		lhs[k] = a[base + k];
		rhs[k] = b[base + k];
	}
	// Repeat the full multiply/reduce/canonicalise pipeline for timing.
	for (int k = 0; k < Iterate; k++) {
		kara_mul(product, lhs, rhs);
		reduction(product, product);
		simplification_unique(res, product);
	}
	for (int k = 0; k < NUM_DIGITS_521_x64_fp; k++) {
		c[base + k] = res[k];
	}
}

//benchmark kernel: repeated Karatsuba squaring + reduction, element-major layout
__global__ void kara_sqr_reduction(argElement_x64 c, argElement_x64 a) {
	int thread_id = blockIdx.x * blockDim.x * blockDim.y
		+ threadIdx.y * blockDim.x + threadIdx.x;
	int base = thread_id * 10;

	EltBN521_x64_fp_buffer product = { 0 };
	EltBN521_x64_fp res, operand;
	// Stage this thread's operand out of global memory.
	for (int k = 0; k < NUM_DIGITS_521_x64_fp; k++) {
		operand[k] = a[base + k];
	}
	// Repeat the full square/reduce/canonicalise pipeline for timing.
	for (int k = 0; k < Iterate; k++) {
		kara_sqr(product, operand);
		reduction(product, product);
		simplification_unique(res, product);
	}
	for (int k = 0; k < NUM_DIGITS_521_x64_fp; k++) {
		c[base + k] = res[k];
	}
}


//Karatsuba multiply fused with reduction, limb-major ("consecutive") layout:
//limb i of thread t is stored at x[t + i*N] so warp accesses are coalesced.
__global__ void kara_mul_reduction_consecutive(argElement_x64 r, argElement_x64 a, argElement_x64 b) {
	int bid = blockIdx.x * blockDim.x * blockDim.y;
	int t = bid + (threadIdx.x + threadIdx.y * blockDim.x);

	EltBN521_x64_fp_buffer c = { 0 };
	uint64_t a_sum[5], b_sum[5], r_sum[10];
	uint32_t a10, b10;
	// Peel off bit 52 of each top limb; handled after the core product.
	a10 = a[t+9*N] >> 52;
	b10 = b[t + 9 * N] >> 52;
	a[t + 9 * N] &= 0xFFFFFFFFFFFFF;
	b[t + 9 * N] &= 0xFFFFFFFFFFFFF;

	uint64_t carry_a, carry_b = 0;
	// Karatsuba half-sums: x_sum = lo_half + hi_half, carries propagated.
	a_sum[0] = 0;
	b_sum[0] = 0;
	for (int i = 0; i < 4; i++) {
		a_sum[i] = a_sum[i] + a[t+N*i] + a[t + N * (5 + i)];//<2^53
		b_sum[i] = b_sum[i] + b[t + N * i] + b[t + N * (5 + i)];//<2^53
		a_sum[i + 1] = (a_sum[i] >> 52);
		b_sum[i + 1] = (b_sum[i] >> 52);
		a_sum[i] &= 0xFFFFFFFFFFFFF;
		b_sum[i] &= 0xFFFFFFFFFFFFF;
	}
	a_sum[4] = a_sum[4] + a[t + N * 4] + a[t + N * 9];
	b_sum[4] = b_sum[4] + b[t + N * 4] + b[t + N * 9];
	carry_a = -(int64_t)(a_sum[4] >> 52);	// all-ones mask iff the half-sum overflowed
	carry_b = -(int64_t)(b_sum[4] >> 52);
	a_sum[4] &= 0xFFFFFFFFFFFFF;
	b_sum[4] &= 0xFFFFFFFFFFFFF;

	// Doubles with exponent 2^104; used to split each 104-bit FMA product
	// into exact high/low 52-bit parts (to_double reinterprets raw bits).
	const double t1 = to_double(0x4670000000000000);
	const double t2 = to_double(0x4670000000000001);
	double p_hi = 0, p_lo = 0, sub = 0;

	// Pre-bias c[0..9] so the exponent fields accumulated by the FMA trick
	// cancel out of the low-half product.
	for (int i = 0; i < 5; i++) {
		c[i] = (uint64_t)0x467 * i + (uint64_t)0x433 * (i + 1);
		c[i] = -(int64_t)(c[i] & 0xFFF) << 52;
		c[9 - i] = (uint64_t)0x467 * (i + 1) + (uint64_t)0x433 * i;
		c[9 - i] = -(int64_t)(c[9 - i] & 0xFFF) << 52;
	}	
	// Low half: c[0..9] += a_lo * b_lo via the biased-FMA hi/lo split.
	for (int i = 0; i < 5; i++) {
		for (int j = 0; j < 5; j++) {
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"((double)a[t+N*i]), "d"((double)b[t + N * j]), "d"(t1));
			c[i + j + 1] += to_u64(p_hi);
			asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"((double)a[t + N * i]), "d"((double)b[t + N * j]), "d"(sub));
			c[i + j] += to_u64(p_lo);
		}
	}
	// Same bias for the high-half product c[10..19].
	for (int i = 0; i < 5; i++) {
		c[10+i] = (uint64_t)0x467 * i + (uint64_t)0x433 * (i + 1);
		c[10 + i] = -(int64_t)(c[10+i] & 0xFFF) << 52;
		c[10 + 9 - i] = (uint64_t)0x467 * (i + 1) + (uint64_t)0x433 * i;
		c[10 + 9 - i] = -(int64_t)(c[10+ 9 - i] & 0xFFF) << 52;
	}
	// High half: c[10..19] += a_hi * b_hi.
	for (int i = 0; i < 5; i++) {
		for (int j = 0; j < 5; j++) {
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"((double)a[t + N * (5+i)]), "d"((double)b[t + N * (5+j)]), "d"(t1));
			c[10+ i + j + 1] += to_u64(p_hi);
			asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"((double)a[t + N * (5 + i)]), "d"((double)b[t + N * (5 + j)]), "d"(sub));
			c[10+ i + j] += to_u64(p_lo);
		}
	}
	mul_sub_routine(r_sum, a_sum, b_sum);//<2^56
	// Fold the half-sum overflow masks back in: cross terms ...
	for (int i = 0; i < 5; i++) {
		r_sum[i + 5] = r_sum[i + 5] + (carry_a & b_sum[i]) + (carry_b & a_sum[i]);//<2^58
	}
	carry_a = (-(int64_t)carry_a) & (-(int64_t)carry_b);//add to c[15]

	// Bias with 2^58 so the Karatsuba subtractions cannot borrow below zero;
	// the bias is repaid from the next limb via -(1<<6).
	for (int i = 0; i < 9; i++) {
		r_sum[i] += c[5 + i];
		r_sum[i] |= ((uint64_t)1 << 58);
		r_sum[i + 1] -= (1 << 6);
	}
	r_sum[9] += c[14];//r_sum[9]<c[9]+c[19]? Assume not, if not c[15]-?
	r_sum[9] |= ((uint64_t)1 << 58);
	carry_a -= (1 << 6);

	// middle = (lo+hi)_a * (lo+hi)_b - lo*lo - hi*hi, at weight 2^260.
	for (int i = 0; i < 10; i++) {
		r_sum[i] -= c[i];
	}
	for (int i = 0; i < 10; i++) {
		c[5 + i] = r_sum[i] - c[10 + i];
	}
	c[15] += carry_a;

	// Cross terms for the peeled top bits, plus a10*b10*2^104 on top.
	uint64_t maska = -(int64_t)a10;
	uint64_t maskb = -(int64_t)b10;
	for (int i = 0; i < 10; i++) {
		c[10 + i] = c[10 + i] + (a[t + N * i] & maskb) + (b[t + N * i] & maska);
	}
	c[19] += (uint64_t)(a10 & b10) << 52;

	// Restore the callers' top limbs.
	a[t + N * 9] |= ((uint64_t)a10 << 52);
	b[t + N * 9] |= ((uint64_t)b10 << 52);

	// Reduction: fold c[10..19] into c[0..9]. FIXED: the original wrote to
	// c[N] (N = 56*BLOCK_SIZE, far out of bounds for the 20-limb buffer)
	// instead of c[i], discarding the accumulation — compare the identical
	// loop in mul_fma_10_reduction_consecutive.
	for (int i = 0; i < 9; i++) {
		c[i] = c[i] + (c[10 + i] >> 1);
		c[i] = c[i] + ((uint64_t)(c[11 + i] & 1) << 51);
	}
	c[9] = c[9] + (c[19] >> 1) + ((uint64_t)(c[10] & 1) << 52);

	// Two carry-propagation passes bring every limb under 52 bits (53 on top).
	uint64_t tmp = c[9] >> 53;
	c[9] &= 0x1FFFFFFFFFFFFF;
	c[0] += tmp;
	for (int i = 0; i < 9; i++) {
		c[i + 1] += (c[i] >> 52);
		c[i] &= 0xFFFFFFFFFFFFF;
	}
	tmp = c[9] >> 53;
	c[9] &= 0x1FFFFFFFFFFFFF;
	c[0] += tmp;
	for (int i = 0; i < 9; i++) {
		c[i + 1] += (c[i] >> 52);
		r[t + N * i] = c[i] & 0xFFFFFFFFFFFFF;
	}
	r[t + N * (9)] = c[9];
}

//benchmark kernel: 100 schoolbook-based inversions per thread, element-major layout
__global__ void inv_1_kernel(argElement_x64 c, argElement_x64 a) {
	int thread_id = blockIdx.x * blockDim.x * blockDim.y
		+ threadIdx.y * blockDim.x + threadIdx.x;
	int base = thread_id * 10;
	// Repeat the inversion for timing purposes.
	for (int k = 0; k < 100; k++) {
		inv_1(c + base, a + base);
	}
}

//benchmark kernel: 100 Karatsuba-based inversions per thread, element-major layout
__global__ void inv_2_kernel(argElement_x64 c, argElement_x64 a) {
	int thread_id = blockIdx.x * blockDim.x * blockDim.y
		+ threadIdx.y * blockDim.x + threadIdx.x;
	int base = thread_id * 10;
	// Repeat the inversion for timing purposes.
	for (int k = 0; k < 100; k++) {
		inv_2(c + base, a + base);
	}
}

extern "C"
void mul_fma10_reductionAPI(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	// Host wrapper: copies N operand pairs to the device, times the
	// mul_fma_10_reduction and mul_1_kernel benchmarks (both write dev_c;
	// the second overwrites the first), and copies the result back.
	// Element-major layout: element t occupies offsets [t*10, t*10+10).
	argElement_x64 dev_a = NULL;
	argElement_x64 dev_b = NULL;
	argElement_x64 dev_c = NULL;

	dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT);//BLOCK_SIZE threads per block
	int numBlocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
	int numThreads = numBlocks * BLOCK_SIZE;

	int deviceNum = 0;
	float delta_time1 = 0.0f;
	cudaEvent_t kernel_start1, kernel_stop1;
	struct cudaDeviceProp device_prop;

	// Select which GPU the program runs on.
	cudaSetDevice(deviceNum);
	int tmpN = 10 * sizeof(uint64_t);	// bytes per 10-limb element

	// Allocate and clear the device buffers for c = a * b.
	cudaMalloc((void**)& dev_c, tmpN * numThreads);
	cudaMalloc((void**)& dev_a, tmpN * numThreads);
	cudaMalloc((void**)& dev_b, tmpN * numThreads);
	cudaMemset(dev_c, 0, tmpN * numThreads);
	cudaMemset(dev_a, 0, tmpN * numThreads);
	cudaMemset(dev_b, 0, tmpN * numThreads);

	cudaMemcpy(dev_a, a, 10 * sizeof(uint64_t) * numThreads, cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b, b, 10 * sizeof(uint64_t) * numThreads, cudaMemcpyHostToDevice);

	cudaEventCreate(&kernel_start1);
	cudaEventCreateWithFlags(&kernel_stop1, cudaEventBlockingSync);
	cudaGetDeviceProperties(&device_prop, deviceNum);
	cudaEventRecord(kernel_start1, 0);

	mul_fma_10_reduction << <numBlocks, threadsPerBlock >> > (dev_c, dev_a, dev_b);
	mul_1_kernel << <numBlocks, threadsPerBlock >> > (dev_c, dev_a, dev_b);
	// Kernel launches fail silently; surface configuration errors here.
	cudaError_t launch_err = cudaGetLastError();
	if (launch_err != cudaSuccess) {
		cerr << "mul_fma10_reductionAPI launch failed: " << cudaGetErrorString(launch_err) << endl;
	}
	cudaEventRecord(kernel_stop1, 0);
	cudaEventSynchronize(kernel_stop1);
	cudaEventElapsedTime(&delta_time1, kernel_start1, kernel_stop1);
	//printf("mul_fma_10 time: %.2fms. Execute %d operations.\n", delta_time1, N);

	cudaMemcpy(c, dev_c, tmpN * N, cudaMemcpyDeviceToHost);

	// Destroy the timing events (previously leaked) and free device memory.
	cudaEventDestroy(kernel_start1);
	cudaEventDestroy(kernel_stop1);
	cudaFree(dev_c);
	cudaFree(dev_a);
	cudaFree(dev_b);
}

extern "C"
void mul_fma10_reductionAPI_consecutive(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	// Host wrapper: times mul_fma_10_reduction_consecutive over N elements.
	// Limb-major layout: limb i of element t lives at offset t + i*N.
	argElement_x64 dev_a = NULL;
	argElement_x64 dev_b = NULL;
	argElement_x64 dev_c = NULL;

	dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT);//BLOCK_SIZE threads per block
	int numBlocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
	int numThreads = numBlocks * BLOCK_SIZE;

	int deviceNum = 0;
	float delta_time1 = 0.0f;
	cudaEvent_t kernel_start1, kernel_stop1;
	struct cudaDeviceProp device_prop;

	// Select which GPU the program runs on.
	cudaSetDevice(deviceNum);
	int tmpN = 10 * sizeof(uint64_t);	// bytes per 10-limb element

	// Allocate and clear the device buffers for c = a * b.
	cudaMalloc((void**)& dev_c, tmpN * numThreads);
	cudaMalloc((void**)& dev_a, tmpN * numThreads);
	cudaMalloc((void**)& dev_b, tmpN * numThreads);
	cudaMemset(dev_c, 0, tmpN * numThreads);
	cudaMemset(dev_a, 0, tmpN * numThreads);
	cudaMemset(dev_b, 0, tmpN * numThreads);

	cudaMemcpy(dev_a, a, 10 * sizeof(uint64_t) * numThreads, cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b, b, 10 * sizeof(uint64_t) * numThreads, cudaMemcpyHostToDevice);

	cudaEventCreate(&kernel_start1);
	cudaEventCreateWithFlags(&kernel_stop1, cudaEventBlockingSync);
	cudaGetDeviceProperties(&device_prop, deviceNum);
	cudaEventRecord(kernel_start1, 0);

	mul_fma_10_reduction_consecutive << <numBlocks, threadsPerBlock >> > (dev_c, dev_a, dev_b);
	// Kernel launches fail silently; surface configuration errors here.
	cudaError_t launch_err = cudaGetLastError();
	if (launch_err != cudaSuccess) {
		cerr << "mul_fma10_reductionAPI_consecutive launch failed: " << cudaGetErrorString(launch_err) << endl;
	}
	cudaEventRecord(kernel_stop1, 0);
	cudaEventSynchronize(kernel_stop1);
	cudaEventElapsedTime(&delta_time1, kernel_start1, kernel_stop1);
	//printf("mul_fma_10 consecutive time: %.2fms. Execute %d operations.\n", delta_time1, N);

	cudaMemcpy(c, dev_c, tmpN * N, cudaMemcpyDeviceToHost);

	// Destroy the timing events (previously leaked) and free device memory.
	cudaEventDestroy(kernel_start1);
	cudaEventDestroy(kernel_stop1);
	cudaFree(dev_c);
	cudaFree(dev_a);
	cudaFree(dev_b);
}

extern "C"
void kara_mul_reductionAPI(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	// Host wrapper: times the kara_mul_reduction benchmark kernel over N
	// element pairs. Element-major layout: element t at offset t*10.
	argElement_x64 dev_a = NULL;
	argElement_x64 dev_b = NULL;
	argElement_x64 dev_c = NULL;

	dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT);//BLOCK_SIZE threads per block
	int numBlocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
	int numThreads = numBlocks * BLOCK_SIZE;

	int deviceNum = 0;
	float delta_time1 = 0.0f;
	cudaEvent_t kernel_start1, kernel_stop1;
	struct cudaDeviceProp device_prop;

	// Select which GPU the program runs on.
	cudaSetDevice(deviceNum);
	int tmpN = 10 * sizeof(uint64_t);	// bytes per 10-limb element

	// Allocate and clear the device buffers for c = a * b.
	cudaMalloc((void**)& dev_c, tmpN * numThreads);
	cudaMalloc((void**)& dev_a, tmpN * numThreads);
	cudaMalloc((void**)& dev_b, tmpN * numThreads);
	cudaMemset(dev_c, 0, tmpN * numThreads);
	cudaMemset(dev_a, 0, tmpN * numThreads);
	cudaMemset(dev_b, 0, tmpN * numThreads);

	cudaMemcpy(dev_a, a, 10 * sizeof(uint64_t) * numThreads, cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b, b, 10 * sizeof(uint64_t) * numThreads, cudaMemcpyHostToDevice);

	cudaEventCreate(&kernel_start1);
	cudaEventCreateWithFlags(&kernel_stop1, cudaEventBlockingSync);
	cudaGetDeviceProperties(&device_prop, deviceNum);
	cudaEventRecord(kernel_start1, 0);

	kara_mul_reduction << <numBlocks, threadsPerBlock >> > (dev_c, dev_a, dev_b);
	// Kernel launches fail silently; surface configuration errors here.
	cudaError_t launch_err = cudaGetLastError();
	if (launch_err != cudaSuccess) {
		cerr << "kara_mul_reductionAPI launch failed: " << cudaGetErrorString(launch_err) << endl;
	}
	cudaEventRecord(kernel_stop1, 0);
	cudaEventSynchronize(kernel_stop1);
	cudaEventElapsedTime(&delta_time1, kernel_start1, kernel_stop1);
	//printf("kara_mul time: %.2fms. Execute %d operations.\n", delta_time1, N);

	cudaMemcpy(c, dev_c, tmpN * N, cudaMemcpyDeviceToHost);

	// Destroy the timing events (previously leaked) and free device memory.
	cudaEventDestroy(kernel_start1);
	cudaEventDestroy(kernel_stop1);
	cudaFree(dev_c);
	cudaFree(dev_a);
	cudaFree(dev_b);
}

extern "C"
void kara_sqr_reductionAPI(argElement_x64 c, argElement_x64 a) {
	// Host wrapper: times the kara_sqr_reduction benchmark kernel over N
	// elements. Element-major layout: element t at offset t*10.
	argElement_x64 dev_a = NULL;
	argElement_x64 dev_c = NULL;

	dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT);//BLOCK_SIZE threads per block
	int numBlocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
	int numThreads = numBlocks * BLOCK_SIZE;

	int deviceNum = 0;
	float delta_time1 = 0.0f;
	cudaEvent_t kernel_start1, kernel_stop1;
	struct cudaDeviceProp device_prop;

	// Select which GPU the program runs on.
	cudaSetDevice(deviceNum);
	int tmpN = 10 * sizeof(uint64_t);	// bytes per 10-limb element

	// Allocate and clear the device buffers for c = a^2.
	cudaMalloc((void**)& dev_c, tmpN * numThreads);
	cudaMalloc((void**)& dev_a, tmpN * numThreads);
	cudaMemset(dev_c, 0, tmpN * numThreads);
	cudaMemset(dev_a, 0, tmpN * numThreads);

	cudaMemcpy(dev_a, a, 10 * sizeof(uint64_t) * numThreads, cudaMemcpyHostToDevice);

	cudaEventCreate(&kernel_start1);
	cudaEventCreateWithFlags(&kernel_stop1, cudaEventBlockingSync);
	cudaGetDeviceProperties(&device_prop, deviceNum);
	cudaEventRecord(kernel_start1, 0);

	kara_sqr_reduction << <numBlocks, threadsPerBlock >> > (dev_c, dev_a);
	// Kernel launches fail silently; surface configuration errors here.
	cudaError_t launch_err = cudaGetLastError();
	if (launch_err != cudaSuccess) {
		cerr << "kara_sqr_reductionAPI launch failed: " << cudaGetErrorString(launch_err) << endl;
	}
	cudaEventRecord(kernel_stop1, 0);
	cudaEventSynchronize(kernel_stop1);
	cudaEventElapsedTime(&delta_time1, kernel_start1, kernel_stop1);
	//printf("kara_sqr time: %.2fms. Execute %d operations.\n", delta_time1, N);

	cudaMemcpy(c, dev_c, tmpN * N, cudaMemcpyDeviceToHost);

	// Destroy the timing events (previously leaked) and free device memory.
	cudaEventDestroy(kernel_start1);
	cudaEventDestroy(kernel_stop1);
	cudaFree(dev_c);
	cudaFree(dev_a);
}


extern "C"
void inv_1API(argElement_x64 c, argElement_x64 a) {
	// Host wrapper: times the inv_1_kernel (schoolbook-based inversion)
	// benchmark over N elements. Element-major layout: element t at t*10.
	argElement_x64 dev_a = NULL;
	argElement_x64 dev_c = NULL;

	dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT);//BLOCK_SIZE threads per block
	int numBlocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
	int numThreads = numBlocks * BLOCK_SIZE;

	int deviceNum = 0;
	float delta_time1 = 0.0f;
	cudaEvent_t kernel_start1, kernel_stop1;
	struct cudaDeviceProp device_prop;

	// Select which GPU the program runs on.
	cudaSetDevice(deviceNum);
	int tmpN = 10 * sizeof(uint64_t);	// bytes per 10-limb element

	// Allocate and clear the device buffers for c = a^(-1).
	cudaMalloc((void**)& dev_c, tmpN * numThreads);
	cudaMalloc((void**)& dev_a, tmpN * numThreads);
	cudaMemset(dev_c, 0, tmpN * numThreads);
	cudaMemset(dev_a, 0, tmpN * numThreads);

	cudaMemcpy(dev_a, a, 10 * sizeof(uint64_t) * numThreads, cudaMemcpyHostToDevice);

	cudaEventCreate(&kernel_start1);
	cudaEventCreateWithFlags(&kernel_stop1, cudaEventBlockingSync);
	cudaGetDeviceProperties(&device_prop, deviceNum);
	cudaEventRecord(kernel_start1, 0);

	inv_1_kernel << <numBlocks, threadsPerBlock >> > (dev_c, dev_a);
	// Kernel launches fail silently; surface configuration errors here.
	cudaError_t launch_err = cudaGetLastError();
	if (launch_err != cudaSuccess) {
		cerr << "inv_1API launch failed: " << cudaGetErrorString(launch_err) << endl;
	}
	cudaEventRecord(kernel_stop1, 0);
	cudaEventSynchronize(kernel_stop1);
	cudaEventElapsedTime(&delta_time1, kernel_start1, kernel_stop1);
	//printf("inv_1 time: %.2fms. Execute %d operations.\n", delta_time1, N);

	cudaMemcpy(c, dev_c, tmpN * N, cudaMemcpyDeviceToHost);

	// Destroy the timing events (previously leaked) and free device memory.
	cudaEventDestroy(kernel_start1);
	cudaEventDestroy(kernel_stop1);
	cudaFree(dev_c);
	cudaFree(dev_a);
}

extern "C"
void inv_2API(argElement_x64 c, argElement_x64 a) {
	// Host wrapper: times the inv_2_kernel (Karatsuba-based inversion)
	// benchmark over N elements. Element-major layout: element t at t*10.
	argElement_x64 dev_a = NULL;
	argElement_x64 dev_c = NULL;

	dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT);//BLOCK_SIZE threads per block
	int numBlocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
	int numThreads = numBlocks * BLOCK_SIZE;

	int deviceNum = 0;
	float delta_time1 = 0.0f;
	cudaEvent_t kernel_start1, kernel_stop1;
	struct cudaDeviceProp device_prop;

	// Select which GPU the program runs on.
	cudaSetDevice(deviceNum);
	int tmpN = 10 * sizeof(uint64_t);	// bytes per 10-limb element

	// Allocate and clear the device buffers for c = a^(-1).
	cudaMalloc((void**)& dev_c, tmpN * numThreads);
	cudaMalloc((void**)& dev_a, tmpN * numThreads);
	cudaMemset(dev_c, 0, tmpN * numThreads);
	cudaMemset(dev_a, 0, tmpN * numThreads);

	cudaMemcpy(dev_a, a, 10 * sizeof(uint64_t) * numThreads, cudaMemcpyHostToDevice);

	cudaEventCreate(&kernel_start1);
	cudaEventCreateWithFlags(&kernel_stop1, cudaEventBlockingSync);
	cudaGetDeviceProperties(&device_prop, deviceNum);
	cudaEventRecord(kernel_start1, 0);

	inv_2_kernel << <numBlocks, threadsPerBlock >> > (dev_c, dev_a);
	// Kernel launches fail silently; surface configuration errors here.
	cudaError_t launch_err = cudaGetLastError();
	if (launch_err != cudaSuccess) {
		cerr << "inv_2API launch failed: " << cudaGetErrorString(launch_err) << endl;
	}
	cudaEventRecord(kernel_stop1, 0);
	cudaEventSynchronize(kernel_stop1);
	cudaEventElapsedTime(&delta_time1, kernel_start1, kernel_stop1);
	//printf("inv_2 time: %.2fms. Execute %d operations.\n", delta_time1, N);

	cudaMemcpy(c, dev_c, tmpN * N, cudaMemcpyDeviceToHost);

	// Destroy the timing events (previously leaked) and free device memory.
	cudaEventDestroy(kernel_start1);
	cudaEventDestroy(kernel_stop1);
	cudaFree(dev_c);
	cudaFree(dev_a);
}


extern "C"
void kara_mul_reductionAPI_consecutive(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	// Host wrapper: times kara_mul_reduction_consecutive over N elements.
	// Limb-major layout: limb i of element t lives at offset t + i*N.
	argElement_x64 dev_a = NULL;
	argElement_x64 dev_b = NULL;
	argElement_x64 dev_c = NULL;

	dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT);//BLOCK_SIZE threads per block
	int numBlocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
	int numThreads = numBlocks * BLOCK_SIZE;

	int deviceNum = 0;
	float delta_time1 = 0.0f;
	cudaEvent_t kernel_start1, kernel_stop1;
	struct cudaDeviceProp device_prop;

	// Select which GPU the program runs on.
	cudaSetDevice(deviceNum);
	int tmpN = 10 * sizeof(uint64_t);	// bytes per 10-limb element

	// Allocate and clear the device buffers for c = a * b.
	cudaMalloc((void**)& dev_c, tmpN * numThreads);
	cudaMalloc((void**)& dev_a, tmpN * numThreads);
	cudaMalloc((void**)& dev_b, tmpN * numThreads);
	cudaMemset(dev_c, 0, tmpN * numThreads);
	cudaMemset(dev_a, 0, tmpN * numThreads);
	cudaMemset(dev_b, 0, tmpN * numThreads);

	cudaMemcpy(dev_a, a, 10 * sizeof(uint64_t) * numThreads, cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b, b, 10 * sizeof(uint64_t) * numThreads, cudaMemcpyHostToDevice);

	cudaEventCreate(&kernel_start1);
	cudaEventCreateWithFlags(&kernel_stop1, cudaEventBlockingSync);
	cudaGetDeviceProperties(&device_prop, deviceNum);
	cudaEventRecord(kernel_start1, 0);

	kara_mul_reduction_consecutive << <numBlocks, threadsPerBlock >> > (dev_c, dev_a, dev_b);
	// Kernel launches fail silently; surface configuration errors here.
	cudaError_t launch_err = cudaGetLastError();
	if (launch_err != cudaSuccess) {
		cerr << "kara_mul_reductionAPI_consecutive launch failed: " << cudaGetErrorString(launch_err) << endl;
	}
	cudaEventRecord(kernel_stop1, 0);
	cudaEventSynchronize(kernel_stop1);
	cudaEventElapsedTime(&delta_time1, kernel_start1, kernel_stop1);
	//printf("kara_mul consecutive time: %.2fms. Execute %d operations.\n", delta_time1, N);

	cudaMemcpy(c, dev_c, tmpN * N, cudaMemcpyDeviceToHost);

	// Destroy the timing events (previously leaked) and free device memory.
	cudaEventDestroy(kernel_start1);
	cudaEventDestroy(kernel_stop1);
	cudaFree(dev_c);
	cudaFree(dev_a);
	cudaFree(dev_b);
}


// Canonicalise a num-limb element in place, then write it to `file` as a
// big-endian hex string (13 hex digits per 52-bit limb).
// FIXED: the original ignored `file` and always wrote to stdout via printf;
// the %llX conversion also needs an explicit unsigned long long cast to be
// portable for uint64_t.
void print(FILE* file, argElement_x64 c, int num) {

	if (num == 10) {
		simplification_unique(c, c);
	}
	else {
		simplification_fast(c, num);
	}

	fprintf(file, "0X");
	for (int i = num - 1; i >= 0; i--) {
		fprintf(file, "%013llX", (unsigned long long)c[i]);
	}
	fprintf(file, "\n");
}


//only for EltBN521_x64_fp
void AssignNfp(uint64_t* dst, uint64_t* src, int n) {
	// Replicate the 10-limb element `src` n times into `dst` in
	// element-major layout: copy j occupies dst[10*j .. 10*j + 9].
	for (int j = 0; j < n; j++) {
		for (int i = 0; i < 10; i++) {
			dst[10 * j + i] = src[i];
		}
	}
}

void AssignNfp_consecutive(uint64_t* dst, uint64_t* src, int n) {
	// Replicate the 10-limb element `src` n times into `dst` in limb-major
	// (coalescing-friendly) layout: limb i of copy j lives at dst[j + i*n].
	for (int j = 0; j < n; j++) {
		for (int i = 0; i < 10; i++) {
			dst[j + i * n] = src[i];
		}
	}
}
void AssignN32(argElement_x32 dst, argElement_x32 src, int n) {
	// Replicate the 17-digit 32-bit element `src` n times into `dst`,
	// element-major: copy j occupies dst[17*j .. 17*j + 16].
	for (int j = 0; j < n; j++) {
		for (int i = 0; i < NUM_DIGITS_521_x32; i++) {
			dst[NUM_DIGITS_521_x32 * j + i] = src[i];
		}
	}
}

int main() {
	// Benchmark driver: replicates two test vectors N times in both memory
	// layouts, runs each modular-multiplication variant, and prints spot
	// checks of the first and last elements.
	EltBN521_x64_fp A = { 0x4534934453435,0x4734462363435,0x7731232453435,0x7734925323435,0x1225353535345,
		0x4634925353435,0x4634925353435,0x4634925353435,0x4634925353435,0x14634925353435 };
	EltBN521_x64_fp B = { 0xFFF35124FFFFF,0xFFF24144FFFFF,0xFFFF1252FFFFF,0xFFF152FFFFFFF,0xFFFF4542222FF,
		0xFFFFFFFFFFFFF,0xFFFFFFFFFFFFF,0xFFFFFFFFFFFFF,0xFFFFFFFFFFFFF,0x1FFFFFFFFFFFFF };
	uint64_t* AN = new uint64_t[10 * N];
	uint64_t* BN = new uint64_t[10 * N];
	uint64_t* CN = new uint64_t[10 * N];

	// ---- element-major layout ----
	AssignNfp(AN, A, N);
	AssignNfp(BN, B, N);
	print(stdout, AN, 10);
	print(stdout, BN, 10);
	print(stdout, AN + (N - 1) * 10, 10);
	print(stdout, BN + (N - 1) * 10, 10);
	kara_mul_reductionAPI(CN, AN, AN);
	printf("A^2  mod P kara mul:=\n");
	print(stdout, CN, 10);
	print(stdout, CN + (N - 1) * 10, 10);
	kara_sqr_reductionAPI(CN, AN);
	printf("A^2  mod P kara sqr:=\n");
	print(stdout, CN, 10);
	print(stdout, CN + (N - 1) * 10, 10);
	mul_fma10_reductionAPI(CN, AN, BN);
	printf("mul_fma10_reductionAPI:=\n");
	print(stdout, CN, 10);
	print(stdout, CN + (N - 1) * 10, 10);
	kara_mul_reductionAPI(CN, AN, BN);
	printf("kara_mul_reductionAPI:=\n");
	print(stdout, CN, 10);
	print(stdout, CN + (N - 1) * 10, 10);

	// ---- limb-major (consecutive/coalesced) layout ----
	AssignNfp_consecutive(AN, A, N);
	AssignNfp_consecutive(BN, B, N);
	print(stdout, AN, 10);
	print(stdout, BN, 10);
	print(stdout, AN + (N - 1) * 10, 10);
	print(stdout, BN + (N - 1) * 10, 10);
	mul_fma10_reductionAPI_consecutive(CN, AN, BN);

	kara_mul_reductionAPI_consecutive(CN, AN, BN);

	// ---- 32-bit integer implementation ----
	uint32_t A32[17] = { 0x1234124,0x324234,0x34242332,0x342342,0x5342423,0x34242332, 0x34242332, 0x34242332, 0x34242332, 0x34242332, 0x34242332, 0x34242332,0x34242332, 0x34242332, 0x34242332, 0x34242332, 0x34242332};
	uint32_t B32[17] = { 0x1234124,0x324234,0x34242332,0x342342,0x5342423,0x34242332, 0x34242332, 0x34242332, 0x34242332, 0x34242332, 0x34242332, 0x34242332 ,0x34242332, 0x34242332, 0x34242332, 0x34242332, 0x34242332};

	uint32_t* A32N = new uint32_t[NUM_DIGITS_521_x32 * N];
	uint32_t* B32N = new uint32_t[NUM_DIGITS_521_x32 * N];
	uint32_t* C32N = new uint32_t[NUM_DIGITS_521_x32 * N];
	AssignN32(A32N, A32, N);
	AssignN32(B32N, B32, N);

	intmulAPI(C32N, A32N, B32N);
	intsqrAPI(C32N, A32N);

	// Arrays allocated with new[] must be released with delete[] (the
	// original used scalar delete, which is undefined behaviour), and the
	// 32-bit buffers were never freed at all.
	delete[] AN;
	delete[] BN;
	delete[] CN;
	delete[] A32N;
	delete[] B32N;
	delete[] C32N;
	return 0;
}
