﻿#include<iostream>
#include<limits>
#include<iomanip>
#include<cstdint>
#include<sm_30_intrinsics.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

using namespace std;


#define argElement_x64 uint64_t*
#define argElement_x32 uint32_t*
#define argElement_fp double*
#define NUM_DIGITS_383_x64 6
#define NUM_DIGITS_383_x32 12
#define NUM_DIGITS_383_x64_fp 8

#define BLOCK_WIDTH 32
#define BLOCK_HEIGHT 4
#define BLOCK_SIZE (BLOCK_WIDTH*BLOCK_HEIGHT)

typedef uint64_t EltBN383_x64[6];
typedef uint32_t EltBN383_x32[12];
typedef uint64_t EltBN383_x64_fp[8];
typedef uint64_t EltBN383_x64_fp_buffer[16];
typedef double EltBN383_fp[8];

#define N (56*BLOCK_SIZE)
#define Iterate 1000000

//Using Integer number. 12 32-bit with the last digit being 31 bits

//p=2^383-187
//reduction for integer implementation
__device__ void intreduction(argElement_x32 r, argElement_x32 c) {
	// Reduce a 24-limb (32-bit limbs, top limb 31 bits) product c modulo
	// p = 2^383-187: fold the high 12 limbs into the low 12, then run a second
	// folding round into r.
	// NOTE(review): the fold multiplier 384 does not obviously match
	// 2^383 ≡ 187 (mod p) — confirm against the reference algorithm.
	uint64_t t = 0;
	for (int i = 0;i < NUM_DIGITS_383_x32 -1;i++) {
		t = (uint64_t)c[NUM_DIGITS_383_x32 + i] * 384 + c[i] + t;
		c[i] = (uint32_t)t;
		t = (t >> 32);
	}
	t = (uint64_t)c[23] * 384 + c[11];
	c[11] = t & 0x7FFFFFFF;	// top limb keeps only 31 bits
	t = (t >> 31);
	// NOTE(review): the next line discards the carry just computed above, and
	// after the mask on c[11] the expression c[11] >> 31 is always 0, making the
	// second round fold nothing back. The previous value of t was likely meant
	// to be kept — verify.
	t = (c[NUM_DIGITS_383_x32-1] >> 31);
	//second round
	t = c[0] + t * 187;
	r[0] = (uint32_t)t;
	t = t >> 32;
	// NOTE(review): addc consumes CC.CF, but the carry flag is not guaranteed to
	// be preserved across separate asm() statements, and nothing sets CC before
	// the first addc here — inspect the generated SASS to confirm the chain.
	asm("addc.cc.u32 %0,%1,%2;":"=r"(r[1]) : "r"((uint32_t)t), "r"(c[1]));
	for (int i = 2;i < NUM_DIGITS_383_x32;i++) {
		asm("addc.cc.u32 %0,0,%1;":"=r"(r[i]) : "r"(c[i]));
	}
}

__device__ void intmul(argElement_x32 r, argElement_x32 a, argElement_x32 b) {
	// Schoolbook 12x12 32-bit-limb multiplication: accumulate the 24-limb
	// product into c, then reduce modulo p via intreduction.
	uint32_t c[24];
	for (int i = 0;i < 24;i++) {
		c[i] = 0;
	}

	// Row 0, low halves of a[j]*b[0]. c[j] is still 0 here, so mad.lo is exact
	// and no carry flag is needed.
	for (int j = 0;j < NUM_DIGITS_383_x32;j++) {
		// c[j]=0;
		asm("mad.lo.u32 %0,%1,%2,%3;":"=r"(c[j]) : "r"(a[j]), "r"(b[0]), "r"(c[j]));
	}
	// Row 0, high halves of a[j]*b[0], chained through CC.CF.
	asm("mad.hi.cc.u32 %0,%1,%2,%3;":"=r"(c[1]) : "r"(a[0]), "r"(b[0]), "r"(c[1]));
	// c[NUM_DIGITS_383_x32]=0;
	for (int j = 1;j < NUM_DIGITS_383_x32;j++) {
		asm("madc.hi.cc.u32 %0,%1,%2,%3;":"=r"(c[j + 1]) : "r"(a[j]), "r"(b[0]), "r"(c[j + 1]));
	}

	// Remaining rows: accumulate low halves, propagate the last carry into the
	// top limb, then accumulate high halves.
	// NOTE(review): every iteration of the inner low-half loop uses mad.lo.cc,
	// which SETS the carry flag but never consumes the previous one — intmul2
	// uses madc.lo.cc for j>=1 instead, which looks like the intended chain.
	// Also, CC preservation across separate asm() statements is not guaranteed.
	for (int i = 1;i < NUM_DIGITS_383_x32;i++) {
		for (int j = 0;j < NUM_DIGITS_383_x32;j++) {
			asm("mad.lo.cc.u32 %0,%1,%2,%3;":"=r"(c[i + j]) : "r"(a[j]), "r"(b[i]), "r"(c[i + j]));
		}
		asm("addc.cc.u32 %0, %1, 0;" : "=r"(c[i + NUM_DIGITS_383_x32]) : "r"(c[i + NUM_DIGITS_383_x32]));
		for (int j = 0;j < NUM_DIGITS_383_x32;j++) {
			asm("madc.hi.cc.u32 %0, %1, %2, %3;" : "=r"(c[i + j + 1]) : "r"(a[j]), "r"(b[i]), "r"(c[i + j + 1]));
		}
	}
	intreduction(r, c);
}

__device__ void intmul2(argElement_x32 r, argElement_x32 a, argElement_x32 b) {
	// Variant of intmul with fully carry-chained accumulation: each row adds
	// low halves (mad.lo.cc / madc.lo.cc), captures the trailing carry, then
	// adds high halves (mad.hi.cc / madc.hi.cc) with another trailing carry.
	// NOTE(review): correctness relies on CC.CF surviving between consecutive
	// asm() statements, which nvcc does not guarantee — verify in SASS.
	uint32_t c[24];
	for (int i = 0;i < 24;i++) {
		c[i] = 0;
	}
	asm("mad.lo.u32 %0,%1,%2,%3;":"=r"(c[0]) : "r"(a[0]), "r"(b[0]), "r"(c[0]));
	for (int j = 1;j < NUM_DIGITS_383_x32;j++) {
		asm("mad.lo.u32 %0,%1,%2,%3;":"=r"(c[j]) : "r"(a[j]), "r"(b[0]), "r"(c[j]));
	}
	asm("mad.hi.cc.u32 %0,%1,%2,%3;":"=r"(c[1]) : "r"(a[0]), "r"(b[0]), "r"(c[1]));
	for (int j = 1;j < NUM_DIGITS_383_x32;j++) {
		asm("madc.hi.cc.u32 %0,%1,%2,%3;":"=r"(c[j + 1]) : "r"(a[j]), "r"(b[0]), "r"(c[j + 1]));
	}
	asm("addc.u32 %0,%1,0;":"=r"(c[NUM_DIGITS_383_x32+1]) : "r"(c[NUM_DIGITS_383_x32+1]));
	for (int i = 1;i < NUM_DIGITS_383_x32;i++) {
		// Low halves of row i, carry-chained, trailing carry into c[12+i].
		asm("mad.lo.cc.u32 %0,%1,%2,%3;":"=r"(c[i]) : "r"(a[0]), "r"(b[i]), "r"(c[i]));
		for (int j = 1;j < NUM_DIGITS_383_x32;j++) {
			asm("madc.lo.cc.u32 %0,%1,%2,%3;":"=r"(c[i + j]) : "r"(a[j]), "r"(b[i]), "r"(c[i + j]));
		}
		asm("addc.cc.u32 %0,%1,0;":"=r"(c[NUM_DIGITS_383_x32 + i]) : "r"(c[NUM_DIGITS_383_x32 + i]));
		// High halves of row i, trailing carry into c[13+i].
		asm("mad.hi.cc.u32 %0,%1,%2,%3;":"=r"(c[i + 1]) : "r"(a[0]), "r"(b[i]), "r"(c[i + 1]));
		for (int j = 1;j < NUM_DIGITS_383_x32;j++) {
			asm("madc.hi.cc.u32 %0,%1,%2,%3;":"=r"(c[i + j + 1]) : "r"(a[j]), "r"(b[i]), "r"(c[i + j + 1]));
		}
		asm("addc.cc.u32 %0,%1,0;":"=r"(c[NUM_DIGITS_383_x32+1 + i]) : "r"(c[NUM_DIGITS_383_x32+1 + i]));
	}
	intreduction(r, c);
}
__device__ void intsqr(argElement_x32 r, argElement_x32 a) {
	// Squaring: accumulate all cross products a[i]*a[j] (i<j) into c, double
	// them with an add chain, then add the diagonal squares a[i]^2, and reduce.
	// NOTE(review): several chains begin with madc.*/addc, which READ the carry
	// flag; the initial CC state at those points (and CC preservation across
	// separate asm() statements) is not guaranteed by nvcc — verify in SASS.
	uint32_t c[24];
	for (int i = 0;i < 24;i++) {
		c[i] = 0;
	}
	// Cross products, organized so each pass touches disjoint limb positions.
	for (int i = 0;i <= (NUM_DIGITS_383_x32 / 2 - 2);i++) {
		//asm("mad.lo.cc.u32 %0, %1, %2, %3;" : "=r"(c[2 * i + 1]) : "r"(a[i + 1]), "r"(a[i]), "r"(c[2 * i + 1]));//
		for (int j = i + 1;j <= NUM_DIGITS_383_x32 - i - 2;j++) {
			asm("madc.lo.cc.u32 %0, %1, %2, %3;" : "=r"(c[i + j]) : "r"(a[j]), "r"(a[i]), "r"(c[i + j]));
		}
		for (int k = 0; k <= i; k++) {
			asm("madc.lo.cc.u32 %0, %1, %2, %3;" : "=r"(c[NUM_DIGITS_383_x32 + 2 * k - 1]) : "r"(a[NUM_DIGITS_383_x32 - 1 - i + k]), "r"(a[i + k]), "r"(c[NUM_DIGITS_383_x32 + 2 * k - 1]));
			asm("madc.hi.cc.u32 %0, %1, %2, %3;" : "=r"(c[NUM_DIGITS_383_x32 + 2 * k]) : "r"(a[NUM_DIGITS_383_x32 - 1 - i + k]), "r"(a[i + k]), "r"(c[NUM_DIGITS_383_x32 + 2 * k]));
		}
		// Dummy add to absorb/reset the pending carry before the next chain.
		asm("add.cc.u32 %0,%1,0;":"=r"(c[0]):"r"(c[0]));
		//asm("mad.hi.cc.u32 %0, %1, %2, %3;" : "=r"(c[2 * i + 2]) : "r"(a[i + 1]), "r"(a[i]), "r"(c[2 * i + 2]));
		for (int j = i + 1; j <= NUM_DIGITS_383_x32 - i - 2; j++) {
			asm("madc.hi.cc.u32 %0, %1, %2, %3;" : "=r"(c[i + j + 1]) : "r"(a[j]), "r"(a[i]), "r"(c[i + j + 1]));
		}
		for (int k = 0; k <= i; k++) {
			asm("madc.lo.cc.u32 %0, %1, %2, %3;" : "=r"(c[NUM_DIGITS_383_x32 + 2 * k]) : "r"(a[NUM_DIGITS_383_x32 - 1 - i + k]), "r"(a[i + k + 1]), "r"(c[NUM_DIGITS_383_x32 + 2 * k]));
			asm("madc.hi.cc.u32 %0, %1, %2, %3;" : "=r"(c[NUM_DIGITS_383_x32 + 2 * k + 1]) : "r"(a[NUM_DIGITS_383_x32 - 1 - i + k]), "r"(a[i + k + 1]), "r"(c[NUM_DIGITS_383_x32 + 2 * k + 1]));
		}
	}
	asm("add.cc.u32 %0,%1,0;":"=r"(c[0]) : "r"(c[0]));
	//asm("mad.lo.cc.u32 %0, %1, %2, %3;" : "=r"(c[NUM_DIGITS_383_x32 - 1]) : "r"(a[NUM_DIGITS_383_x32 / 2]), "r"(a[NUM_DIGITS_383_x32 / 2 - 1]), "r"(c[NUM_DIGITS_383_x32 - 1]));
	//asm("madc.hi.cc.u32 %0, %1, %2, %3;" : "=r"(c[NUM_DIGITS_383_x32]) : "r"(a[NUM_DIGITS_383_x32 / 2]), "r"(a[NUM_DIGITS_383_x32 / 2 - 1]), "r"(c[NUM_DIGITS_383_x32]));
	for (int k = 0; k < NUM_DIGITS_383_x32 / 2; k++) {
		asm("madc.lo.cc.u32 %0, %1, %2, %3;" : "=r"(c[NUM_DIGITS_383_x32 + 2 * k - 1]) : "r"(a[NUM_DIGITS_383_x32 / 2 + k]), "r"(a[NUM_DIGITS_383_x32 / 2 + k - 1]), "r"(c[NUM_DIGITS_383_x32 + 2 * k - 1]));
		asm("madc.hi.cc.u32 %0, %1, %2, %3;" : "=r"(c[NUM_DIGITS_383_x32 + 2 * k]) : "r"(a[NUM_DIGITS_383_x32 / 2 + k]), "r"(a[NUM_DIGITS_383_x32 / 2 + k - 1]), "r"(c[NUM_DIGITS_383_x32 + 2 * k]));
	}
	// Double all cross products: c = 2*c via an add-with-carry chain.
	asm("add.cc.u32 %0, %1, %1;" : "=r"(c[1]) : "r"(c[1]));
	for (int i = 2; i < 2 * NUM_DIGITS_383_x32; i++) {
		asm("addc.cc.u32 %0, %1, %1;" : "=r"(c[i]) : "r"(c[i]));
	}
	asm("add.cc.u32 %0, %1, 0;" : "=r"(c[0]) : "r"(c[0]));
	/*asm("mad.lo.cc.u32 %0, %1, %1, %2;" : "=r"(c[0]) : "r"(a[0]), "r"(c[0]));
	asm("madc.hi.cc.u32 %0, %1, %1, %2;" : "=r"(c[1]) : "r"(a[0]), "r"(c[1]));*/
	// Add the diagonal terms a[i]^2 into the even/odd limb pairs.
	for (int i = 0; i < NUM_DIGITS_383_x32; i++) {
		asm("madc.lo.cc.u32 %0, %1, %1, %2;" : "=r"(c[2 * i]) : "r"(a[i]), "r"(c[2 * i]));
		asm("madc.hi.cc.u32 %0, %1, %1, %2;" : "=r"(c[2 * i + 1]) : "r"(a[i]), "r"(c[2 * i + 1]));
	}
	intreduction(r, c);
}

__device__ uint64_t to_u64(double d) {
	// Reinterpret the raw IEEE-754 bit pattern of a double as a 64-bit
	// unsigned integer (type pun via a union; no numeric conversion).
	union Bits64 {
		double as_double;
		uint64_t as_bits;
	};
	Bits64 pun;
	pun.as_double = d;
	return pun.as_bits;
}

__device__ double to_double(uint64_t u64) {
	// Inverse of to_u64: reinterpret 64 raw bits as an IEEE-754 double
	// (type pun via a union; no numeric conversion).
	union Bits64 {
		uint64_t as_bits;
		double as_double;
	};
	Bits64 pun;
	pun.as_bits = u64;
	return pun.as_double;
}

//2^383-187= 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF45
// Radix-2^48 representation: 8 limbs of 48 bits each, the top limb holding 47 bits.
#define radix_w 48
#define bit_length_last_digit 47
#define bit_length_unreduced_form 50
// Each entry below is 16x the corresponding radix-2^48 limb of p
// (0xFFFFFFFFFF450 == 16*(2^48-187), 0x7FFFFFFFFFFF0 == 16*(2^47-1)), so the
// array encodes 16*p in limb form; sub() adds the same constants so limb-wise
// subtraction cannot go negative.
static const uint64_t GLOB_Const_P[8] = { 0xFFFFFFFFFF450,0xFFFFFFFFFFFF0, 0xFFFFFFFFFFFF0 ,0xFFFFFFFFFFFF0 ,0xFFFFFFFFFFFF0 ,0xFFFFFFFFFFFF0 ,0xFFFFFFFFFFFF0 ,0x7FFFFFFFFFFF0 };
static const uint64_t GLOB_mask48 = ((uint64_t)1 << radix_w) - 1;	// low 48 bits of a limb
static const uint64_t GLOB_mask47 = ((uint64_t)1 << bit_length_last_digit) - 1;	// low 47 bits (top limb)
static const uint64_t GLOB_mask50 = ((uint64_t)1 << bit_length_unreduced_form) - 1;	// low 50 bits (unreduced limbs)

// Fill `buffer` with `num_bytes` pseudo-random bytes from the C library PRNG.
// Not cryptographically secure: intended for test-vector generation only.
void rand_bytes(uint8_t* buffer, uint32_t num_bytes) {
	// Seed exactly once per process. The original code called srand(time(0))
	// on every invocation, so repeated calls within the same second produced
	// identical byte streams.
	static bool seeded = false;
	if (!seeded) {
		srand((unsigned)time(0));
		seeded = true;
	}
	// uint32_t index avoids the signed/unsigned comparison of the original.
	for (uint32_t i = 0; i < num_bytes; i++) {
		buffer[i] = (uint8_t)(rand() % 256);
	}
}

// Deserialize 48 bytes (read as six native-endian 64-bit words) into eight
// radix-2^48 limbs c[0..7]; limbs 0-6 are masked to 48 bits, limb 7 to 47 bits.
// NOTE(review): casting uint8_t* to uint64_t* assumes 8-byte alignment and
// violates strict aliasing; memcpy into a local array would be safer.
void unser(uint64_t* c, uint8_t* a) {
	uint64_t* tmpA = (uint64_t*)a;
	// Bit offsets of limb i are [48*i, 48*i+48); each limb spans at most two
	// 64-bit words, hence the shift/or pairs below.
	c[0] = tmpA[0];
	c[1] = (tmpA[0] >> 48) | (tmpA[1] << 16);
	c[2] = (tmpA[1] >> 32) | (tmpA[2] << 32);
	c[3] = (tmpA[2] >> 16);
	c[4] = tmpA[3];
	c[5] = (tmpA[3] >> 48) | (tmpA[4] << 16);
	c[6] = (tmpA[4] >> 32) | (tmpA[5] << 32);
	c[7] = (tmpA[5] >> 16);
	for (int i = 0; i < 8 - 1; i++) {
		c[i] &= GLOB_mask48;
	}
	c[7] &= GLOB_mask47;
}


//input length must be 8 limbs (EltBN383_x64).
//Simplify to unique element in M-383
//Used before mul and squaring
__host__ __device__ void simplification_unique(argElement_x64 c,argElement_x64 a) {
	// Fold the bits of a[7] above position 47 back into limb 0
	// (2^383 ≡ 187 mod p), then propagate carries through the 48-bit limbs
	// into c. Note: `a` is normalized in place as a side effect.
	const uint64_t overflow = a[7] >> bit_length_last_digit;
	a[7] &= GLOB_mask47;
	a[0] += overflow * 187;
	for (int limb = 0; limb + 1 < 8; ++limb) {
		a[limb + 1] += (a[limb] >> radix_w);
		c[limb] = a[limb] & GLOB_mask48;
	}
	c[7] = a[7];
}

//Just simplify no reduction.
void simplification_fast(argElement_x64 c, int num) {
	// Carry-propagate `num` limbs in place, masking each to 48 bits;
	// excess bits simply spill into the next limb (no modular reduction,
	// and the final limb is left unmasked).
	const int last = num - 1;
	for (int limb = 0; limb < last; ++limb) {
		c[limb + 1] += (c[limb] >> radix_w);
		c[limb] &= GLOB_mask48;
	}
}

//new sub and add can be parallel; only for limb-wise elements of this 383-bit field (p = 2^383-187); cannot be used in Karatsuba multiplication
__device__ void sub(argElement_x64 r, argElement_x64 a, argElement_x64 b) {
	// Limb-wise r = (a + 16*p) - b. Each bias limb is 16x the corresponding
	// radix-2^48 limb of p = 2^383-187 (0xFFFFFFFFFF450 == 16*(2^48-187)), so
	// the per-limb subtraction can never underflow for reduced inputs.
	// Side effect: `a` is left holding a + 16*p, exactly as in the original.
	const uint64_t bias16p[8] = {
		0xFFFFFFFFFF450, 0xFFFFFFFFFFFF0, 0xFFFFFFFFFFFF0, 0xFFFFFFFFFFFF0,
		0xFFFFFFFFFFFF0, 0xFFFFFFFFFFFF0, 0xFFFFFFFFFFFF0, 0x7FFFFFFFFFFF0
	};
	for (int limb = 0; limb < 8; ++limb) {
		a[limb] += bias16p[limb];
		r[limb] = a[limb] - b[limb];
	}
}

__device__ void add(argElement_x64 r, const argElement_x64 a, const argElement_x64 b)
{
	// Limb-wise r = a + b with no carry propagation and no reduction;
	// callers must leave enough headroom in the 64-bit limbs.
	for (int limb = 0; limb != 8; ++limb) {
		r[limb] = a[limb] + b[limb];
	}
}


/*
Using FMA as for 383-bit digit multiplication
School book multiplication
Used to test Karatsuba multiplication
*/
__device__ void mul_fma_8(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	// Schoolbook 8x8-limb product using the double-FMA hi/lo splitting trick.
	// Writes c[0..15], so the caller must pass a 16-limb buffer
	// (EltBN383_x64_fp_buffer). Assumes each limb of a and b converts to
	// double exactly (radix-2^48 limbs) — TODO confirm range preconditions.
	// t1/t2 are magic splitting constants: the fma against t1 isolates the
	// high product half; t2 - p_hi feeds the second fma that recovers the low
	// half — same scheme as the SUM_C* biased routines below.
	const double t1 = to_double(0x4630000000000000);
	const double t2 = to_double(0x4630000000000010);

	// Pre-load every limb of c with a negative bias that cancels the exponent
	// bits carried inside the raw fma results added below; the loop fills
	// c[0..7] and c[15..8], covering all 16 limbs.
	int w2 = 15;
	for (int i = 0; i < 8; i++) {
		c[i] = (uint64_t)0x463 * i + (uint64_t)0x433 * (i + 1);
		c[i] = -(int64_t)(c[i] & 0xFFF) << 52;
		c[w2 - i] = (uint64_t)0x463 * (i + 1) + (uint64_t)0x433 * i;
		c[w2 - i] = -(int64_t)(c[w2 - i] & 0xFFF) << 52;
	}
	//fesetround(FE_TOWARDZERO);
	double p_hi = 0, p_lo = 0, sub = 0;

	for (int i = 0; i < 8; i++) {
		double a_tmp = (double)a[i];
		for (int j = 0; j < 8; j++) {
			double b_tmp = (double)b[j];
			// p_hi = upper half of a[i]*b[j] (round-toward-zero fma).
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
			c[i + j + 1] += to_u64(p_hi);
			// p_lo = exact remainder: a*b - (p_hi - t-ish bias).
			asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
			c[i + j] += to_u64(p_lo);
		}
	}
}

//Below are the Functions used in Karatsuba multiplication
///*
//Karatsuba sub routine multiplication for 5 52-bit digit number.
//*/
#define SUM_C0 0xbcd0000000000000
#define SUM_C1 0x3370000000000000
#define SUM_C2 0xaa10000000000000
#define SUM_C3 0x20b0000000000000
#define SUM_C4 0x1db0000000000000
#define SUM_C5 0xa710000000000000
#define SUM_C6 0x3070000000000000
#define SUM_C7 0xb9d0000000000000

__device__ void mul_sub_routine_origin(argElement_x64 r, argElement_x64 a, argElement_x64 b) {
	// Karatsuba sub-routine: 4x4-limb schoolbook product via the double-FMA
	// hi/lo split, accumulated into a local 8-limb buffer pre-biased with the
	// SUM_C* constants (they cancel the exponent bits embedded in the raw fma
	// results). Only a[0..3] and b[0..3] are read; r[0..7] is written.
	// Assumes limbs convert to double exactly — see caller bound comments in
	// kara_mul_origin.
	const double t1 = to_double(0x4630000000000000);
	const double t2 = to_double(0x4630000000000010);
	uint64_t c[8];
	c[0] = SUM_C0;c[1] = SUM_C1;c[2] = SUM_C2;c[3] = SUM_C3;c[4] = SUM_C4;c[5] = SUM_C5;c[6] = SUM_C6;c[7] = SUM_C7;

	double p_hi = 0, p_lo = 0, sub = 0;
	for (int i = 0; i < 4; i++) {
		double a_tmp = (double)a[i];
		for (int j = 0; j < 4; j++) {
			double b_tmp = (double)b[j];
			// High half of a[i]*b[j] into limb i+j+1, low half into limb i+j.
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
			c[i + j + 1] += to_u64(p_hi);
			asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
			c[i + j] += to_u64(p_lo);
		}
	}
	// Buffered output: safe even when r aliases a or b (kara_mul_origin does this).
	for (int i = 0;i < 8;i++) {
		r[i]=c[i];
	}
}

__device__ void mul_sub_routine2(argElement_x64 r, argElement_x64 a, argElement_x64 b) {
	// Hand-fused PTX version of mul_sub_routine_origin: the whole 4x4-limb
	// FMA-split product lives in one asm block so the compiler cannot break
	// the instruction schedule. Operand map (see the numbered comments at the
	// bottom): %0-%7 = c[0..7] accumulators, %8-%15 = r[0..7] outputs,
	// %16 = p_hi, %17 = sub, %18 = t1, %19 = t2, %20 = tmp transit register,
	// %21-%24 = (double)a[0..3], %25-%28 = (double)b[0..3].
	// NOTE(review): %20 is bound to the *uninitialized* local `tmp` as an INPUT
	// operand yet is written by every "mov.b64 %20, %16" — writing an input
	// operand is undefined for inline PTX; it should be an output/clobber.
	const double t1 = to_double(0x4630000000000000);
	const double t2 = to_double(0x4630000000000010);
	uint64_t c[8];
	uint64_t tmp;
	//c[0] = SUM_C0;c[1] = SUM_C1;c[2] = SUM_C2;c[3] = SUM_C3;c[4] = SUM_C4;c[5] = SUM_C5;c[6] = SUM_C6;c[7] = SUM_C7;
	double p_hi, sub;
	asm volatile(
		// Load the SUM_C* exponent-cancelling biases into the accumulators.
		//a[0]
		"mov.b64		%0,		0xbcd0000000000000;\n\t"
		"mov.b64		%1,		0x3370000000000000;\n\t"
		"mov.b64		%2,		0xaa10000000000000;\n\t"
		"mov.b64		%3,		0x20b0000000000000;\n\t"
		"mov.b64		%4,		0x1db0000000000000;\n\t"
		"mov.b64		%5,		0xa710000000000000;\n\t"
		"mov.b64		%6,		0x3070000000000000;\n\t"
		"mov.b64		%7,		0xb9d0000000000000;\n\t"

		"fma.rz.f64		%16,	%21,	%25,	%18;\n\t"
		"sub.rz.f64		%17,	%19,	%16;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%1,		%1,		%20;\n\t"
		"fma.rz.f64		%16,	%21,	%25,	%17;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%0,		%0,		%20;\n\t"
		"fma.rz.f64		%16,	%21,	%26,	%18;\n\t"
		"sub.rz.f64		%17,	%19,	%16;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%2,		%2,		%20;\n\t"
		"fma.rz.f64		%16,	%21,	%26,	%17;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%1,		%1,		%20;\n\t"
		"fma.rz.f64		%16,	%21,	%27,	%18;\n\t"
		"sub.rz.f64		%17,	%19,	%16;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%3,		%3,		%20;\n\t"
		"fma.rz.f64		%16,	%21,	%27,	%17;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%2,		%2,		%20;\n\t"
		"fma.rz.f64		%16,	%21,	%28,	%18;\n\t"
		"sub.rz.f64		%17,	%19,	%16;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%4,		%4,		%20;\n\t"
		"fma.rz.f64		%16,	%21,	%28,	%17;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%3,		%3,		%20;\n\t"

		//a[1]
		"fma.rz.f64		%16,	%22,	%25,	%18;\n\t"
		"sub.rz.f64		%17,	%19,	%16;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%2,		%2,		%20;\n\t"
		"fma.rz.f64		%16,	%22,	%25,	%17;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%1,		%1,		%20;\n\t"
		"fma.rz.f64		%16,	%22,	%26,	%18;\n\t"
		"sub.rz.f64		%17,	%19,	%16;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%3,		%3,		%20;\n\t"
		"fma.rz.f64		%16,	%22,	%26,	%17;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%2,		%2,		%20;\n\t"
		"fma.rz.f64		%16,	%22,	%27,	%18;\n\t"
		"sub.rz.f64		%17,	%19,	%16;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%4,		%4,		%20;\n\t"
		"fma.rz.f64		%16,	%22,	%27,	%17;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%3,		%3,		%20;\n\t"
		"fma.rz.f64		%16,	%22,	%28,	%18;\n\t"
		"sub.rz.f64		%17,	%19,	%16;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%5,		%5,		%20;\n\t"
		"fma.rz.f64		%16,	%22,	%28,	%17;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%4,		%4,		%20;\n\t"

		//a[2]
		"fma.rz.f64		%16,	%23,	%25,	%18;\n\t"
		"sub.rz.f64		%17,	%19,	%16;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%3,		%3,		%20;\n\t"
		"fma.rz.f64		%16,	%23,	%25,	%17;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%2,		%2,		%20;\n\t"
		"fma.rz.f64		%16,	%23,	%26,	%18;\n\t"
		"sub.rz.f64		%17,	%19,	%16;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%4,		%4,		%20;\n\t"
		"fma.rz.f64		%16,	%23,	%26,	%17;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%3,		%3,		%20;\n\t"
		"fma.rz.f64		%16,	%23,	%27,	%18;\n\t"
		"sub.rz.f64		%17,	%19,	%16;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%5,		%5,		%20;\n\t"
		"fma.rz.f64		%16,	%23,	%27,	%17;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%4,		%4,		%20;\n\t"
		"fma.rz.f64		%16,	%23,	%28,	%18;\n\t"
		"sub.rz.f64		%17,	%19,	%16;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%6,		%6,		%20;\n\t"
		"fma.rz.f64		%16,	%23,	%28,	%17;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%5,		%5,		%20;\n\t"

		//a[3]
		"fma.rz.f64		%16,	%24,	%25,	%18;\n\t"
		"sub.rz.f64		%17,	%19,	%16;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%4,		%4,		%20;\n\t"
		"fma.rz.f64		%16,	%24,	%25,	%17;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%3,		%3,		%20;\n\t"
		"fma.rz.f64		%16,	%24,	%26,	%18;\n\t"
		"sub.rz.f64		%17,	%19,	%16;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%5,		%5,		%20;\n\t"
		"fma.rz.f64		%16,	%24,	%26,	%17;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%4,		%4,		%20;\n\t"
		"fma.rz.f64		%16,	%24,	%27,	%18;\n\t"
		"sub.rz.f64		%17,	%19,	%16;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%6,		%6,		%20;\n\t"
		"fma.rz.f64		%16,	%24,	%27,	%17;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%5,		%5,		%20;\n\t"
		"fma.rz.f64		%16,	%24,	%28,	%18;\n\t"
		"sub.rz.f64		%17,	%19,	%16;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%7,		%7,		%20;\n\t"
		"fma.rz.f64		%16,	%24,	%28,	%17;\n\t"
		"mov.b64		%20,	%16;\n\t"
		"add.s64 		%6,		%6,		%20;\n\t"

		// Copy the accumulators out to r[0..7].
		"mov.u64		%8,		%0;\n\t"
		"mov.u64		%9,		%1;\n\t"
		"mov.u64		%10,	%2;\n\t"
		"mov.u64		%11,	%3;\n\t"
		"mov.u64		%12,	%4;\n\t"
		"mov.u64		%13,	%5;\n\t"
		"mov.u64		%14,	%6;\n\t"
		"mov.u64		%15,	%7;\n\t"

		:"=l"(c[0]),"=l"(c[1]), "=l"(c[2]), "=l"(c[3]),//0
		"=l"(c[4]), "=l"(c[5]), "=l"(c[6]), "=l"(c[7]),//4
		"=l"(r[0]), "=l"(r[1]), "=l"(r[2]), "=l"(r[3]),//8
		"=l"(r[4]), "=l"(r[5]), "=l"(r[6]), "=l"(r[7]),//12
		"=d"(p_hi), "=d"(sub): "d"(t1), "d"(t2),"l"(tmp),//16
		"d"(double(a[0])), "d"(double(a[1])), "d"(double(a[2])), "d"(double(a[3])), //21
		"d"(double(b[0])), "d"(double(b[1])), "d"(double(b[2])), "d"(double(b[3]))//25
	);
}

//product scanning no shared memory
//product scanning no shared memory
__device__ void mul_sub_routine_product_scanning(argElement_x64 r, argElement_x64 a, argElement_x64 b) {
	// Product-scanning (column-wise) form of the 4x4-limb FMA-split multiply:
	// partial products for output column k are accumulated while they are
	// produced, keeping only two live accumulators. Pipeline per column:
	//   c_tmp1 = running low-half sum for column k (pre-biased with SUM_Ck),
	//   c_tmp2 = running high-half sum for column k+1 (pre-biased SUM_C(k+1)),
	// and at each column boundary c_tmp1 <- c_tmp2, c_tmp2 <- next bias.
	// Results are written to r[0..7]; behavior should match
	// mul_sub_routine_origin — statement order is significant, do not reorder.
	const uint64_t tt1 = 0x4630000000000000;
	const uint64_t tt2 = 0x4630000000000010;
	double t1 = to_double(tt1);
	double t2 = to_double(tt2);
	uint64_t c[6];
	uint64_t c_tmp1 = 0, c_tmp2 = 0;
	//uint64_t tmp = 0;
	double p_hi = 0, p_lo = 0, sub = 0, a_tmp = 0, b_tmp = 0;
	//fesetround(FE_TOWARDZERO);
	a_tmp = (double)a[0];
	//a[0]*b[0]
	b_tmp = (double)b[0]; c_tmp1 = SUM_C0;	c_tmp2 = SUM_C1;
	//p_hi = fma(a_tmp, b_tmp, t1);
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 += to_u64(p_hi);
	//sub = t2 - p_hi;
	//p_lo = fma(a_tmp, b_tmp, sub);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c[0] = c_tmp1 + to_u64(p_lo);
	c_tmp1 = c_tmp2; c_tmp2 = SUM_C2;

	//a[0]*b[1]
	b_tmp = (double)b[1];
	//p_hi = fma(a_tmp, b_tmp, to_double(t1));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 += to_u64(p_hi);
	//sub = t2 - p_hi;
	//p_lo = fma(a_tmp, b_tmp, sub);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);
	//a[1]*b[0]
	a_tmp = (double)a[1]; b_tmp = (double)b[0];
	//p_hi = fma(a_tmp, b_tmp, t1);
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 += to_u64(p_hi);
	//sub = t2 - p_hi;
	//p_lo = fma(a_tmp, b_tmp, sub);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c[1] = c_tmp1 + to_u64(p_lo);
	c_tmp1 = c_tmp2; c_tmp2 = SUM_C3;

	//a[2]*b[0]
	a_tmp = (double)a[2];
	//p_hi = fma(a_tmp, b_tmp, t1);
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 += to_u64(p_hi);
	//sub = t2 - p_hi;
	//p_lo = fma(a_tmp, b_tmp, sub);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);
	//a[1]*b[1]
	a_tmp = (double)a[1]; b_tmp = (double)b[1];
	//p_hi = fma(a_tmp, b_tmp, t1);
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 += to_u64(p_hi);
	//sub = t2 - p_hi;
	//p_lo = fma(a_tmp, b_tmp, sub);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);
	//a[0]*b[2]
	a_tmp = (double)a[0]; b_tmp = (double)b[2];
	//p_hi = fma(a_tmp, b_tmp, t1);
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 += to_u64(p_hi);
	//sub = t2 - p_hi;
	//p_lo = fma(a_tmp, b_tmp, sub);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c[2] = c_tmp1 + to_u64(p_lo);
	c_tmp1 = c_tmp2; c_tmp2 = SUM_C4;

	//a[0]*b[3]
	b_tmp = (double)b[3];
	//p_hi = fma(a_tmp, b_tmp, t1);
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 += to_u64(p_hi);
	//sub = t2 - p_hi;
	//p_lo = fma(a_tmp, b_tmp, sub);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);
	//a[1]*b[2]
	a_tmp = (double)a[1]; b_tmp = (double)b[2];
	//p_hi = fma(a_tmp, b_tmp, t1);
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 += to_u64(p_hi);
	//sub = t2 - p_hi;
	//p_lo = fma(a_tmp, b_tmp, sub);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);
	//a[2]*b[1]
	a_tmp = (double)a[2]; b_tmp = (double)b[1];
	//p_hi = fma(a_tmp, b_tmp, t1);
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 += to_u64(p_hi);
	//sub = t2 - p_hi;
	//p_lo = fma(a_tmp, b_tmp, sub);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);
	//a[3]*b[0]
	a_tmp = (double)a[3]; b_tmp = (double)b[0];
	//p_hi = fma(a_tmp, b_tmp, t1);
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 += to_u64(p_hi);
	//sub = t2 - p_hi;
	//p_lo = fma(a_tmp, b_tmp, sub);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c[3] = c_tmp1 + to_u64(p_lo);
	c_tmp1 = c_tmp2; c_tmp2 = SUM_C5;
	//a[3]*b[1]
	b_tmp = (double)b[1];
	//p_hi = fma(a_tmp, b_tmp, t1);
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 += to_u64(p_hi);
	//sub = t2 - p_hi;
	//p_lo = fma(a_tmp, b_tmp, sub);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);
	//a[2]*b[2]
	a_tmp = (double)a[2]; b_tmp = (double)b[2];
	//p_hi = fma(a_tmp, b_tmp, t1);
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 += to_u64(p_hi);
	//sub = t2 - p_hi;
	//p_lo = fma(a_tmp, b_tmp, sub);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);
	//a1*b3
	a_tmp = (double)a[1]; b_tmp = (double)b[3];
	//p_hi = fma(a_tmp, b_tmp, t1);
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 += to_u64(p_hi);
	//sub = t2 - p_hi;
	//p_lo = fma(a_tmp, b_tmp, sub);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c[4] = c_tmp1 + to_u64(p_lo);
	c_tmp1 = c_tmp2; c_tmp2 = SUM_C6;
	//a2*b3
	a_tmp = (double)a[2];
	//p_hi = fma(a_tmp, b_tmp, t1);
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 += to_u64(p_hi);
	//sub = t2 - p_hi;
	//p_lo = fma(a_tmp, b_tmp, sub);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);
	//a3*b2
	a_tmp = (double)a[3]; b_tmp = (double)b[2];
	//p_hi = fma(a_tmp, b_tmp, t1);
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 += to_u64(p_hi);
	//sub = t2 - p_hi;
	//p_lo = fma(a_tmp, b_tmp, sub);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c[5] = c_tmp1 + to_u64(p_lo);
	c_tmp1 = c_tmp2; c_tmp2 = SUM_C7;
	//a3*b3
	b_tmp = (double)b[3];
	//p_hi = fma(a_tmp, b_tmp, t1);
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 += to_u64(p_hi);
	//sub = t2 - p_hi;
	//p_lo = fma(a_tmp, b_tmp, sub);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));

	// Flush the six buffered columns, then the two still-live accumulators.
	for (int i = 0;i < 6;i++) {
		r[i] = c[i];
	}
	r[6] = c_tmp1 + to_u64(p_lo);
	r[7] = c_tmp2;
	//fesetround(FE_TONEAREST);
}

//__device__ void mul_sub_routine_product_scanning(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
//	const uint64_t tt1 = 0x4630000000000000;
//	const uint64_t tt2 = 0x4630000000000010;
//	double t1 = to_double(tt1);
//	double t2 = to_double(tt2);
//	c[0] = SUM_C0;c[1] = SUM_C1;c[2] = SUM_C2;c[3] = SUM_C3;c[4] = SUM_C4;c[5] = SUM_C5;c[6] = SUM_C6;c[7] = SUM_C7;
//	uint64_t c_tmp1=0, c_tmp2=0;
//	//uint64_t tmp = 0;
//	double p_hi, p_lo, sub, a_tmp, b_tmp;
//	//fesetround(FE_TOWARDZERO);
//	for (int k = 0;k < 4;k++) {
//		for (int i = 0,j=k-i;i < k;i++,j--) {
//			a_tmp = (double)a[i];b_tmp = (double)b[j];
//			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
//			c_tmp2 += to_u64(p_hi);
//			//sub = t2 - p_hi;
//			//p_lo = fma(a_tmp, b_tmp, sub);
//			asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
//			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));			
//			c_tmp1 += to_u64(p_lo);
//		}
//		c_tmp1 = c_tmp2;c_tmp2 = 0;
//		c[k] += c_tmp1;
//	}
//	for (int k = 4;k < 8;k++) {
//		c_tmp1 = c_tmp2;c_tmp2 = 0;
//		for (int i = k, j = k - i;i >= 0 ;i--, j++) {
//			a_tmp = (double)a[i];b_tmp = (double)b[j];
//			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
//			c_tmp2 += to_u64(p_hi);
//			//sub = t2 - p_hi;
//			//p_lo = fma(a_tmp, b_tmp, sub);
//			asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
//			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
//			c_tmp1 += to_u64(p_lo);
//		}
//		c[k] += c_tmp1;
//	}
//}

//product scanning no shared memory
__device__ void sqr_sub_routine_product_scanning(argElement_x64 r, argElement_x64 a) {
	// Product-scanning 4-limb squaring with the FMA hi/lo split. Cross terms
	// a[i]*a[j] (i<j) are accumulated once and doubled with a shift (<<1);
	// diagonal terms a[i]^2 are added undoubled. c_tmp1/c_tmp2 carry the
	// running low/high column sums, c_tmp3 parks the diagonal's high half for
	// the next column. Results (SUM_C*-biased limbs) go to r[0..7].
	// Statement order is significant — do not reorder.
	double t1 = to_double(0x4630000000000000);
	double t2 = to_double(0x4630000000000010);
	uint64_t c_tmp1 = 0, c_tmp2 = 0, c_tmp3=0;
	//uint64_t tmp = 0;
	double p_hi = 0, p_lo = 0, sub = 0, a_tmp = 0, b_tmp = 0;
	//c0: a0^2 (diagonal, not doubled)
	a_tmp = (double)a[0];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(a_tmp), "d"(t1));
	c_tmp3 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(a_tmp), "d"(sub));
	r[0] = SUM_C0 + to_u64(p_lo);
	//c1: 2*a0*a1 (low half doubled here; high half doubled at the c2 flush)
	b_tmp = (double)a[1];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 = to_u64(p_lo)<<1;
	r[1] = c_tmp1 + SUM_C1 + c_tmp3; 
	//c2: 2*a0*a2 + a1^2
	c_tmp1 = c_tmp2;
	b_tmp = (double)a[2];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo); 

	a_tmp = (double)a[1];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(a_tmp), "d"(t1));
	c_tmp3 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(a_tmp), "d"(sub));
	r[2] = (c_tmp1 << 1) + to_u64(p_lo) + SUM_C2;
	//c3:a1b2
	c_tmp1 = c_tmp2;
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);
	
	a_tmp = (double)a[0];
	b_tmp = (double)a[3];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 += to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);
	r[3] = (c_tmp1 << 1) + c_tmp3 + SUM_C3;
	//c4:a1b3
	c_tmp1 = c_tmp2;
	a_tmp = (double)a[1];b_tmp = (double)a[3];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);
	
	a_tmp = (double)a[2];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(a_tmp), "d"(t1));
	c_tmp3 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(a_tmp), "d"(sub));
	r[4] = (c_tmp1 << 1) + to_u64(p_lo) + SUM_C4;

	//c5:a2b3
	c_tmp1 = c_tmp2;
	b_tmp = (double)a[3];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(b_tmp), "d"(t1));
	c_tmp2 = to_u64(p_hi);
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(b_tmp), "d"(sub));
	c_tmp1 += to_u64(p_lo);
	r[5] = (c_tmp1 << 1) + c_tmp3 + SUM_C5;
	//c6/c7: a3^2 diagonal; high half lands in r[7], low half joins the doubled
	//a2*a3 high half in r[6]
	a_tmp = (double)a[3];
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"(a_tmp), "d"(a_tmp), "d"(t1));
	r[7] = to_u64(p_hi) + SUM_C7;
	asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
	asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"(a_tmp), "d"(a_tmp), "d"(sub));
	r[6] = (c_tmp2 << 1) + to_u64(p_lo) + SUM_C6;
}

__device__ void sqr_sub_routine2(argElement_x64 r, argElement_x64 a) {
	// Hand-fused PTX version of sqr_sub_routine_product_scanning: one asm
	// block holds the whole 4-limb FMA-split squaring. Operand map (see the
	// numbered comments at the bottom): %0 = c_tmp1, %1 = c_tmp2, %2 = c_tmp3,
	// %3 = p_hi, %4 = sub, %5-%12 = r[0..7] (pre-loaded with the SUM_C*
	// biases by the leading mov.b64 instructions), %13 = tmp transit register,
	// %14 = t1, %15 = t2, %16-%19 = (double)a[0..3]. Cross terms are doubled
	// with shl.b64; diagonal squares are added undoubled.
	double t1 = to_double(0x4630000000000000);
	double t2 = to_double(0x4630000000000010);
	//uint64_t tmp = 0;
	double p_hi, sub;
	uint64_t c_tmp1, c_tmp2, c_tmp3, tmp;
	asm volatile(
		"mov.b64	%5,		0xbcd0000000000000;\n\t"
		"mov.b64	%6,		0x3370000000000000;\n\t"
		"mov.b64	%7,		0xaa10000000000000;\n\t"
		"mov.b64	%8,		0x20b0000000000000;\n\t"
		"mov.b64	%9,		0x1db0000000000000;\n\t"
		"mov.b64	%10,	0xa710000000000000;\n\t"
		"mov.b64	%11,	0x3070000000000000;\n\t"
		"mov.b64	%12,	0xb9d0000000000000;\n\t"
		/*"mov.b64	%0,		0;\n\t"
		"mov.b64	%1,		0;\n\t"
		"mov.b64	%2,		0;\n\t"*/
		//c[0]
		"fma.rz.f64		%3,		%16,	%16,	%14;\n\t"
		"sub.rz.f64		%4,		%15,	%3;\n\t"
		"mov.b64		%2,		%3;\n\t"
		//"mov.b64 		%2,		%13;\n\t"						//c_tmp3
		"fma.rz.f64		%3,		%16,	%16,	%4;\n\t"	
		"mov.b64		%13,	%3;\n\t"
		"add.s64 		%5,		%5,		%13;\n\t"				//c[0]
		//c[1]
		"fma.rz.f64		%3,		%16,	%17,	%14;\n\t"	
		"sub.rz.f64		%4,		%15,	%3;\n\t"
		"mov.b64		%1,		%3;\n\t"
		//"mov.b64 		%1,		%13;\n\t"						//c_tmp2
		"fma.rz.f64		%3,		%16,	%17,	%4;\n\t"
		"mov.b64		%13,	%3;\n\t"
		//"mad.lo.u64		%6,		%13,	2,		%6;\n\t"
		"shl.b64		%13,	%13,	1;\n\t"
		"add.s64 		%6,		%6,		%13;\n\t"
		"add.s64 		%6,		%6,		%2;\n\t"				//c[1]
		"mov.b64		%0,		%1;\n\t"						//c_tmp1=c_tmp2

		//c[2]
		"fma.rz.f64		%3,		%16,	%18,	%14;\n\t"
		"sub.rz.f64		%4,		%15,	%3;\n\t"
		"mov.b64		%1,		%3;\n\t"
		//"mov.b64 		%1,		%13;\n\t"						//c_tmp2
		"fma.rz.f64		%3,		%16,	%18,	%4;\n\t"
		"mov.b64		%13,	%3;\n\t"
		"add.s64 		%0,		%0,		%13;\n\t"

		"fma.rz.f64		%3,		%17,	%17,	%14;\n\t"
		"sub.rz.f64		%4,		%15,	%3;\n\t"
		"mov.b64		%2,		%3;\n\t"
		//"mov.b64 		%2,		%13;\n\t"						//c_tmp3
		"fma.rz.f64		%3,		%17,	%17,	%4;\n\t"
		"mov.b64		%13,	%3;\n\t"
		"shl.b64		%0,		%0,		1;\n\t"
		"add.s64 		%7,		%7,		%0;\n\t"
		"add.s64 		%7,		%7,		%13;\n\t"				//c[2]
		"mov.b64		%0,		%1;\n\t"						//c_tmp1=c_tmp2
		//c[3]
		"fma.rz.f64		%3,		%17,	%18,	%14;\n\t"
		"sub.rz.f64		%4,		%15,	%3;\n\t"
		"mov.b64		%1,		%3;\n\t"
		//"mov.b64 		%1,		%13;\n\t"						//c_tmp2
		"fma.rz.f64		%3,		%17,	%18,	%4;\n\t"
		"mov.b64		%13,	%3;\n\t"
		"add.s64		%0,		%0,		%13;\n\t"

		"fma.rz.f64		%3,		%16,	%19,	%14;\n\t"
		"sub.rz.f64		%4,		%15,	%3;\n\t"
		"mov.b64		%13,	%3;\n\t"
		"add.s64 		%1,		%1,		%13;\n\t"				//c_tmp2
		"fma.rz.f64		%3,		%16,	%19,	%4;\n\t"
		"mov.b64		%13,	%3;\n\t"
		"add.s64		%0,		%0,		%13;\n\t"
		"shl.b64		%0,		%0,		1;\n\t"
		"add.s64 		%8,		%8,		%0;\n\t"				//c[3]
		"add.s64 		%8,		%8,		%2;\n\t"
		"mov.b64		%0,		%1;\n\t"						//c_tmp1=c_tmp2
		//c4
		"fma.rz.f64		%3,		%17,	%19,	%14;\n\t"
		"sub.rz.f64		%4,		%15,	%3;\n\t"
		"mov.b64		%1,		%3;\n\t"
		//"mov.b64 		%1,		%13;\n\t"						//c_tmp2
		"fma.rz.f64		%3,		%17,	%19,	%4;\n\t"
		"mov.b64		%13,	%3;\n\t"
		"add.s64		%0,		%0,		%13;\n\t"

		"fma.rz.f64		%3,		%18,	%18,	%14;\n\t"
		"sub.rz.f64		%4,		%15,	%3;\n\t"
		"mov.b64		%2,		%3;\n\t"
		//"mov.b64 		%1,		%13;\n\t"						//c_tmp3
		"fma.rz.f64		%3,		%18,	%18,	%4;\n\t"
		"mov.b64		%13,	%3;\n\t"
		"shl.b64		%0,		%0,		1;\n\t"
		"add.s64 		%9,		%9,		%0;\n\t"
		"add.s64 		%9,		%9,		%13;\n\t"				//c[4]
		"mov.b64		%0,		%1;\n\t"
		//c5
		"fma.rz.f64		%3,		%18,	%19,	%14;\n\t"
		"sub.rz.f64		%4,		%15,	%3;\n\t"
		"mov.b64		%1,		%3;\n\t"
		//"mov.b64 		%1,		%13;\n\t"						//c_tmp2
		"fma.rz.f64		%3,		%18,	%19,	%4;\n\t"
		"mov.b64		%13,	%3;\n\t"
		"add.s64		%0,		%0,		%13;\n\t"
		"shl.b64		%0,		%0,		1;\n\t"
		"add.s64 		%10,	%10,	%0;\n\t"				//c[5]
		"add.s64 		%10,	%10,	%2;\n\t"

		//c6/c7: a3^2 diagonal plus doubled a2*a3 high half
		"fma.rz.f64		%3,		%19,	%19,	%14;\n\t"
		"sub.rz.f64		%4,		%15,	%3;\n\t"
		"mov.b64		%2,		%3;\n\t"
		"add.s64		%12,	%12,	%2;\n\t"
		"fma.rz.f64		%3,		%19,	%19,	%4;\n\t"
		"mov.b64		%13,	%3;\n\t"
		"add.s64		%0,		%0,		%13;\n\t"
		"shl.b64		%1,		%1,		1;\n\t"
		"add.s64 		%11,	%11,	%1;\n\t"				//c[6]
		"add.s64 		%11,	%11,	%13;"

		:"=l"(c_tmp1),"=l"(c_tmp2), "=l"(c_tmp3),"=d"(p_hi), "=d"(sub),//0
		"=l"(r[0]), "=l"(r[1]), "=l"(r[2]), "=l"(r[3]),"=l"(r[4]), //5
		"=l"(r[5]), "=l"(r[6]), "=l"(r[7]),"=l"(tmp):"d"(t1),//10
		"d"(t2),"d"(double(a[0])), "d"(double(a[1])), "d"(double(a[2])), "d"(double(a[3]))//15
	);
}
#undef SUM_C0 
#undef SUM_C1 
#undef SUM_C2 
#undef SUM_C3 
#undef SUM_C4 
#undef SUM_C5 
#undef SUM_C6 
#undef SUM_C7

//One-level Karatsuba multiplication over 8x48-bit digits:
//c[0..15] <- a*b.  Low half a_lo*b_lo lands in c[0..7], high half
//a_hi*b_hi in c[8..15], and the middle term (a_lo+a_hi)(b_lo+b_hi)
//is folded into c[4..12].  The exact statement order matters: the
//2^54 bias trick below keeps every digit non-negative through the
//two Karatsuba subtractions.
__device__ void kara_mul_origin(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	uint64_t r_sum[8];
	//half-operand sums: r_sum[0..3]=a_lo+a_hi, r_sum[4..7]=b_lo+b_hi
	for (int i = 0; i < 4; i++) {
		r_sum[i] = a[i] + a[4 + i];//<2^49
		r_sum[4 + i] = b[i] + b[4 + i];//<2^49
	}
	mul_sub_routine_origin(c, a, b);//<(2^48)*7<2^51
	mul_sub_routine_origin(c + 8, a + 4, b + 4);//<(2^48)*7<2^51
	mul_sub_routine_origin(r_sum, r_sum, r_sum + 4);//<(2^48)*3+2^50*4=19*2^48<2^53
	//mul_sub_routine2(c, a, b);//<(2^48)*7<2^51
	//mul_sub_routine2(c + 8, a + 4, b + 4);//<(2^48)*7<2^51
	//mul_sub_routine2(r_sum, r_sum, r_sum + 4);//<(2^48)*3+2^50*4=19*2^48<2^53
	for (int i = 0; i < 7; i++) {
		r_sum[i] += c[4 + i];
		//bias each middle digit with 2^54 so the subtractions below cannot
		//underflow; 2^54 is worth 2^6 in the next 48-bit digit, which is
		//removed from r_sum[i+1] to compensate.
		r_sum[i] |= ((uint64_t)1 << 54);
		r_sum[i + 1] -= (1 << 6);
	}
	r_sum[7] += c[11];
	r_sum[7] |= ((uint64_t)1 << 54);
	//Karatsuba: middle = sum-product - low - high
	for (int i = 0; i < 8; i++) {
		r_sum[i] -= c[i];
	}
	for (int i = 0; i < 8; i++) {
		c[4 + i] = r_sum[i] - c[8 + i];
	}
	c[12] -= (1 << 6);//compensate the bias set on r_sum[7] (now in c[11])
}

//Karatsuba multiplication whose half-operand sums are re-normalised to
//48-bit digits before the middle product (the overflow bit of each sum is
//folded back in afterwards as a masked correction).
//Fixes over the previous (author-marked "incorrect") version:
//  - carries of the partial sums are propagated instead of being
//    overwritten by the next loop iteration;
//  - out-of-bounds writes a_sum[4]/b_sum[4] and read b_sum[7] removed;
//  - top carry derived from a_sum[3] instead of the uninitialised r_sum[3];
//  - carry-fold loop runs i<4 (was i<5, indexing r_sum[8]/a_sum[4] OOB);
//  - NOTE(review): c[12] -= (1<<5) added to mirror the bias compensation
//    in kara_mul_product_scanning — confirm against a reference result.
__device__ void kara_mul_with_simplify(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	uint64_t a_sum[4], b_sum[4], r_sum[8];
	uint64_t ca = 0, cb = 0;//running carries of the half-operand sums
	for (int i = 0; i < 3; i++) {
		a_sum[i] = a[i] + a[4 + i] + ca;//<2^49
		b_sum[i] = b[i] + b[4 + i] + cb;//<2^49
		ca = (a_sum[i] >> 48);
		cb = (b_sum[i] >> 48);
		a_sum[i] &= 0xFFFFFFFFFFFF;
		b_sum[i] &= 0xFFFFFFFFFFFF;
	}
	a_sum[3] = ca + a[3] + a[7];
	b_sum[3] = cb + b[3] + b[7];
	//carry_a/carry_b: all-ones mask when the sum overflowed into bit 48,
	//i.e. the dropped 2^192 bit of the corresponding half-operand sum.
	uint64_t carry_a = -(int64_t)(a_sum[3] >> 48);
	uint64_t carry_b = -(int64_t)(b_sum[3] >> 48);
	a_sum[3] &= 0xFFFFFFFFFFFF;
	b_sum[3] &= 0xFFFFFFFFFFFF;

	mul_sub_routine_origin(c, a, b);//low half:  a_lo*b_lo
	mul_sub_routine_origin(c + 8, a + 4, b + 4);//high half: a_hi*b_hi
	mul_sub_routine_origin(r_sum, a_sum, b_sum);//middle: masked-sum product
	//(s_a + ca*2^192)(s_b + cb*2^192) = s_a*s_b
	//  + (ca*s_b + cb*s_a)*2^192 + ca*cb*2^384
	for (int i = 0; i < 4; i++) {
		r_sum[i + 4] = r_sum[i + 4] + (carry_a & b_sum[i]) + (carry_b & a_sum[i]);
	}

	for (int i = 0; i < 7; i++) {
		r_sum[i] += c[4 + i];
		//2^53 bias so the Karatsuba subtractions below cannot underflow;
		//remove its 2^5 weight from the next digit.
		r_sum[i] |= ((uint64_t)1 << 53);
		r_sum[i + 1] -= (1 << 5);
	}
	r_sum[7] += c[11];
	r_sum[7] |= ((uint64_t)1 << 53);

	for (int i = 0; i < 8; i++) {
		r_sum[i] -= c[i];
	}
	for (int i = 0; i < 8; i++) {
		c[4 + i] = r_sum[i] - c[8 + i];
	}
	c[12] += (-(int64_t)carry_a) & (-(int64_t)carry_b);//ca*cb at 2^384
	c[12] -= (1 << 5);//compensate the bias set on r_sum[7] (now c[11])
}


//One-level Karatsuba multiplication using the product-scanning inner
//routine: c[0..15] <- a*b over 8x48-bit digits.  Same structure as
//kara_mul_origin but with a 2^53/2^5 bias pair.
//Fix: removed the dead stores a_sum[0]=0 and b_sum[0]=0 — every element
//of both arrays is assigned unconditionally by the loop below.
__device__ void kara_mul_product_scanning(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	uint64_t a_sum[4], b_sum[4], r_sum[8];
	//half-operand sums (each digit < 2^49)
	for (int i = 0; i < 4; i++) {
		a_sum[i] = a[i] + a[4 + i];
		b_sum[i] = b[i] + b[4 + i];
	}
	mul_sub_routine_product_scanning(c, a, b);//low half:  a_lo*b_lo
	mul_sub_routine_product_scanning(c + 8, a + 4, b + 4);//high half: a_hi*b_hi
	mul_sub_routine_product_scanning(r_sum, a_sum, b_sum);//middle product
	for (int i = 0; i < 7; i++) {
		r_sum[i] += c[4 + i];
		//bias with 2^53 so the subtractions below cannot underflow;
		//its 2^5 weight in the next digit is removed to compensate.
		r_sum[i] |= ((uint64_t)1 << 53);
		r_sum[i + 1] -= (1 << 5);
	}
	r_sum[7] += c[11];
	r_sum[7] |= ((uint64_t)1 << 53);
	//Karatsuba: middle = sum-product - low - high
	for (int i = 0; i < 8; i++) {
		r_sum[i] -= c[i];
	}
	for (int i = 0; i < 8; i++) {
		c[4 + i] = r_sum[i] - c[8 + i];
	}
	c[12] -= (1 << 5);//compensate the bias set on r_sum[7] (now c[11])
}

//One-level Karatsuba squaring: c[0..15] <- a^2 over 8x48-bit digits.
//Same bias scheme as kara_mul_product_scanning (2^53 on each middle
//digit, 2^5 removed from the next).  The three partial squares use
//sqr_sub_routine2 (the product-scanning variants are kept commented).
__device__ void kara_sqr_product_scanning(argElement_x64 c, argElement_x64 a) {
	uint64_t a_sum[4], r_sum[8];
	//half-operand sums: a_lo + a_hi
	for (int i = 0; i < 4; i++) {
		a_sum[i] = a[i] + a[4 + i];//<2^49
	}
	//sqr_sub_routine_product_scanning(c, a);//<(2^48)*7<2^51
	//sqr_sub_routine_product_scanning(c + 8, a + 4);//<(2^48)*7<2^51
	//sqr_sub_routine_product_scanning(r_sum, a_sum);//<(2^48)*3+2^50*4=19*2^48<2^53
	sqr_sub_routine2(c, a);
	sqr_sub_routine2(c + 8, a + 4);
	sqr_sub_routine2(r_sum, a_sum);
	for (int i = 0; i < 7; i++) {
		r_sum[i] += c[4 + i];
		//2^53 bias so the subtractions below cannot underflow
		r_sum[i] |= ((uint64_t)1 << 53);
		r_sum[i + 1] -= (1 << 5);
	}
	r_sum[7] += c[11];
	r_sum[7] |= ((uint64_t)1 << 53);
	//Karatsuba: middle = (a_lo+a_hi)^2 - a_lo^2 - a_hi^2
	for (int i = 0; i < 8; i++) {
		r_sum[i] -= c[i];
	}
	for (int i = 0; i < 8; i++) {
		c[4 + i] = r_sum[i] - c[8 + i];
	}
	c[12] -= (1 << 5);//compensate the bias set on r_sum[7]
}

//Karatsuba squaring with the half-operand sum re-normalised to 48-bit
//digits before the middle square; the dropped 2^192 bit is folded back
//in afterwards.  Fixes over the previous version:
//  - sum carries are propagated instead of being overwritten;
//  - out-of-bounds a_sum[4] write removed, a[3]+a[7] no longer added twice;
//  - top carry read from a_sum[3] instead of the uninitialised r_sum[3];
//  - carry-fold loop now targets r_sum[4..7] (was r_sum[5..9], OOB) and
//    doubles only the correction term — the previous `x + y << 1`
//    shifted the whole sum due to operator precedence.
//NOTE(review): derived from the algebra (s + ca*2^192)^2 =
//s^2 + 2*ca*s*2^192 + ca^2*2^384 — confirm against a reference result.
__device__ void kara_sqr_with_simplify(argElement_x64 c, argElement_x64 a) {
	uint64_t a_sum[4], r_sum[8];
	uint64_t carry = 0;//running carry of the half-operand sum
	for (int i = 0; i < 3; i++) {
		a_sum[i] = a[i] + a[4 + i] + carry;//<2^49
		carry = (a_sum[i] >> 48);
		a_sum[i] &= 0xFFFFFFFFFFFF;
	}
	a_sum[3] = carry + a[3] + a[7];
	//all-ones mask when the sum overflowed into bit 48 (the 2^192 bit)
	uint64_t carry_a = -(int64_t)(a_sum[3] >> 48);
	a_sum[3] &= 0xFFFFFFFFFFFF;
	sqr_sub_routine_product_scanning(c, a);//low half:  a_lo^2
	sqr_sub_routine_product_scanning(c + 8, a + 4);//high half: a_hi^2
	sqr_sub_routine_product_scanning(r_sum, a_sum);//middle: masked-sum square
	//fold back 2*ca*s at the 2^192 position
	for (int i = 0; i < 4; i++) {
		r_sum[i + 4] += (carry_a & a_sum[i]) << 1;
	}
	c[12] += (-(int64_t)carry_a);//ca^2 (0 or 1) at the 2^384 position
	for (int i = 0; i < 7; i++) {
		r_sum[i] += c[4 + i];
		//2^53 bias so the subtractions below cannot underflow
		r_sum[i] |= ((uint64_t)1 << 53);
		r_sum[i + 1] -= (1 << 5);
	}
	r_sum[7] += c[11];
	r_sum[7] |= ((uint64_t)1 << 53);
	c[12] -= (1 << 5);//compensate the bias set on r_sum[7]
	for (int i = 0; i < 8; i++) {
		r_sum[i] -= c[i];
	}
	for (int i = 0; i < 8; i++) {
		c[4 + i] = r_sum[i] - c[8 + i];
	}
}


//Reduction after multiplication/squaring: fold the high 8 digits of the
//double-width product back into the low 8.  Since p = 2^383 - 187 and
//the 8 low digits cover 384 bits, 2^384 ≡ 2*187 = 374 (mod p), so each
//high digit contributes 374 times itself one position lower.
__device__ void reduction(argElement_x64 c, argElement_x64 a) {
	for (int i = 0; i < 8; ++i)
		c[i] = a[i] + (uint64_t)374 * a[8 + i];
}

//Modular multiplication c = a*b mod p using the schoolbook FMA multiplier.
__device__ void mul_1(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	EltBN383_x64_fp_buffer wide = { 0 };
	mul_fma_8(wide, a, b);//double-width product
	reduction(wide, wide);//fold high half mod p
	simplification_unique(c, wide);//normalise digits
}
//Modular multiplication c = a*b mod p using the original Karatsuba multiplier.
__device__ void mul_2(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	EltBN383_x64_fp_buffer wide = { 0 };
	kara_mul_origin(wide, a, b);//double-width product
	reduction(wide, wide);//fold high half mod p
	simplification_unique(c, wide);//normalise digits
}

//Modular multiplication c = a*b mod p using the product-scanning Karatsuba
//multiplier.
__device__ void mul_3(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	EltBN383_x64_fp_buffer wide = { 0 };
	kara_mul_product_scanning(wide, a, b);//double-width product
	reduction(wide, wide);//fold high half mod p
	simplification_unique(c, wide);//normalise digits
}

//Modular squaring c = a^2 mod p via the schoolbook FMA multiplier.
__device__ void sqr_1(argElement_x64 c, argElement_x64 a) {
	EltBN383_x64_fp_buffer wide = { 0 };
	mul_fma_8(wide, a, a);//double-width square
	reduction(wide, wide);//fold high half mod p
	simplification_unique(c, wide);//normalise digits
}
//Modular squaring c = a^2 mod p via the original Karatsuba multiplier
//(note: uses the general multiplier with b=a, not a dedicated squaring
//routine — intentional for the benchmark comparison, presumably).
__device__ void sqr_2(argElement_x64 c, argElement_x64 a) {
	EltBN383_x64_fp_buffer wide = { 0 };
	kara_mul_origin(wide, a, a);//double-width square
	reduction(wide, wide);//fold high half mod p
	simplification_unique(c, wide);//normalise digits
}

//Modular squaring c = a^2 mod p via the product-scanning Karatsuba
//multiplier (general multiplier with b=a).
__device__ void sqr_3(argElement_x64 c, argElement_x64 a) {
	EltBN383_x64_fp_buffer wide = { 0 };
	kara_mul_product_scanning(wide, a, a);//double-width square
	reduction(wide, wide);//fold high half mod p
	simplification_unique(c, wide);//normalise digits
}

//Copy one 8-digit field element: c <- a.
__host__ __device__ void copy(argElement_x64 c, argElement_x64 a) {
	for (int i = 0; i < 8; ++i) {
		c[i] = a[i];
	}
}

//Modular inversion (benchmark variant 1): c = x^(p-2) mod p computed by
//Fermat's little theorem with a fixed square-and-multiply addition chain
//built on sqr_1/mul_1.  The exact call sequence encodes the exponent
//2^383-189 — do not reorder.
__device__ void inv_1(argElement_x64 c, argElement_x64 x) {
	EltBN383_x64_fp x127, w, t, z;//scratch field elements
	//build x^127 = x^(2^7 - 1)
	sqr_1(x127, x);//x127=a^2
	mul_1(t, x, x127);//t=a^3
	sqr_1(x127, t);//t3=a^6
	mul_1(w, x127, x);//w=a^7
	sqr_1(x127, w);//a^14
	sqr_1(t, x127);//28
	sqr_1(x127, t);//x127=x^56
	copy(t, x127);
	mul_1(x127, w, t);//x^63
	sqr_1(t, x127);//x^126
	mul_1(x127, t, x);//x^127

	sqr_1(t, x127);
	mul_1(z, t, x);//z=x^255 = x^(2^8-1)
	copy(w, z);
	//each stage squares 2k times and multiplies by the saved value,
	//doubling the run of ones in the exponent: 2^8-1 -> 2^16-1 -> ...
	for (int i = 0; i < 4; i++) {
		sqr_1(t, z);
		sqr_1(z, t);
	}
	mul_1(t, z, w);//t=z16

	copy(w, t);
	for (int i = 0; i < 8; i++)
	{
		sqr_1(z, t);
		sqr_1(t, z);
	}
	mul_1(z, t, w);        // z=z32      

	copy(w, z);
	for (int i = 0; i < 16; i++)
	{
		sqr_1(t, z);
		sqr_1(z, t);
	}
	mul_1(t, z, w);        // t=z64      

	copy(w, t);
	for (int i = 0; i < 32; i++)
	{
		sqr_1(z, t);
		sqr_1(t, z);
	}
	mul_1(z, t, w);        // z=z128     

	copy(w, z);
	for (int i = 0; i < 64; i++)
	{
		sqr_1(t, z);
		sqr_1(z, t);
	}
	mul_1(t, z, w);        // z=z256      
	//final assembly: combine with x^127 and the trailing bits
	mul_1(z, t, x127);
	sqr_1(t, z);
	sqr_1(z, t);
	mul_1(t, z, x);
	copy(c, t);
}

//Modular inversion (benchmark variant 2): same fixed addition chain as
//inv_1 but built on sqr_2/mul_2 (original Karatsuba arithmetic).
//The exact call sequence encodes the exponent p-2 — do not reorder.
__device__ void inv_2(argElement_x64 c, argElement_x64 x) {
	EltBN383_x64_fp x127, w, t, z;//scratch field elements
	sqr_2(x127, x);//x127=a^2
	mul_2(t, x, x127);//t=a^3
	sqr_2(x127, t);//t3=a^6
	mul_2(w, x127, x);//w=a^7
	sqr_2(x127, w);//a^14
	sqr_2(t, x127);//28
	sqr_2(x127, t);//x127=x^56
	copy(t, x127);
	mul_2(x127, w, t);//x^63
	sqr_2(t, x127);//x^126
	mul_2(x127, t, x);//x^127

	sqr_2(t, x127);
	mul_2(z, t, x);//z=x^255
	copy(w, z);
	for (int i = 0; i < 4; i++) {
		sqr_2(t, z);
		sqr_2(z, t);
	}
	mul_2(t, z, w);//t=z16

	copy(w, t);
	for (int i = 0; i < 8; i++)
	{
		sqr_2(z, t);
		sqr_2(t, z);
	}
	mul_2(z, t, w);        // z=z32      

	copy(w, z);
	for (int i = 0; i < 16; i++)
	{
		sqr_2(t, z);
		sqr_2(z, t);
	}
	mul_2(t, z, w);        // t=z64      

	copy(w, t);
	for (int i = 0; i < 32; i++)
	{
		sqr_2(z, t);
		sqr_2(t, z);
	}
	mul_2(z, t, w);        // z=z128     

	copy(w, z);
	for (int i = 0; i < 64; i++)
	{
		sqr_2(t, z);
		sqr_2(z, t);
	}
	mul_2(t, z, w);        // z=z256      
	//final assembly: combine with x^127 and the trailing bits
	mul_2(z, t, x127);
	sqr_2(t, z);
	sqr_2(z, t);
	mul_2(t, z, x);
	copy(c, t);
}

//Modular inversion (benchmark variant 3): same fixed addition chain as
//inv_1 but built on sqr_3/mul_3 (product-scanning Karatsuba arithmetic).
//The exact call sequence encodes the exponent p-2 — do not reorder.
__device__ void inv_3(argElement_x64 c, argElement_x64 x) {
	EltBN383_x64_fp x127, w, t, z;//scratch field elements
	sqr_3(x127, x);//x127=a^2
	mul_3(t, x, x127);//t=a^3
	sqr_3(x127, t);//t3=a^6
	mul_3(w, x127, x);//w=a^7
	sqr_3(x127, w);//a^14
	sqr_3(t, x127);//28
	sqr_3(x127, t);//x127=x^56
	copy(t, x127);
	mul_3(x127, w, t);//x^63
	sqr_3(t, x127);//x^126
	mul_3(x127, t, x);//x^127

	sqr_3(t, x127);
	mul_3(z, t, x);//z=x^255
	copy(w, z);
	for (int i = 0; i < 4; i++) {
		sqr_3(t, z);
		sqr_3(z, t);
	}
	mul_3(t, z, w);//t=z16

	copy(w, t);
	for (int i = 0; i < 8; i++)
	{
		sqr_3(z, t);
		sqr_3(t, z);
	}
	mul_3(z, t, w);        // z=z32      

	copy(w, z);
	for (int i = 0; i < 16; i++)
	{
		sqr_3(t, z);
		sqr_3(z, t);
	}
	mul_3(t, z, w);        // t=z64      

	copy(w, t);
	for (int i = 0; i < 32; i++)
	{
		sqr_3(z, t);
		sqr_3(t, z);
	}
	mul_3(z, t, w);        // z=z128     

	copy(w, z);
	for (int i = 0; i < 64; i++)
	{
		sqr_3(t, z);
		sqr_3(z, t);
	}
	mul_3(t, z, w);        // z=z256      
	//final assembly: combine with x^127 and the trailing bits
	mul_3(z, t, x127);
	sqr_3(t, z);
	sqr_3(z, t);
	mul_3(t, z, x);
	copy(c, t);
}

//Schoolbook 8x8-digit multiplication kernel using double-precision FMA:
//each 48-bit digit product is split into a high and a low part with the
//fma.rz / sub.rz trick (round-toward-zero), accumulated as raw 64-bit
//bit patterns, and written out as 16 unnormalised digits per thread.
//One operand pair per thread; expects a 2-D block (BLOCK_WIDTH x
//BLOCK_HEIGHT) and a 1-D grid.
__global__ void mul_fma_8Kernel(argElement_x64 r, argElement_x64 a, argElement_x64 b) {
	//flat global thread index over the 2-D block
	int bid = blockIdx.x * blockDim.x * blockDim.y;
	int t = bid + (threadIdx.x + threadIdx.y * blockDim.x);
	//int t = threadIdx.x + threadIdx.y * blockDim.x;
	int tid = t * 8;//offset of this thread's 8-digit operands
	int tidC = t * 16;//offset of this thread's 16-digit result
	EltBN383_x64_fp_buffer c = { 0 };

	//splitting constants reinterpreted as doubles (exponent field 0x463;
	//ut2 differs only in the low mantissa bits) — used to extract the
	//high/low halves of each FMA product.  TODO(review): confirm the
	//exact exponent choice against the digit width.
	const uint64_t ut1 = 0x4630000000000000;
	const uint64_t ut2 = 0x4630000000000010;
	const double t1 = to_double(ut1);
	const double t2 = to_double(ut2);

	//pre-bias the accumulators to cancel the exponent/offset terms that
	//the raw bit-pattern accumulation below will add in
	int w2 = 15;
	for (int i = 0; i < 8; i++) {
		c[i] = (uint64_t)0x463 * i + (uint64_t)0x433 * (i + 1);
		c[i] = -(int64_t)(c[i] & 0xFFF) << 52;
		c[w2 - i] = (uint64_t)0x463 * (i + 1) + (uint64_t)0x433 * i;
		c[w2 - i] = -(int64_t)(c[w2 - i] & 0xFFF) << 52;
	}
	//fesetround(FE_TOWARDZERO);
	double p_hi = 0, p_lo = 0, sub = 0;

	for (int i = 0; i < 8; i++) {
		for (int j = 0; j < 8; j++) {
			
			//p_hi = fma((double)a[i], (double)b[j], t1);
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_hi) : "d"((double)a[i + tid]), "d"((double)b[j + tid]), "d"(t1));
			c[i + j + 1] += to_u64(p_hi);//high part -> next digit
			//sub = t2 - p_hi;
			asm("sub.rz.f64 %0, %1, %2; " : "=d"(sub) : "d"(t2), "d"(p_hi));
			//p_lo = fma((double)a[i], (double)b[j], sub);
			asm("fma.rz.f64 %0, %1, %2, %3; " : "=d"(p_lo) : "d"((double)a[i + tid]), "d"((double)b[j + tid]), "d"(sub));

			c[i + j] += to_u64(p_lo);//low part -> current digit
		}
	}
	//write the double-width, unnormalised product back to global memory
	for (int i = 0; i < 16; i++) {
		r[i + tidC] = c[i];
	}
	//fesetround(FE_TONEAREST);
}

//Benchmark kernel: Iterate rounds of schoolbook-FMA multiply + reduce +
//digit normalisation per thread; only the final result is written back.
__global__ void mul_fma_reduction(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	const int thread = blockIdx.x * blockDim.x * blockDim.y
		+ threadIdx.y * blockDim.x + threadIdx.x;
	const int base = thread * 8;

	EltBN383_x64_fp_buffer wide = { 0 };
	EltBN383_x64_fp lhs, rhs, out;
	//stage operands in registers/local memory
	for (int d = 0; d < NUM_DIGITS_383_x64_fp; d++) {
		lhs[d] = a[base + d];
		rhs[d] = b[base + d];
	}
	for (int rep = 0; rep < Iterate; rep++) {
		mul_fma_8(wide, lhs, rhs);
		reduction(wide, wide);
		simplification_unique(out, wide);
	}
	for (int d = 0; d < NUM_DIGITS_383_x64_fp; d++) {
		c[base + d] = out[d];
	}
}


//Benchmark kernel: Iterate modular multiplications via mul_1 per thread.
__global__ void mul_1_kernel(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	const int thread = blockIdx.x * blockDim.x * blockDim.y
		+ threadIdx.y * blockDim.x + threadIdx.x;
	const int base = thread * 8;

	EltBN383_x64_fp lhs, rhs, out;
	for (int d = 0; d < NUM_DIGITS_383_x64_fp; d++) {
		lhs[d] = a[base + d];
		rhs[d] = b[base + d];
	}
	for (int rep = 0; rep < Iterate; rep++) {
		mul_1(out, lhs, rhs);
	}
	for (int d = 0; d < NUM_DIGITS_383_x64_fp; d++) {
		c[base + d] = out[d];
	}
}

//Benchmark kernel: Iterate rounds of original-Karatsuba multiply +
//reduce + normalise per thread.
__global__ void kara_mul_origin_reduction(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	const int thread = blockIdx.x * blockDim.x * blockDim.y
		+ threadIdx.y * blockDim.x + threadIdx.x;
	const int base = thread * 8;

	EltBN383_x64_fp_buffer wide = { 0 };
	EltBN383_x64_fp lhs, rhs, out;
	for (int d = 0; d < NUM_DIGITS_383_x64_fp; d++) {
		lhs[d] = a[base + d];
		rhs[d] = b[base + d];
	}
	for (int rep = 0; rep < Iterate; rep++) {
		kara_mul_origin(wide, lhs, rhs);
		reduction(wide, wide);
		simplification_unique(out, wide);
	}
	for (int d = 0; d < NUM_DIGITS_383_x64_fp; d++) {
		c[base + d] = out[d];
	}
}

//Benchmark kernel: Iterate rounds of simplified-Karatsuba multiply +
//reduce + normalise per thread.
__global__ void kara_mul_with_simplify_reduction(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	const int thread = blockIdx.x * blockDim.x * blockDim.y
		+ threadIdx.y * blockDim.x + threadIdx.x;
	const int base = thread * 8;

	EltBN383_x64_fp_buffer wide = { 0 };
	EltBN383_x64_fp lhs, rhs, out;
	for (int d = 0; d < NUM_DIGITS_383_x64_fp; d++) {
		lhs[d] = a[base + d];
		rhs[d] = b[base + d];
	}
	for (int rep = 0; rep < Iterate; rep++) {
		kara_mul_with_simplify(wide, lhs, rhs);
		reduction(wide, wide);
		simplification_unique(out, wide);
	}
	for (int d = 0; d < NUM_DIGITS_383_x64_fp; d++) {
		c[base + d] = out[d];
	}
}

//Benchmark kernel: Iterate rounds of product-scanning Karatsuba squaring
//+ reduce + normalise per thread.
__global__ void kara_sqr_reduction(argElement_x64 c, argElement_x64 a) {
	const int thread = blockIdx.x * blockDim.x * blockDim.y
		+ threadIdx.y * blockDim.x + threadIdx.x;
	const int base = thread * 8;

	EltBN383_x64_fp_buffer wide = { 0 };
	EltBN383_x64_fp operand, out;
	for (int d = 0; d < NUM_DIGITS_383_x64_fp; d++) {
		operand[d] = a[base + d];
	}
	for (int rep = 0; rep < Iterate; rep++) {
		kara_sqr_product_scanning(wide, operand);
		reduction(wide, wide);
		simplification_unique(out, wide);
	}
	for (int d = 0; d < NUM_DIGITS_383_x64_fp; d++) {
		c[base + d] = out[d];
	}
}
//Benchmark kernel: Iterate rounds of simplified-Karatsuba squaring +
//reduce + normalise per thread.
__global__ void kara_sqr_with_simplify_reduction(argElement_x64 c, argElement_x64 a) {
	const int thread = blockIdx.x * blockDim.x * blockDim.y
		+ threadIdx.y * blockDim.x + threadIdx.x;
	const int base = thread * 8;

	EltBN383_x64_fp_buffer wide = { 0 };
	EltBN383_x64_fp operand, out;
	for (int d = 0; d < NUM_DIGITS_383_x64_fp; d++) {
		operand[d] = a[base + d];
	}
	for (int rep = 0; rep < Iterate; rep++) {
		kara_sqr_with_simplify(wide, operand);
		reduction(wide, wide);
		simplification_unique(out, wide);
	}
	for (int d = 0; d < NUM_DIGITS_383_x64_fp; d++) {
		c[base + d] = out[d];
	}
}


//One modular inversion per thread via inv_1 (schoolbook-FMA arithmetic).
__global__ void inv_1_kernel(argElement_x64 c, argElement_x64 a) {
	const int thread = blockIdx.x * blockDim.x * blockDim.y
		+ threadIdx.y * blockDim.x + threadIdx.x;
	const int base = thread * 8;
	inv_1(c + base, a + base);
}

//One modular inversion per thread via inv_2 (original Karatsuba arithmetic).
__global__ void inv_2_kernel(argElement_x64 c, argElement_x64 a) {
	const int thread = blockIdx.x * blockDim.x * blockDim.y
		+ threadIdx.y * blockDim.x + threadIdx.x;
	const int base = thread * 8;
	inv_2(c + base, a + base);
}

//One modular inversion per thread via inv_3 (product-scanning arithmetic).
__global__ void inv_3_kernel(argElement_x64 c, argElement_x64 a) {
	const int thread = blockIdx.x * blockDim.x * blockDim.y
		+ threadIdx.y * blockDim.x + threadIdx.x;
	const int base = thread * 8;
	inv_3(c + base, a + base);
}
//Benchmark kernel: Iterate integer squarings of one 12x32-bit operand
//per thread; only the final result is written back.
__global__ void intsqr_kernel(argElement_x32 c, argElement_x32 a) {
	const int thread = blockIdx.x * blockDim.x * blockDim.y
		+ threadIdx.y * blockDim.x + threadIdx.x;
	const int base = thread * NUM_DIGITS_383_x32;
	uint32_t operand[NUM_DIGITS_383_x32], out[NUM_DIGITS_383_x32];
	for (int d = 0; d < NUM_DIGITS_383_x32; d++) {
		operand[d] = a[base + d];
	}
	for (int rep = 0; rep < Iterate; rep++) {
		intsqr(out, operand);
	}
	for (int d = 0; d < NUM_DIGITS_383_x32; d++) {
		c[base + d] = out[d];
	}
}



//input for threadIdx.x%2=0:A||B_lo;for threadIdx.x%2=1: B||A_hi
//#define CUDA_VER 10
//#if CUDA_VER>=9
//__device__ void kara_mul_origin_reduction_2threads(argElement_x64 c, argElement_x64 in) {
//	int t = blockIdx.x * blockDim.x * blockDim.y + (threadIdx.x + threadIdx.y * blockDim.x);
//	int laneId = threadIdx.x & 1;//0 or 1
//	const int half_n = 8 / 2;
//	uint64_t sum[8], c_tmp[8], sum1[half_n], c_tmp1[half_n];
//	for (int i = 0; i < half_n; i++) {
//		sum[i] = in[i+t*12] + in[i + t * 12 + half_n];
//	}
//	mul_sub_routine_origin(c_tmp, in + t * 12 + laneId * half_n, in + t * 12 + 8);
//	for (int i = 0; i < 4; i++) {
//		sum1[i] = __shfl_xor_sync(-1, sum[i], 1);
//		c_tmp1[i] = __shfl_xor_sync(-1, c_tmp[i + (1 - laneId) * 4], 1);
//	}
//	mul_sub_routine_origin(sum, sum, sum1);
//	for (int i = 0; i < 3; i++) {
//		sum[i + laneId * 4] += c_tmp[i + (1 - laneId) * 4];
//		sum[i + laneId * 4] |= ((uint64_t)1 << 54);
//		sum[i + 1 + laneId * 4] -= (1 << 6);
//	}
//	sum[3 + laneId * 4] += c_tmp[3 + (1 - laneId) * 4];
//	sum[3 + laneId * 4] |= ((uint64_t)1 << 54);
//	if (laneId == 1) {
//		sum[4] -= (1 << 6);
//		c_tmp[5] -= (1 << 6);
//	}
//	for (int i = 0; i < 4; i++) {
//		c_tmp[i + (1 - laneId) * 4] = sum[i + laneId * 4] - c_tmp[i + laneId * 4] - c_tmp1[i + laneId * 4];
//	}
//	for (int i = 0;i < 4;i++) {
//		c_tmp1[i] = __shfl_xor_sync(-1, c_tmp[i + (1 - laneId) * 4], 1);//laneId=0: c_hi[0:3]; laneId=1: c_lo[4:7]
//	}
//	int m1 = (laneId == 0) ? 1 : 374;//laneId=0:1
//	int m2 = (laneId == 1) ? 1 : 374;//laneId=0:374
//	for (int i = 0; i < 4; i++) {
//		c[i+t*4] = c_tmp[i + laneId * 4] * m1 + c_tmp1[i] * m2;
//	}
//}
//
//__global__ void kara_mul_2threadsKernel(argElement_x64 c, argElement_x64 in) {
//	int bid = blockIdx.x * blockDim.x * blockDim.y;
//	int t = bid + (threadIdx.x + threadIdx.y * blockDim.x);
//	kara_mul_origin_reduction_2threads(c + 4 * t, in + 12 * t);
//}
//
////in=A||B=AL||AH||BL|BH
//__global__ void kara_mul_2threadsKernel1(argElement_x64 c, argElement_x64 in) {
//	int tid = blockIdx.x * blockDim.x * blockDim.y;
//	int t = tid + (threadIdx.x + threadIdx.y * blockDim.x);//global index to access input and output
//	tid = threadIdx.x + threadIdx.y * blockDim.x;//index inside a block
//	int laneId = threadIdx.x & 1;//0 or 1
//	int half_n = 4;
//	int sid = (tid / 2) * 8;//laneId=0:0-8;laneId=1:0-8;
//
//	extern __shared__ uint64_t ssum[4 * BLOCK_SIZE];//each block and each thread contains 4 digits
//	extern __shared__ uint64_t sc_tmp[8 * BLOCK_SIZE];//each block and each thread contains 8 digits
//	int index1,index2;
//	index1 = t * 8;index2 = half_n * tid;
//	for (int i = 0; i < half_n; i++) {
//		ssum[i + index2] = in[i + index1] + in[i + index1 + half_n];//ssum[0:3]=AH+AL;ssum[4:7]=BH+BL
//	}
//	if(laneId==0)
//		mul_sub_routine_origin(ssum + sid, ssum + sid, ssum + sid + half_n);//ssum=(AH+AL)(BH+BL)
//
//	sid = (tid / 2) * 16 + laneId * half_n;
//	mul_sub_routine_origin(sc_tmp + tid * 8, in + sid, in + sid + 8);//sc_tmp[0:7]=AL*BL;sc_tmp[8:15]=AH*BL;
//	
//	index1 = tid * 8 + (1 - laneId) * half_n;
//	for (int i = 0; i < 3; i++) {
//		ssum[i + index2] += sc_tmp[i + index1];
//		ssum[i + index2] |= ((uint64_t)1 << 54);
//		ssum[i + 1 + index2] -= (1 << 6);
//	}
//	ssum[3 + index2] += sc_tmp[3 + index1];
//	ssum[3 + index2] |= ((uint64_t)1 << 54);
//	if (laneId == 1) {
//		ssum[index2] -= (1 << 6);
//		sc_tmp[5+8*tid] -= (1 << 6);
//	}
//	index2 = 8 * (tid - laneId) + laneId * half_n;
//	for (int i = 0; i < 4; i++) {
//		sc_tmp[i + index1] = ssum[i + half_n * tid] - sc_tmp[i + index2] - sc_tmp[i + index2 + 8];
//	}
//	index2 = half_n * tid;
//	for (int i = 0; i < 8; i++) {
//		c[i + t * 4] = sc_tmp[i + index2] + sc_tmp[i + index2 + 8] * 374;
//	}
//
//	//kara_mul_origin_reduction_2threads1(c + 4 * t, in + 12 * t);
//}


//Benchmark kernel: Iterate rounds of product-scanning Karatsuba multiply
//+ reduce + normalise per thread.
__global__ void kara_mul_product_scanning_reduction(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	const int thread = blockIdx.x * blockDim.x * blockDim.y
		+ threadIdx.y * blockDim.x + threadIdx.x;
	const int base = thread * 8;

	EltBN383_x64_fp_buffer wide = { 0 };
	EltBN383_x64_fp lhs, rhs, out;
	for (int d = 0; d < NUM_DIGITS_383_x64_fp; d++) {
		lhs[d] = a[base + d];
		rhs[d] = b[base + d];
	}
	for (int rep = 0; rep < Iterate; rep++) {
		kara_mul_product_scanning(wide, lhs, rhs);
		reduction(wide, wide);
		simplification_unique(out, wide);
	}
	for (int d = 0; d < NUM_DIGITS_383_x64_fp; d++) {
		c[base + d] = out[d];
	}
}


//modular multiplication with karatsuba multiplication
__global__ void intmul_kernel(argElement_x32 c, argElement_x32 a, argElement_x32 b) {
	//int t = threadIdx.x + threadIdx.y * blockDim.x;
	int bid = blockIdx.x * blockDim.x * blockDim.y;
	int t = bid + (threadIdx.x + threadIdx.y * blockDim.x);
	int tid = t * NUM_DIGITS_383_x32;
	//mul_fma_5_with_reduction(c + tid, a + tid, b + tid);
	//EltBN221_x64_fp_buffer tmp = { 0 };
	uint32_t tc[NUM_DIGITS_383_x32],ta[NUM_DIGITS_383_x32],tb[NUM_DIGITS_383_x32];
	for(int i=0;i<NUM_DIGITS_383_x32;i++){
		ta[i]=a[i+tid];
		tb[i]=b[i+tid];
	}
	for (int i = 0;i < Iterate;i++) {
		intmul(tc, ta, tb);
	}
	for(int i=0;i<NUM_DIGITS_383_x32;i++){
		c[i+tid]=tc[i];
	}
	//reduction(tmp, tmp);
	//simplification_unique(c + tid, tmp);
}
//Benchmark kernel: Iterate 32-bit-digit modular multiplications using the
//alternative intmul2 routine per thread.
__global__ void intmul2_kernel(argElement_x32 c, argElement_x32 a, argElement_x32 b) {
	const int thread = blockIdx.x * blockDim.x * blockDim.y
		+ threadIdx.y * blockDim.x + threadIdx.x;
	const int base = thread * NUM_DIGITS_383_x32;
	uint32_t lhs[NUM_DIGITS_383_x32], rhs[NUM_DIGITS_383_x32], out[NUM_DIGITS_383_x32];
	for (int d = 0; d < NUM_DIGITS_383_x32; d++) {
		lhs[d] = a[base + d];
		rhs[d] = b[base + d];
	}
	for (int rep = 0; rep < Iterate; rep++) {
		intmul2(out, lhs, rhs);
	}
	for (int d = 0; d < NUM_DIGITS_383_x32; d++) {
		c[base + d] = out[d];
	}
}

extern "C"
//Host benchmark driver for the 32-bit-digit multipliers: uploads a and b,
//times intmul_kernel and intmul2_kernel back-to-back (both write dev_c,
//so the second kernel's result is what gets copied back to c).
//Fix: the two cudaEvent_t handles are now destroyed (they leaked on
//every call); garbled (mojibake) comments translated to English and the
//unused device_prefix buffer removed.
void intmulAPI(argElement_x32 c, argElement_x32 a, argElement_x32 b) {
	argElement_x32 dev_a = NULL;
	argElement_x32 dev_b = NULL;
	argElement_x32 dev_c = NULL;

	dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
	int numBlocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
	int numThreads = numBlocks * BLOCK_SIZE;//== N since N is a multiple of BLOCK_SIZE

	int deviceNum = 0;
	float delta_time1 = 0.0f;
	cudaEvent_t kernel_start1, kernel_stop1;
	struct cudaDeviceProp device_prop;

	//select which GPU to run on
	cudaSetDevice(deviceNum);
	int tmpN = NUM_DIGITS_383_x32 * sizeof(uint32_t);//bytes per operand
	cudaMalloc((void**)& dev_c, tmpN * numThreads);
	cudaMalloc((void**)& dev_a, tmpN * numThreads);
	cudaMalloc((void**)& dev_b, tmpN * numThreads);

	//zero the three device vectors of c = a * b before uploading
	cudaMemset(dev_c, 0, tmpN * numThreads);
	cudaMemset(dev_a, 0, tmpN * numThreads);
	cudaMemset(dev_b, 0, tmpN * numThreads);

	cudaMemcpy(dev_a, a, NUM_DIGITS_383_x32 * sizeof(uint32_t) * numThreads, cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b, b, NUM_DIGITS_383_x32 * sizeof(uint32_t) * numThreads, cudaMemcpyHostToDevice);

	cudaEventCreate(&kernel_start1);
	cudaEventCreateWithFlags(&kernel_stop1, cudaEventBlockingSync);
	cudaGetDeviceProperties(&device_prop, deviceNum);
	cudaEventRecord(kernel_start1, 0);

	intmul_kernel << <numBlocks, threadsPerBlock >> > (dev_c, dev_a, dev_b);
	intmul2_kernel << <numBlocks, threadsPerBlock >> > (dev_c, dev_a, dev_b);
	cudaEventRecord(kernel_stop1, 0);
	cudaEventSynchronize(kernel_stop1);
	cudaEventElapsedTime(&delta_time1, kernel_start1, kernel_stop1);
	(void)delta_time1;//kept for a timing report if re-enabled

	cudaMemcpy(c, dev_c, tmpN * N, cudaMemcpyDeviceToHost);

	//release timing events (previously leaked) and device buffers
	cudaEventDestroy(kernel_start1);
	cudaEventDestroy(kernel_stop1);
	cudaFree(dev_c);
	cudaFree(dev_a);
	cudaFree(dev_b);

}


extern "C"
//Host benchmark driver for 32-bit-digit squaring: uploads a, times
//intsqr_kernel, and copies the result back to c.
//Fix: the two cudaEvent_t handles are now destroyed (they leaked on
//every call); garbled (mojibake) comments translated to English and the
//unused device_prefix buffer removed.
void intsqrAPI(argElement_x32 c, argElement_x32 a) {
	argElement_x32 dev_a = NULL;
	argElement_x32 dev_c = NULL;

	dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
	int numBlocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
	int numThreads = numBlocks * BLOCK_SIZE;//== N since N is a multiple of BLOCK_SIZE

	int deviceNum = 0;
	float delta_time1 = 0.0f;
	cudaEvent_t kernel_start1, kernel_stop1;
	struct cudaDeviceProp device_prop;

	//select which GPU to run on
	cudaSetDevice(deviceNum);
	int tmpN = NUM_DIGITS_383_x32 * sizeof(uint32_t);//bytes per operand
	cudaMalloc((void**)& dev_c, tmpN * numThreads);
	cudaMalloc((void**)& dev_a, tmpN * numThreads);

	//zero the device vectors before uploading the operand
	cudaMemset(dev_c, 0, tmpN * numThreads);
	cudaMemset(dev_a, 0, tmpN * numThreads);

	cudaMemcpy(dev_a, a, NUM_DIGITS_383_x32 * sizeof(uint32_t) * numThreads, cudaMemcpyHostToDevice);

	cudaEventCreate(&kernel_start1);
	cudaEventCreateWithFlags(&kernel_stop1, cudaEventBlockingSync);
	cudaGetDeviceProperties(&device_prop, deviceNum);
	cudaEventRecord(kernel_start1, 0);

	intsqr_kernel << <numBlocks, threadsPerBlock >> > (dev_c, dev_a);
	cudaEventRecord(kernel_stop1, 0);
	cudaEventSynchronize(kernel_stop1);
	cudaEventElapsedTime(&delta_time1, kernel_start1, kernel_stop1);
	(void)delta_time1;//kept for a timing report if re-enabled

	cudaMemcpy(c, dev_c, tmpN * N, cudaMemcpyDeviceToHost);

	//release timing events (previously leaked) and device buffers
	cudaEventDestroy(kernel_start1);
	cudaEventDestroy(kernel_stop1);
	cudaFree(dev_c);
	cudaFree(dev_a);

}

//extern "C"
//void kara_mul_2threadsAPI(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
//	argElement_x64 in = new uint64_t[24*N];
//	argElement_x64 dev_in = NULL;
//	argElement_x64 dev_c = NULL;
//
//	dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT);//256 thread
//	int numBlocks = (2 * N + BLOCK_SIZE - 1) / BLOCK_SIZE;
//	int numThreads = numBlocks * BLOCK_SIZE;
//
//	int deviceNum = 0;
//	float delta_time1 = 0.0f;
//	cudaEvent_t kernel_start1, kernel_stop1;
//	struct cudaDeviceProp device_prop;
//	char device_prefix[261];
//
//	for (int j = 0;j < N;j++) {
//		for (int i = 0;i < 8;i++) {
//			in[i + j * 24] = a[i + 8 * j];
//		}
//		for (int i = 0;i < 4;i++) {
//			in[8 + i + j * 24] = b[i + 8 * j];
//		}
//		for (int i = 0;i < 8;i++) {
//			in[12 + i + j * 24] = b[i + 8 * j];
//		}
//		for (int i = 0;i < 4;i++) {
//			in[20 + i + j * 24] = a[i + 4 + 8 * j];
//		}
//	}
//
//	cudaSetDevice(deviceNum);
//	cudaMalloc((void**)& dev_c, 4 * sizeof(uint64_t) * numThreads);
//	cudaMalloc((void**)& dev_in, 12 * sizeof(uint64_t) * numThreads);
//
//	cudaMemset(dev_c, 0, 4 * sizeof(uint64_t) * numThreads);
//	cudaMemset(dev_in, 0, 12 * sizeof(uint64_t) * numThreads);
//
//	cudaMemcpy(dev_in, in, 24 * sizeof(uint64_t) * N, cudaMemcpyHostToDevice);
//
//	cudaEventCreate(&kernel_start1);
//	cudaEventCreateWithFlags(&kernel_stop1, cudaEventBlockingSync);
//	cudaGetDeviceProperties(&device_prop, deviceNum);
//	//sprintf(device_prefix, "ID:%d %s:", deviceNum, device_prop.name);
//	//printf("\nLauching %u blocks. %u threads", numBlocks, BLOCK_SIZE);
//	cudaEventRecord(kernel_start1, 0);
//
//	kara_mul_2threadsKernel << <numBlocks, threadsPerBlock >> > (dev_c, dev_in);
//	cudaEventRecord(kernel_stop1, 0);
//	cudaEventSynchronize(kernel_stop1);
//	cudaEventElapsedTime(&delta_time1, kernel_start1, kernel_stop1);
//	//printf("%s:kara_mul_2threadsKernel time: %.2fms. Execute %d operations.\n", device_prefix, delta_time1, N);
//
//	cudaMemcpy(c, dev_c, 8* sizeof(uint64_t) * N, cudaMemcpyDeviceToHost);
//	delete in;
//	cudaFree(dev_c);
//	cudaFree(dev_in);
//}

//extern "C"
//void kara_mul_2threads1API(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
//	argElement_x64 in = new uint64_t[24 * N];
//	argElement_x64 dev_in = NULL;
//	argElement_x64 dev_c = NULL;
//
//	dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT);//256 thread
//	int numBlocks = (2 * N + BLOCK_SIZE - 1) / BLOCK_SIZE;
//	int numThreads = numBlocks * BLOCK_SIZE;
//
//	int deviceNum = 0;
//	float delta_time1 = 0.0f;
//	cudaEvent_t kernel_start1, kernel_stop1;
//	struct cudaDeviceProp device_prop;
//	char device_prefix[261];
//
//	cudaSetDevice(deviceNum);
//	cudaMalloc((void**)& dev_c, 4 * sizeof(uint64_t) * numThreads);
//	cudaMalloc((void**)& dev_in, 8 * sizeof(uint64_t) * numThreads);
//
//	cudaMemset(dev_c, 0, 4 * sizeof(uint64_t) * numThreads);
//	cudaMemset(dev_in, 0, 12 * sizeof(uint64_t) * numThreads);
//
//	for (int j = 0;j < N;j++) {
//		for (int i = 0;i < 8;i++) {
//			in[i + j*16] = a[i + 8 * j];
//		}
//		for (int i = 0;i < 8;i++) {
//			in[8 + i + j * 16] = b[i + 8 * j];
//		}
//	}
//	cudaMemcpy(dev_in, in, 16 * sizeof(uint64_t) * N, cudaMemcpyHostToDevice);
//
//	cudaEventCreate(&kernel_start1);
//	cudaEventCreateWithFlags(&kernel_stop1, cudaEventBlockingSync);
//	cudaGetDeviceProperties(&device_prop, deviceNum);
//	//sprintf(device_prefix, "ID:%d %s:", deviceNum, device_prop.name);
//	//printf("\nLauching %u blocks. %u threads", numBlocks, BLOCK_SIZE);
//	cudaEventRecord(kernel_start1, 0);
//
//	kara_mul_2threadsKernel1 << <numBlocks, threadsPerBlock >> > (dev_c, dev_in);
//	cudaEventRecord(kernel_stop1, 0);
//	cudaEventSynchronize(kernel_stop1);
//	cudaEventElapsedTime(&delta_time1, kernel_start1, kernel_stop1);
//	//printf("%s:kara_mul_2threadsKernel time: %.2fms. Execute %d operations.\n", device_prefix, delta_time1, N);
//
//	cudaMemcpy(c, dev_c, 8 * sizeof(uint64_t) * N, cudaMemcpyDeviceToHost);
//	delete in;
//	cudaFree(dev_c);
//	cudaFree(dev_in);
//}

//#endif // 

extern "C"
//Host driver for the raw FMA multiplier: uploads a and b (8 digits per
//element), runs mul_fma_8Kernel once, and copies the 16-digit unreduced
//products back to c.
//Fix: the two cudaEvent_t handles are now destroyed (they leaked on
//every call); garbled (mojibake) comments translated to English and the
//unused device_prefix buffer removed.
void mul_fmaAPI(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	argElement_x64 dev_a = NULL;
	argElement_x64 dev_b = NULL;
	argElement_x64 dev_c = NULL;

	dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
	int numBlocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
	int numThreads = numBlocks * BLOCK_SIZE;//== N since N is a multiple of BLOCK_SIZE

	int deviceNum = 0;
	float delta_time1 = 0.0f;
	cudaEvent_t kernel_start1, kernel_stop1;
	struct cudaDeviceProp device_prop;

	//select which GPU to run on
	cudaSetDevice(deviceNum);

	int tmpN = 8 * sizeof(uint64_t);//bytes per 8-digit operand
	int tmpM = 16 * sizeof(uint64_t);//bytes per 16-digit product
	cudaMalloc((void**)& dev_c, tmpM * numThreads);
	cudaMalloc((void**)& dev_a, tmpN * numThreads);
	cudaMalloc((void**)& dev_b, tmpN * numThreads);

	//zero the three device vectors of c = a * b before uploading
	cudaMemset(dev_c, 0, tmpM * numThreads);
	cudaMemset(dev_a, 0, tmpN * numThreads);
	cudaMemset(dev_b, 0, tmpN * numThreads);

	cudaMemcpy(dev_a, a, 8 * sizeof(uint64_t) * N, cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b, b, 8 * sizeof(uint64_t) * N, cudaMemcpyHostToDevice);

	cudaEventCreate(&kernel_start1);
	cudaEventCreateWithFlags(&kernel_stop1, cudaEventBlockingSync);
	cudaGetDeviceProperties(&device_prop, deviceNum);
	cudaEventRecord(kernel_start1, 0);

	mul_fma_8Kernel << <numBlocks, threadsPerBlock >> > (dev_c, dev_a, dev_b);
	cudaEventRecord(kernel_stop1, 0);
	cudaEventSynchronize(kernel_stop1);
	cudaEventElapsedTime(&delta_time1, kernel_start1, kernel_stop1);
	(void)delta_time1;//kept for a timing report if re-enabled

	cudaMemcpy(c, dev_c, tmpM * N, cudaMemcpyDeviceToHost);

	//release timing events (previously leaked) and device buffers
	cudaEventDestroy(kernel_start1);
	cudaEventDestroy(kernel_stop1);
	cudaFree(dev_c);
	cudaFree(dev_a);
	cudaFree(dev_b);

}


extern "C"
//Host benchmark driver for modular inversion: uploads a, runs the three
//inversion kernels back-to-back (all write dev_c, so the last kernel's
//result is what gets copied back to c).
//Fix: the two cudaEvent_t handles are now destroyed (they leaked on
//every call); garbled (mojibake) comments translated to English and the
//unused device_prefix buffer removed.
void inv_API(argElement_x64 c, argElement_x64 a) {
	argElement_x64 dev_a = NULL;
	argElement_x64 dev_c = NULL;

	dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
	int numBlocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
	int numThreads = numBlocks * BLOCK_SIZE;//== N since N is a multiple of BLOCK_SIZE

	int deviceNum = 0;
	float delta_time1 = 0.0f;
	cudaEvent_t kernel_start1, kernel_stop1;
	struct cudaDeviceProp device_prop;

	//select which GPU to run on
	cudaSetDevice(deviceNum);

	int tmpN = 8 * sizeof(uint64_t);//bytes per 8-digit field element
	cudaMalloc((void**)& dev_c, tmpN * numThreads);
	cudaMalloc((void**)& dev_a, tmpN * numThreads);

	//zero both device vectors before uploading the operand
	cudaMemset(dev_c, 0, tmpN * numThreads);
	cudaMemset(dev_a, 0, tmpN * numThreads);

	cudaMemcpy(dev_a, a, 8 * sizeof(uint64_t) * N, cudaMemcpyHostToDevice);

	cudaEventCreate(&kernel_start1);
	cudaEventCreateWithFlags(&kernel_stop1, cudaEventBlockingSync);
	cudaGetDeviceProperties(&device_prop, deviceNum);
	cudaEventRecord(kernel_start1, 0);

	inv_1_kernel << <numBlocks, threadsPerBlock >> > (dev_c, dev_a);
	inv_2_kernel << <numBlocks, threadsPerBlock >> > (dev_c, dev_a);
	inv_3_kernel << <numBlocks, threadsPerBlock >> > (dev_c, dev_a);
	cudaEventRecord(kernel_stop1, 0);
	cudaEventSynchronize(kernel_stop1);
	cudaEventElapsedTime(&delta_time1, kernel_start1, kernel_stop1);
	(void)delta_time1;//kept for a timing report if re-enabled

	cudaMemcpy(c, dev_c, tmpN * N, cudaMemcpyDeviceToHost);

	//release timing events (previously leaked) and device buffers
	cudaEventDestroy(kernel_start1);
	cudaEventDestroy(kernel_stop1);
	cudaFree(dev_c);
	cudaFree(dev_a);

}


extern "C"
void mul_fma_reductionAPI(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	// Element-wise modular multiplication (multiply + reduction fused in
	// mul_fma_reduction). a, b, c each hold N elements of 8 uint64_t
	// limbs; unlike mul_fmaAPI, the result is already reduced, so the
	// output is single-width.
	argElement_x64 dev_a = NULL;
	argElement_x64 dev_b = NULL;
	argElement_x64 dev_c = NULL;

	dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT); // BLOCK_SIZE threads per block
	int numBlocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE; // ceil-div covers all operands
	int numThreads = numBlocks * BLOCK_SIZE;

	int deviceNum = 0;
	float delta_time1 = 0.0f;
	cudaEvent_t kernel_start1, kernel_stop1;

	// Select which GPU the program runs on.
	cudaSetDevice(deviceNum);

	size_t tmpN = 8 * sizeof(uint64_t); // bytes per 8-limb element
	cudaMalloc((void**)& dev_c, tmpN * numThreads);
	cudaMalloc((void**)& dev_a, tmpN * numThreads);
	cudaMalloc((void**)& dev_b, tmpN * numThreads);

	// Zero everything, so the padded tail beyond N operates on zeros.
	cudaMemset(dev_c, 0, tmpN * numThreads);
	cudaMemset(dev_a, 0, tmpN * numThreads);
	cudaMemset(dev_b, 0, tmpN * numThreads);

	cudaMemcpy(dev_a, a, tmpN * N, cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b, b, tmpN * N, cudaMemcpyHostToDevice);

	cudaEventCreate(&kernel_start1);
	cudaEventCreateWithFlags(&kernel_stop1, cudaEventBlockingSync);
	cudaEventRecord(kernel_start1, 0);

	mul_fma_reduction << <numBlocks, threadsPerBlock >> > (dev_c, dev_a, dev_b);
	// Catch bad launch configurations immediately.
	cudaError_t launchErr = cudaGetLastError();
	if (launchErr != cudaSuccess) {
		printf("mul_fma_reductionAPI launch failed: %s\n", cudaGetErrorString(launchErr));
	}
	cudaEventRecord(kernel_stop1, 0);
	cudaEventSynchronize(kernel_stop1);
	cudaEventElapsedTime(&delta_time1, kernel_start1, kernel_stop1);
	(void)delta_time1; // timing retained for optional profiling printout

	cudaMemcpy(c, dev_c, tmpN * N, cudaMemcpyDeviceToHost);

	// Fix: events were previously leaked on every call.
	cudaEventDestroy(kernel_start1);
	cudaEventDestroy(kernel_stop1);

	cudaFree(dev_c);
	cudaFree(dev_a);
	cudaFree(dev_b);
}

extern "C"
void kara_mul_origin_reductionAPI(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	// Element-wise modular multiplication using the Karatsuba kernels.
	// a, b, c each hold N elements of 8 uint64_t limbs.
	// NOTE(review): two kernels are launched back-to-back on the same
	// output buffer; the second (kara_mul_with_simplify_reduction)
	// overwrites the first's result, so the copy back to `c` reflects
	// only the second kernel. This looks like benchmark leftovers —
	// confirm whether the first launch is intentional.
	argElement_x64 dev_a = NULL;
	argElement_x64 dev_b = NULL;
	argElement_x64 dev_c = NULL;

	dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT); // BLOCK_SIZE threads per block
	int numBlocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE; // ceil-div covers all operands
	int numThreads = numBlocks * BLOCK_SIZE;

	int deviceNum = 0;
	float delta_time1 = 0.0f;
	cudaEvent_t kernel_start1, kernel_stop1;

	// Select which GPU the program runs on.
	cudaSetDevice(deviceNum);

	size_t tmpN = 8 * sizeof(uint64_t); // bytes per 8-limb element
	cudaMalloc((void**)& dev_c, tmpN * numThreads);
	cudaMalloc((void**)& dev_a, tmpN * numThreads);
	cudaMalloc((void**)& dev_b, tmpN * numThreads);

	// Zero everything, so the padded tail beyond N operates on zeros.
	cudaMemset(dev_c, 0, tmpN * numThreads);
	cudaMemset(dev_a, 0, tmpN * numThreads);
	cudaMemset(dev_b, 0, tmpN * numThreads);

	cudaMemcpy(dev_a, a, tmpN * N, cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b, b, tmpN * N, cudaMemcpyHostToDevice);

	cudaEventCreate(&kernel_start1);
	cudaEventCreateWithFlags(&kernel_stop1, cudaEventBlockingSync);
	cudaEventRecord(kernel_start1, 0);

	kara_mul_origin_reduction << <numBlocks, threadsPerBlock >> > (dev_c, dev_a, dev_b);
	kara_mul_with_simplify_reduction << <numBlocks, threadsPerBlock >> > (dev_c, dev_a, dev_b);
	// Catch bad launch configurations immediately.
	cudaError_t launchErr = cudaGetLastError();
	if (launchErr != cudaSuccess) {
		printf("kara_mul_origin_reductionAPI launch failed: %s\n", cudaGetErrorString(launchErr));
	}
	cudaEventRecord(kernel_stop1, 0);
	cudaEventSynchronize(kernel_stop1);
	cudaEventElapsedTime(&delta_time1, kernel_start1, kernel_stop1);
	(void)delta_time1; // timing retained for optional profiling printout

	cudaMemcpy(c, dev_c, tmpN * N, cudaMemcpyDeviceToHost);

	// Fix: events were previously leaked on every call.
	cudaEventDestroy(kernel_start1);
	cudaEventDestroy(kernel_stop1);

	cudaFree(dev_c);
	cudaFree(dev_a);
	cudaFree(dev_b);
}

extern "C"
void kara_sqr_origin_reductionAPI(argElement_x64 c, argElement_x64 a) {
	// Element-wise modular squaring using the Karatsuba squaring kernel.
	// a and c each hold N elements of 8 uint64_t limbs.
	argElement_x64 dev_a = NULL;
	argElement_x64 dev_c = NULL;

	dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT); // BLOCK_SIZE threads per block
	int numBlocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE; // ceil-div covers all operands
	int numThreads = numBlocks * BLOCK_SIZE;

	int deviceNum = 0;
	float delta_time1 = 0.0f;
	cudaEvent_t kernel_start1, kernel_stop1;

	// Select which GPU the program runs on.
	cudaSetDevice(deviceNum);

	size_t tmpN = 8 * sizeof(uint64_t); // bytes per 8-limb element
	cudaMalloc((void**)& dev_c, tmpN * numThreads);
	cudaMalloc((void**)& dev_a, tmpN * numThreads);

	// Zero everything, so the padded tail beyond N operates on zeros.
	cudaMemset(dev_c, 0, tmpN * numThreads);
	cudaMemset(dev_a, 0, tmpN * numThreads);

	cudaMemcpy(dev_a, a, tmpN * N, cudaMemcpyHostToDevice);

	cudaEventCreate(&kernel_start1);
	cudaEventCreateWithFlags(&kernel_stop1, cudaEventBlockingSync);
	cudaEventRecord(kernel_start1, 0);

	kara_sqr_reduction << <numBlocks, threadsPerBlock >> > (dev_c, dev_a);
	// Catch bad launch configurations immediately.
	cudaError_t launchErr = cudaGetLastError();
	if (launchErr != cudaSuccess) {
		printf("kara_sqr_origin_reductionAPI launch failed: %s\n", cudaGetErrorString(launchErr));
	}
	cudaEventRecord(kernel_stop1, 0);
	cudaEventSynchronize(kernel_stop1);
	cudaEventElapsedTime(&delta_time1, kernel_start1, kernel_stop1);
	(void)delta_time1; // timing retained for optional profiling printout

	cudaMemcpy(c, dev_c, tmpN * N, cudaMemcpyDeviceToHost);

	// Fix: events were previously leaked on every call.
	cudaEventDestroy(kernel_start1);
	cudaEventDestroy(kernel_stop1);

	cudaFree(dev_c);
	cudaFree(dev_a);
}

extern "C"
void kara_mul_product_scanning_reductionAPI(argElement_x64 c, argElement_x64 a, argElement_x64 b) {
	// Element-wise modular multiplication using the product-scanning
	// Karatsuba kernel. a, b, c each hold N elements of 8 uint64_t limbs.
	argElement_x64 dev_a = NULL;
	argElement_x64 dev_b = NULL;
	argElement_x64 dev_c = NULL;

	dim3 threadsPerBlock(BLOCK_WIDTH, BLOCK_HEIGHT); // BLOCK_SIZE threads per block
	int numBlocks = (N + BLOCK_SIZE - 1) / BLOCK_SIZE; // ceil-div covers all operands
	int numThreads = numBlocks * BLOCK_SIZE;

	int deviceNum = 0;
	float delta_time1 = 0.0f;
	cudaEvent_t kernel_start1, kernel_stop1;

	// Select which GPU the program runs on.
	cudaSetDevice(deviceNum);

	size_t tmpN = 8 * sizeof(uint64_t); // bytes per 8-limb element
	cudaMalloc((void**)& dev_c, tmpN * numThreads);
	cudaMalloc((void**)& dev_a, tmpN * numThreads);
	cudaMalloc((void**)& dev_b, tmpN * numThreads);

	// Zero everything, so the padded tail beyond N operates on zeros.
	cudaMemset(dev_c, 0, tmpN * numThreads);
	cudaMemset(dev_a, 0, tmpN * numThreads);
	cudaMemset(dev_b, 0, tmpN * numThreads);

	cudaMemcpy(dev_a, a, tmpN * N, cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b, b, tmpN * N, cudaMemcpyHostToDevice);

	cudaEventCreate(&kernel_start1);
	cudaEventCreateWithFlags(&kernel_stop1, cudaEventBlockingSync);
	cudaEventRecord(kernel_start1, 0);

	kara_mul_product_scanning_reduction << <numBlocks, threadsPerBlock >> > (dev_c, dev_a, dev_b);
	// Catch bad launch configurations immediately.
	cudaError_t launchErr = cudaGetLastError();
	if (launchErr != cudaSuccess) {
		printf("kara_mul_product_scanning_reductionAPI launch failed: %s\n", cudaGetErrorString(launchErr));
	}
	cudaEventRecord(kernel_stop1, 0);
	cudaEventSynchronize(kernel_stop1);
	cudaEventElapsedTime(&delta_time1, kernel_start1, kernel_stop1);
	(void)delta_time1; // timing retained for optional profiling printout

	cudaMemcpy(c, dev_c, tmpN * N, cudaMemcpyDeviceToHost);

	// Fix: events were previously leaked on every call.
	cudaEventDestroy(kernel_start1);
	cudaEventDestroy(kernel_stop1);

	cudaFree(dev_c);
	cudaFree(dev_a);
	cudaFree(dev_b);
}


// Normalizes the element `c` in place and writes it to `file` as one
// big-endian hexadecimal line ("0X" prefix, 12 hex digits per limb).
// num == 8 selects the canonical reduction path used by the 8-limb
// representation; other limb counts use the fast reduction.
void print(FILE* file, argElement_x64 c, int num) {
	if (num == 8) {
		// NOTE(review): two passes of simplification_unique are applied;
		// presumably one pass can leave a non-canonical value — confirm
		// against its definition.
		simplification_unique(c, c);
		simplification_unique(c, c);
	}
	else {
		simplification_fast(c, num);
	}

	// Bug fix: output previously went to stdout via printf, silently
	// ignoring the `file` argument.
	fprintf(file, "0X");
	for (int i = num - 1; i >= 0; i--) {
		// Cast so %llX is correct even where uint64_t != unsigned long long.
		fprintf(file, "%012llX", (unsigned long long)c[i]);
	}
	fprintf(file, "\n");
}


// Only for EltBN383_x64_fp: replicates the single 8-limb element `src`
// into `dst` n times (dst must hold at least 8*n uint64_t values).
void AssignNfp(argElement_x64 dst, argElement_x64 src, int n) {
	for (int copy = 0; copy < n; copy++) {
		uint64_t* slot = dst + 8 * copy;
		for (int limb = 0; limb < 8; limb++) {
			slot[limb] = src[limb];
		}
	}
}

// Replicates the single NUM_DIGITS_383_x32-limb element `src` into `dst`
// n times (dst must hold at least NUM_DIGITS_383_x32*n uint32_t values).
void AssignN32(argElement_x32 dst, argElement_x32 src, int n) {
	for (int copy = 0; copy < n; copy++) {
		uint32_t* slot = dst + NUM_DIGITS_383_x32 * copy;
		for (int limb = 0; limb < NUM_DIGITS_383_x32; limb++) {
			slot[limb] = src[limb];
		}
	}
}

// Driver: builds N copies of two fixed 383-bit test operands, runs each
// multiplication/squaring variant on the GPU, and prints the first and
// last result of every run for cross-checking between implementations.
int main() {
	EltBN383_x64_fp A = { 0x534934453435,0x734462363435,0x731232453435,0x734925323435,0x225353535345,
		0x634925353435,0x634925353435,0x634925353435 };
	EltBN383_x64_fp B = { 0xFF35124FFFFF,0xFF24144FFFFF,0xFFF1252FFFFF,0xFF152FFFFFFF,0xFFF4542222FF,
		0xFFFFFFFFFFFF,0xFFFFFFFFFFFF,0x7FFFFFFFFFFF };

	uint64_t* AN = new uint64_t[8 * N];
	uint64_t* BN = new uint64_t[8 * N];
	uint64_t* CN = new uint64_t[8 * N];
	uint64_t* DN = new uint64_t[8 * N];
	//This result is correct.

	// Replicate the single operands across all N slots.
	AssignNfp(AN, A, N);
	AssignNfp(BN, B, N);

	// Echo the (reduced) inputs: first and last slot should match.
	print(stdout, AN, 8);
	print(stdout, BN, 8);
	print(stdout, AN + (N - 1) * 8, 8);
	print(stdout, BN + (N - 1) * 8, 8);

	// A^2 through each implementation; outputs should agree.
	mul_fma_reductionAPI(CN, AN, AN);
	printf("A^2  mod P mul fma:=\n");
	print(stdout, CN, 8);
	print(stdout, CN + (N - 1) * 8, 8);
	kara_mul_product_scanning_reductionAPI(DN, AN, AN);
	printf("A^2  mod P kara mul product scanning:=\n");
	print(stdout, DN, 8);
	print(stdout, DN + (N - 1) * 8, 8);
	kara_mul_origin_reductionAPI(CN, AN, AN);
	printf("A^2  mod P kara mul:=\n");
	print(stdout, CN, 8);
	print(stdout, CN + (N - 1) * 8, 8);
	kara_sqr_origin_reductionAPI(CN, AN);
	printf("A^2  mod P kara sqr:=\n");
	print(stdout, CN, 8);
	print(stdout, CN + (N - 1) * 8, 8);

	// A*B through each implementation.
	mul_fma_reductionAPI(CN, AN, BN);
	printf("AB mod P:=\n");
	print(stdout, CN, 8);
	print(stdout, CN + (N - 1) * 8, 8);

	kara_mul_origin_reductionAPI(CN, AN, BN);
	printf("AB  mod P kara mul:=\n");
	print(stdout, CN, 8);
	print(stdout, CN + (N - 1) * 8, 8);

	kara_mul_product_scanning_reductionAPI(CN, AN, BN);
	printf("AB  mod P kara_product_scanning mul:=\n");
	print(stdout, CN, 8);
	print(stdout, CN + (N - 1) * 8, 8);

	// B^2 through each implementation.
	mul_fma_reductionAPI(CN, BN, BN);
	printf("B^2  mod P:=\n");
	print(stdout, CN, 8);
	print(stdout, CN + (N - 1) * 8, 8);

	kara_mul_origin_reductionAPI(CN, BN, BN);
	printf("B^2  mod P kara mul:=\n");
	print(stdout, CN, 8);
	print(stdout, CN + (N - 1) * 8, 8);

	kara_mul_product_scanning_reductionAPI(CN, BN, BN);
	printf("B^2  mod P kara_product_scanning mul:=\n");
	print(stdout, CN, 8);
	print(stdout, CN + 8, 8);
	print(stdout, CN + (N - 1) * 8, 8);

	// A^2 once more, after the B runs.
	mul_fma_reductionAPI(CN, AN, AN);
	printf("A^2  mod P:=\n");
	print(stdout, CN, 8);
	print(stdout, CN + (N - 1) * 8, 8);

	kara_mul_origin_reductionAPI(CN, AN, AN);
	printf("A^2  mod P kara mul:=\n");
	print(stdout, CN, 8);
	print(stdout, CN + (N - 1) * 8, 8);

	kara_mul_product_scanning_reductionAPI(CN, AN, AN);
	printf("A^2  mod P kara_product_scanning mul:=\n");
	print(stdout, CN, 8);
	print(stdout, CN + (N - 1) * 8, 8);

	/*kara_mul_2threadsAPI(CN, AN, AN);
	printf("A^2  mod P kara_mul_2threads mul:=\n");
	print(stdout, CN, 8);
	print(stdout, CN + (N - 1) * 8, 8);
	kara_mul_2threads1API(CN, AN, AN);
	printf("A^2  mod P kara_mul_2threads1 mul:=\n");
	print(stdout, CN, 8);
	print(stdout, CN + (N - 1) * 8, 8);*/

	// int32 (12 x 32-bit limb) implementation smoke test.
	uint32_t A32[12] = { 0x1234124,0x324234,0x34242332,0x342342,0x5342423,0x34242332, 0x34242332, 0x34242332, 0x34242332, 0x34242332, 0x34242332, 0x34242332 };
	uint32_t B32[12] = { 0x1234124,0x324234,0x34242332,0x342342,0x5342423,0x34242332, 0x34242332, 0x34242332, 0x34242332, 0x34242332, 0x34242332, 0x34242332 };

	uint32_t* A32N = new uint32_t[NUM_DIGITS_383_x32 * N];
	uint32_t* B32N = new uint32_t[NUM_DIGITS_383_x32 * N];
	uint32_t* C32N = new uint32_t[NUM_DIGITS_383_x32 * N];
	AssignN32(A32N, A32, N);
	AssignN32(B32N, B32, N);

	intmulAPI(C32N, A32N, B32N);
	/*printf("A*B  mod P intmul:=\n");
	print(stdout, CN, 8);
	print(stdout, CN + (N - 1) * 8, 8);*/

	// Fix: host buffers were previously leaked (no delete[]). Unused
	// locals P, C and TN from the original were removed.
	delete[] AN;
	delete[] BN;
	delete[] CN;
	delete[] DN;
	delete[] A32N;
	delete[] B32N;
	delete[] C32N;
	return 0;
}