#include "ph.h"

#include <cuda.h>
#include "aesUtils.h"
#include "cudaUtils.h"
#include <iostream>
#include <linux/types.h>
#include "BigInteger.h"


__device__ uint32_t dTe0[256];
__device__ uint32_t dTe1[256];
__device__ uint32_t dTe2[256];
__device__ uint32_t dTe3[256];

#define le32_to_cpu(x) x
#define cpu_to_le32(x) x

/*
 * AES encryption round macros.
 *
 * Conventions at every expansion site:
 *   bo - output 4-word state array for this round
 *   bi - input  4-word state array from the previous round
 *   n  - state-word index (0..3) handled by the calling thread
 *   k  - pointer to the current 4-word round key
 *   Te0..Te3 - combined S-box/MixColumns lookup tables, expected to be
 *              in scope where the macro is expanded.
 */

/* f_rn: one full encryption round for state word n — four table lookups
 * (one byte from each of four rotated input words) XORed with the round-key
 * word, evaluated as a single expression. */
#define f_rn(bo, bi, n, k)  do {                \
        bo[n] = Te0[bi[n] & 0xFF] ^         \
        Te1[(bi[(n + 1) & 3] >> 8) & 0xFF] ^        \
        Te2[(bi[(n + 2) & 3] >> 16) & 0xFF] ^       \
        Te3[(bi[(n + 3) & 3] >> 24) & 0xFF] ^ *(k + n); \
} while (0)

/* f_rnp: same round as f_rn but accumulated across separate statements
 * (split form; result is identical). Currently unused in this file. */
#define f_rnp(bo, bi, n, k)  do {                       \
        bo[n] = Te0[bi[n] & 0xFF] ;                    \
        bo[n] ^= Te1[(bi[(n + 1) & 3] >> 8) & 0xFF] ;            \
        bo[n] ^=Te2[(bi[(n + 2) & 3] >> 16) & 0xFF] ;           \
        bo[n] ^=Te3[(bi[(n + 3) & 3] >> 24) & 0xFF] ^ *(k + n); \
} while (0)

/* f_rl: final round for state word n — SubBytes/ShiftRows only (each table
 * contribution is masked down to one byte lane, skipping MixColumns),
 * then AddRoundKey. */
#define f_rl(bo, bi, n, k)  do {                \
    bo[n] = (Te3[bi[n] & 0xFF] & 0xFF)^         \
        (Te0[(bi[(n + 1) & 3] >> 8) & 0xFF] & 0xFF00) ^     \
        (Te1[(bi[(n + 2) & 3] >> 16) & 0xFF] & 0xFF0000) ^      \
        (Te2[(bi[(n + 3) & 3] >> 24) & 0xFF] & 0xFF000000) ^ *(k + n);  \
} while (0)

/* f_nround: full round + advance the round-key pointer by one 4-word key.
 * NOTE: k is incremented per expansion, so kp must start at rk + 4. */
#define f_nround(bo, bi, k, i) do {\
    f_rn(bo, bi, i, k); \
    k += 4;         \
} while (0)


/* f_lround: final round; does not advance the key pointer. */
#define f_lround(bo, bi, k, i) do {\
    f_rl(bo, bi, i, k); \
} while (0)


/**
 * One-time host-side initialization: uploads the four host AES lookup
 * tables TE0..TE3 into the corresponding __device__ arrays dTe0..dTe3.
 * Safe to call repeatedly; only the first call does any work.
 */
void aesFastCipherInit()
{
	static bool initialized = false;
	if (initialized)
		return;

	// Make sure the CUDA driver API is initialized before any copies.
	cuInit(0);

	const size_t tableBytes = 256 * sizeof(uint32_t);
	CUDA_CHECK(cudaMemcpyToSymbol(dTe0, &TE0, tableBytes, 0, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpyToSymbol(dTe1, &TE1, tableBytes, 0, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpyToSymbol(dTe2, &TE2, tableBytes, 0, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpyToSymbol(dTe3, &TE3, tableBytes, 0, cudaMemcpyHostToDevice));

	initialized = true;
}

/**
 * Encrypts one 16-byte AES block per 4-thread group.
 *
 * Layout: thread `index` handles state word `index % 4` of block
 * `index / 4`; the shared arrays hold one 4-word state per block and are
 * double-buffered (b0 <-> b1) across rounds.
 *
 * \param in   input blocks (4 little-endian 32-bit words per block)
 * \param out  output blocks, same layout as `in`
 * \param rk   expanded key schedule; rk[0..3] is the whitening key
 * \param nr   round selector: <=11 runs the 10-round (AES-128) path,
 *             >11 adds two rounds (AES-192), >13 two more (AES-256)
 * \param Te0..Te3  AES lookup tables used by the round macros
 *
 * BUGFIX: every round reads the three neighbouring state words written by
 * the other threads of the same 4-thread group. The group always lies
 * inside one warp, but implicit warp-synchronous execution is no longer
 * guaranteed on Volta+ (independent thread scheduling), so an explicit
 * __syncwarp() is required between writing one buffer and reading it.
 * (The previous code had these barriers commented out.) The barriers are
 * non-divergent: `nr` is uniform across the launch.
 */
__device__ void cudaAes_encrypt(
        byte *in, 
        byte* out, 
        unsigned int* rk, 
        unsigned int nr,
        uint32_t* Te0, 
        uint32_t* Te1, 
        uint32_t* Te2, 
        uint32_t* Te3)
{
    const __le32 *src = (const __le32 *)in;
    __le32 *dst = (__le32 *)out;

    // Double-buffered state: up to 128 blocks (512 threads) per CUDA block.
    __shared__ u32 b0[128][4], b1[128][4];

    const u32 *kp = rk + 4;          // round keys, past the whitening key
    int index = threadIdx.x;
    int i = index % 4;               // state-word lane within the group
    int block = index / 4;           // which 16-byte block this thread serves

    // Initial AddRoundKey (whitening).
    b0[block][i] = le32_to_cpu(*(src + i + 4*block)) ^ rk[i];
    __syncwarp();

    if (nr > 11) {                   // extra rounds for 192-bit keys
        f_nround(b1[block], b0[block], kp, i); __syncwarp();
        f_nround(b0[block], b1[block], kp, i); __syncwarp();
    }

    if (nr > 13) {                   // extra rounds for 256-bit keys
        f_nround(b1[block], b0[block], kp, i); __syncwarp();
        f_nround(b0[block], b1[block], kp, i); __syncwarp();
    }

    // Nine full rounds common to all key sizes.
    f_nround(b1[block], b0[block], kp, i); __syncwarp();
    f_nround(b0[block], b1[block], kp, i); __syncwarp();
    f_nround(b1[block], b0[block], kp, i); __syncwarp();
    f_nround(b0[block], b1[block], kp, i); __syncwarp();
    f_nround(b1[block], b0[block], kp, i); __syncwarp();
    f_nround(b0[block], b1[block], kp, i); __syncwarp();
    f_nround(b1[block], b0[block], kp, i); __syncwarp();
    f_nround(b0[block], b1[block], kp, i); __syncwarp();
    f_nround(b1[block], b0[block], kp, i); __syncwarp();

    // Final round (no MixColumns). Afterwards each thread reads only the
    // word it wrote itself, so no further barrier is needed.
    f_lround(b0[block], b1[block], kp, i);

    uint32_t *d = dst + i + block*4;
    *d = cpu_to_le32(b0[block][i]);
}

/**
 * Copies the expanded key into shared memory: each of the first 240
 * threads moves one 32-bit word (240 words covers the largest schedule
 * used here). No barrier inside — the caller must synchronize.
 */
__device__ void init_shared_key(uint32_t* rk, uint32_t* sh_key)
{
	const int tid = threadIdx.x;
	if (tid >= 240)
		return;
	sh_key[tid] = rk[tid];
}

/**
 * Loads the four AES lookup tables from the __device__ arrays dTe0..dTe3
 * into the given (typically shared-memory) buffers; each of the first 256
 * threads copies one entry per table. No barrier inside — the caller must
 * synchronize before using the tables.
 */
__device__ void init_shared_tablesIV(
		uint32_t* Te0,
		uint32_t* Te1,
		uint32_t* Te2, 
		uint32_t* Te3)
{
	const int tid = threadIdx.x;
	if (tid >= 256)
		return;
	Te0[tid] = dTe0[tid];
	Te1[tid] = dTe1[tid];
	Te2[tid] = dTe2[tid];
	Te3[tid] = dTe3[tid];
}

/**
 * Adds w_add to the 128-bit multi-precision integer at aval, stored as
 * four 32-bit limbs with the least-significant limb first, propagating
 * the carry limb by limb (mirrors OpenSSL's BN_add_word). Overflow past
 * the most-significant limb is silently discarded (the value wraps).
 *
 * BUGFIX: on limb overflow the carry into the next limb is 1, but the
 * original code propagated the wrapped sum (`w = l`), corrupting all
 * higher limbs whenever a limb addition wrapped.
 *
 * \param aval  pointer to 16 bytes interpreted as uint32_t[4]
 * \param w_add word to add to the least-significant limb
 */
__device__ void BN_add_word_verCUDA(byte *aval, uint32_t w_add)
{
	uint32_t l;
	uint32_t w = w_add;

	int i;
	uint32_t* a = (uint32_t*)aval;
	
	for (i = 0; i < 4; ++i)
	{
		l = (a[i]+w)&0xFFFFFFFF;
		a[i]=l;
		if (w <= l)
		{
			// No wrap-around: carry is exhausted, higher limbs unchanged.
			break;
		}
		w = 1;  // limb wrapped: carry exactly 1 into the next limb
	}
}

/**
 * Debug initializer: writes each thread's local index into the buffer at
 * its *global* position. iv_vector is unused — the parameter exists only
 * for signature parity with init_out_shared.
 */
__device__ void init_out_sharedZero(uint32_t* out_shared32, uint32_t* iv_vector)
{
	const uint32_t globalIdx = blockIdx.x * blockDim.x + threadIdx.x;
	out_shared32[globalIdx] = threadIdx.x;
}

/**
 * Builds the per-block CTR counter values in out_shared32: every 16-byte
 * slot (4 consecutive words) starts as a copy of the 128-bit IV, then the
 * slot's global number is added to it via BN_add_word_verCUDA.
 * One thread per word copies; one thread per slot performs the add.
 * NOTE: no trailing barrier — the caller must __syncthreads() before the
 * counters are consumed.
 */
__device__ void init_out_shared(uint32_t* out_shared32, uint32_t* iv_vector)
{
	const uint32_t tid = threadIdx.x;
	const uint32_t globalTid = tid + blockIdx.x * blockDim.x;

	// Each thread copies one IV word into its slot.
	out_shared32[tid] = iv_vector[tid & 3];
	__syncthreads();

	// The first thread of each 4-word slot adds the slot's global index
	// (globalTid / 4) into the 128-bit counter.
	if ((tid & 3) == 0)
	{
		BN_add_word_verCUDA((byte*)(out_shared32 + tid), globalTid >> 2);
	}
}

/**
 * Byte-wise copy of `size` bytes from src to dest. No overlap handling —
 * the regions must not overlap.
 */
__device__ void copy(byte* dest, byte* src, uint32_t size)
{
    for (uint32_t i = 0; i < size; ++i)
        dest[i] = src[i];
} 

/**
 * Kernel: builds CTR counter blocks from the 128-bit IV in dynamic shared
 * memory, then AES-encrypts them into `out`.
 *
 * Launch contract: 1D grid; dynamic shared memory must hold one uint32
 * per thread (the wrapper passes 2048 bytes for up to 512 threads).
 * The key schedule and lookup tables are read directly from device
 * memory (rk and dTe0..dTe3).
 *
 * \param iv_vector 16-byte IV / counter base (device memory)
 * \param out       output keystream buffer (device memory)
 * \param rk        expanded key schedule (device memory)
 */
__global__ void KcudaAesEncryptIVBlocks(byte* iv_vector, byte* out, unsigned int* rk)
{
	// Per-block scratch holding the CTR counters (one word per thread).
	extern __shared__ uint32_t T[];

	// Byte offset of this thread block's slice of `out`
	// (blockDim.x threads * 4 bytes each).
	const int outOffset = 4 * blockDim.x * blockIdx.x;

	init_out_shared(T, (uint32_t*)iv_vector);
	__syncthreads();  // counters must be complete before encryption reads them

	// nr == 11 selects the 10-round path inside cudaAes_encrypt.
	cudaAes_encrypt((byte*)T, (byte*)out + outOffset, rk, 11,
	                dTe0, dTe1, dTe2, dTe3);
}

/**
 * XORs `size` bytes of data_out into data in place, one 32-bit word at a
 * time. `size` must be a multiple of 4 (callers pass multiples of 16) and
 * both buffers must be suitably aligned for uint32_t access.
 */
void xor_data(byte* data, byte* data_out, uint32_t size)
{
    uint32_t* dst = (uint32_t*)(data);
    const uint32_t* src = (const uint32_t*)(data_out);
    const size_t words = size / 4;
    for (size_t i = 0; i < words; ++i)
    {
        dst[i] ^= src[i];
    }
} 

/**
 * AES-CTR encrypts `size` bytes: the kernel expands the IV into a
 * keystream on the GPU, the keystream is copied back, and the plaintext
 * is XORed into it on the host.
 *
 * \param data          plaintext to encrypt (host memory)
 * \param dev_data_out  keystream / IV scratch buffer (device memory)
 * \param host_data_out receives keystream, then keystream XOR data (host memory)
 * \param size          size of the above buffers in bytes; must be a
 *                      multiple of 1024
 * \param d_key         expanded key schedule (device memory)
 * \param ivb           16-byte (128-bit) initial counter value for CTR mode
 */
void aesEncryptIVBlocks(byte* data, 
					  byte* dev_data_out,
					  byte* host_data_out, 
					  uint32_t size, 
					  uint32_t* d_key,
					  byte* ivb)
{
	assert((size % 16) == 0);
	assert((size % 1024) == 0);
	uint32_t num_of_blocks = size / 16;              // 16-byte AES blocks
	uint32_t num_of_neaded_threads = num_of_blocks * 4;  // 4 threads per block
	
	// Block size must divide the total thread count exactly: 512 threads
	// when the data is 2048-byte aligned, otherwise 256 (size % 1024 == 0
	// is asserted above).
	uint32_t max_num_of_threads = 512;
	if ((size % 2048) == 0)
	{
		max_num_of_threads = 512;
	}
	else if ((size % 1024) == 0)
	{
		max_num_of_threads = 256;
	}
	
	uint32_t grid_size = num_of_neaded_threads / max_num_of_threads + 
					(num_of_neaded_threads % max_num_of_threads? 1 : 0);
					
	uint32_t threads_per_block = max_num_of_threads;

	// Seed the counter: the kernel reads the IV from the start of the
	// output buffer, then overwrites the whole buffer with keystream.
	CUDA_CHECK(cudaMemcpy(dev_data_out, ivb, 16, cudaMemcpyHostToDevice));
	// 2048 bytes of dynamic shared memory = one uint32 per thread at the
	// maximum block size of 512.
	KcudaAesEncryptIVBlocks<<<grid_size, threads_per_block, 2048>>>(dev_data_out, dev_data_out, d_key);
	// Kernel launches do not return errors directly — check explicitly.
	CUDA_CHECK(cudaGetLastError());
	// Blocking copy; also synchronizes with the kernel above.
	CUDA_CHECK(cudaMemcpy(host_data_out, dev_data_out, size, cudaMemcpyDeviceToHost));

	// CTR mode: ciphertext = keystream XOR plaintext.
	xor_data(host_data_out, data, size);
}

/**
 * Encrypts a buffer by splitting it into a large 2048-byte-multiple chunk
 * followed by an optional 1024-byte-multiple remainder, advancing the
 * 128-bit CTR counter between the two calls.
 *
 * \param data          plaintext (host memory)
 * \param dev_data_out  device scratch/keystream buffer
 * \param host_data_out receives the ciphertext (host memory)
 * \param size          total size in bytes (multiple of 1024)
 * \param d_key         expanded key schedule (device memory)
 * \param ivc           CTR counter; advanced by the number of 16-byte
 *                      blocks consumed by the first chunk
 */
void aesEncryptIVBlocksBig2(byte* data, 
					  byte* dev_data_out,
					  byte* host_data_out, 
					  uint32_t size, 
					  uint32_t* d_key,
					  BigInteger& ivc)
{
	uint32_t size2048 = size / 2048;
	uint32_t size1024 = size % 2048;
	if (size2048)
	{
		aesEncryptIVBlocks(data, 
					   dev_data_out,
					   host_data_out,
					   2048*size2048,
					   d_key,
					   (byte*)ivc.value->d);
		// Advance the counter past the blocks the first chunk consumed.
		ivc.add(2048*size2048/16);
	}
	if (size1024)
	{
		int shift = size2048*2048;
		// BUGFIX: the plaintext pointer must be advanced by `shift` too —
		// previously the tail XORed its keystream against the *start* of
		// the plaintext (cf. aesEncryptIVBlocksBig, which shifts data).
		aesEncryptIVBlocks(data + shift, 
						   dev_data_out+shift,
						   host_data_out + shift,
						   size1024,
						   d_key,
						   (byte*)ivc.value->d);
	}
}
/**
 * Encrypts a buffer in fixed 1024-byte chunks, advancing the 128-bit CTR
 * counter by 64 blocks (1024/16) after each chunk. Any tail smaller than
 * 1024 bytes is left unencrypted.
 *
 * \param data          plaintext (host memory)
 * \param dev_data_out  device scratch/keystream buffer (reused per chunk)
 * \param host_data_out receives the ciphertext (host memory)
 * \param size          total size in bytes
 * \param d_key         expanded key schedule (device memory)
 * \param ivc           CTR counter; advanced as chunks are consumed
 */
void aesEncryptIVBlocksBig(byte* data, 
					  byte* dev_data_out,
					  byte* host_data_out, 
					  uint32_t size, 
					  uint32_t* d_key,
					  BigInteger& ivc)
{
	const uint32_t chunkCount = size / 1024;
	for (uint32_t chunk = 0; chunk < chunkCount; ++chunk)
	{
		const int offset = chunk * 1024;
		aesEncryptIVBlocks(data + offset,
						   dev_data_out,
						   host_data_out + offset,
						   1024,
						   d_key,
						   (byte*)ivc.value->d);
		ivc.add(1024 / 16);  // 64 AES blocks per chunk
	}
}
