/****
 *
 * $Id: gpu_cuda.cu,v 1.10 2009/09/23 19:02:39 rdilley Exp $
 *
 * Copyright (c) 2009, Ron Dilley
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   - Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   - Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   - Neither the name of Uberadmin/BaraCUDA/Nightingale nor the names of
 *     its contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ****/

/****
 *
 * includes
 *
 ****/

#include <cuda.h>

#include <string.h>

#include "cutil.h"
#include "gpu_cuda.h"
#include "file.h"

/****
 *
 * global variables
 *
 ****/

unsigned int gpuFreeMem;
int gpuCores;
int canMapMem;

workUnit_t *dWorkUnits = 0;

cudaEvent_t startEvent, stopEvent;

/****
 *
 * external globals
 *
 ****/

extern int testIt;

/****
 *
 * cuda functions
 *
 ****/

BEGIN_C_DECLS

/****
 *
 * cuda setup (external)
 *
 ****/

int cudaInit( void ) {
  int deviceCount = 0, primaryDevice = 0, i;
  unsigned int free = 0, total = 0; /* bytes; stay 0 if cuMemGetInfo() fails */
  CUresult res;
  CUdevice dev;
  CUcontext ctx;
  cudaDeviceProp deviceProp;

  /* check for CUDA support */
  cudaGetDeviceCount(&deviceCount);
  if ( deviceCount EQ 0 ) {
    fprintf( stderr, "No CUDA devices found, CPU for everything\n" );
    return FALSE;
  }

  /* timing events used by cudaProcessWorkUnits() when benchmarking */
  cudaEventCreate( &startEvent );
  cudaEventCreate( &stopEvent );

  for ( i = 0; i < deviceCount; i++ ) {
    /* enumerate available GPUs */
    /* XXX should save properties for each device */
      printCudaInfo( i );
  }

  /* pick fastest gpu */

  /* store cuda and gpu info for later use */

  /* XXX this is repetitive - printCudaInfo() already queried the same data */
  cudaGetDeviceProperties(&deviceProp, primaryDevice);

  /* the driver-API memory query needs a context on the device */
  cuDeviceGet(&dev,primaryDevice);
  cuCtxCreate(&ctx, 0, dev);
  res = cuMemGetInfo(&free, &total);
  if(res != CUDA_SUCCESS)
    fprintf(stderr, "!!!! cuMemGetInfo failed! (status = %x)\n", res);
  cuCtxDetach(ctx);

  /* cache device characteristics for the accessor wrappers below */
  gpuFreeMem = free;
  /* assumes 8 scalar processors per multiprocessor (G80/GT200-era parts) */
  gpuCores = 8 * deviceProp.multiProcessorCount;
  canMapMem = deviceProp.canMapHostMemory;

  return TRUE;
}

/****
*
* cuda shutdown (external)
*
****/

void cudaExit( void ) {
  /* tear down the timing events created in cudaInit() */
  cudaEventDestroy( stopEvent );
  cudaEventDestroy( startEvent );

  /* release all CUDA runtime resources held by this host thread */
  cudaThreadExit();
}

/****
 *
 * get number of cores (external wrapper)
 *
 ****/

int getCudaCores( int device ) {
  /* external wrapper: defer to the internal accessor */
  int cores = getCores( device );
  return ( cores );
}

/****
 *
 * get free gpu mem (external wrapper)
 *
 ****/

unsigned int getCudaFreeMem( int device ) {
  /* external wrapper: defer to the internal accessor */
  unsigned int freeMem = getFreeMem( device );
  return ( freeMem );
}

/****
 *
 * allocate workUnit buffers (page locked host and device)
 *
 ****/

/*
 * Allocate a zeroed, page-locked host buffer of 'size' bytes and a matching
 * device buffer (stored in the file-global dWorkUnits). Returns the host
 * buffer; exits the process on allocation failure.
 */
void *workUnitAlloc( size_t size ) {
  workUnit_t *hWorkUnits = 0;

  /* page-locked host memory so transfers can use DMA; default pinned memory
   * rather than cudaHostAllocWriteCombined, because results are copied back
   * into this buffer and read by the host, and host reads from
   * write-combined memory are extremely slow */
  if ( cudaHostAlloc( (void **)&hWorkUnits, size, cudaHostAllocDefault ) != cudaSuccess ) {
    fprintf( stderr, "CUDA:ERR - Unable to allocate page locked host memory\n" );
    exit( EXIT_FAILURE );
  }
  /* memset() instead of legacy bzero() (removed in POSIX.1-2008) */
  memset( hWorkUnits, 0, size );

  /* allocate device mem */
  if ( cudaMalloc( (void **)&dWorkUnits, size ) != cudaSuccess ) {
    cudaFreeHost( hWorkUnits );
    fprintf( stderr, "CUDA:ERR - Unable to allocate device memory\n" );
    exit( EXIT_FAILURE );
  }

  return hWorkUnits;
}

/****
 *
 * free workUnit buffers (page locked host and device)
 *
 ****/

void workUnitFree( workUnit_t *buf ) {
  /* release the device-side buffer that workUnitAlloc() stashed in the
   * file-global dWorkUnits... */
  cudaFree( dWorkUnits );

  /* ...then release the caller's page-locked host buffer */
  cudaFreeHost( buf );
}

/****
 *
 * process work units on the GPU, (external wrapper)
 *
 ****/

unsigned long gpuProcessWorkUnits( workUnit_t *workUnits, unsigned long workUnitCount, uint32_t hashSize ) {
  /* hand the batch to the CUDA implementation and pass its result through */
  unsigned long processed;
  processed = cudaProcessWorkUnits( workUnits, workUnitCount, hashSize );
  return processed;
}


END_C_DECLS

/****
 *
 * kernel function
 *
 ****/

/* MD4 CUDA functions lifted from cryptohaze.com Multiforcer */

typedef uint32_t UINT4;

/* MD4 Defines as per RFC reference implementation */
#define MD4F(x, y, z) (((x) & (y)) | ((~x) & (z)))
#define MD4G(x, y, z) (((x) & (y)) | ((x) & (z)) | ((y) & (z)))
#define MD4H(x, y, z) ((x) ^ (y) ^ (z))
#define MD4ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n))))
#define MD4FF(a, b, c, d, x, s) {      \
    (a) += MD4F ((b), (c), (d)) + (x); \
    (a) = MD4ROTATE_LEFT ((a), (s));   \
  }
#define MD4GG(a, b, c, d, x, s) {			   \
    (a) += MD4G ((b), (c), (d)) + (x) + (UINT4)0x5a827999; \
    (a) = MD4ROTATE_LEFT ((a), (s));			   \
  }
#define MD4HH(a, b, c, d, x, s) {			   \
    (a) += MD4H ((b), (c), (d)) + (x) + (UINT4)0x6ed9eba1; \
    (a) = MD4ROTATE_LEFT ((a), (s));			   \
  }
#define MD4S11 3
#define MD4S12 7
#define MD4S13 11
#define MD4S14 19
#define MD4S21 3
#define MD4S22 5
#define MD4S23 9
#define MD4S24 13
#define MD4S31 3
#define MD4S32 9
#define MD4S33 11
#define MD4S34 15
/* End MD4 Defines */

__global__ void kernelProcessWorkUnits( workUnit_t *workUnits, unsigned long workUnitCount, uint32_t hashSize ) {
  int i, j = 0, idx = blockIdx.x*blockDim.x + threadIdx.x;
  int32_t val = 0;
  /* 32-bit unsigned values for the hash */
  UINT4 a,b,c,d;
  UINT4 block[16];
  
  if ( idx < workUnitCount ) {
    /* init md4 contexts */
    a = 0x67452301;
    b = 0xefcdab89;
    c = 0x98badcfe;
    d = 0x10325476;

    /* clear block */
    for ( i = 0; i < 16; i++ )
      block[i] = 0x00000000;

    /* load data into md4 block in groups of four */
    for ( i = 0; i < workUnits[idx].ptLen; i++ ) {
      if ( j == 0 ) {
	block[i/4] |= workUnits[idx].pt[i];
	j++;
      } else if ( j == 1 ) {
	block[i/4] |= workUnits[idx].pt[i] << 8;
        j++;
      } else if ( j == 2 ) {
	block[i/4] |= workUnits[idx].pt[i] << 16;
        j++;
      } else {
	block[i/4] |= workUnits[idx].pt[i] << 24;
        j = 0;
      }
    }
    
    /* add 1 bit padding, zeros already there */
    if ( j == 0 ) {
      block[i/4] |= 0x00000080;
    } else if ( j == 1 ) {
      block[i/4] |= 0x00008000;
    } else if ( j == 2 ) {
      block[i/4] |= 0x00800000;
    } else {
      block[i/4] |= 0x80000000;
    }

    /* set length in bits */
    block[14] = workUnits[idx].ptLen * 8;

    /* make some magic */

    /* round 1 */
    MD4FF (a, b, c, d, block[ 0], MD4S11); /* 1 */
    MD4FF (d, a, b, c, block[ 1], MD4S12); /* 2 */
    MD4FF (c, d, a, b, block[ 2], MD4S13); /* 3 */
    MD4FF (b, c, d, a, block[ 3], MD4S14); /* 4 */
    MD4FF (a, b, c, d, block[ 4], MD4S11); /* 5 */
    MD4FF (d, a, b, c, block[ 5], MD4S12); /* 6 */
    MD4FF (c, d, a, b, block[ 6], MD4S13); /* 7 */
    MD4FF (b, c, d, a, block[ 7], MD4S14); /* 8 */
    MD4FF (a, b, c, d, block[ 8], MD4S11); /* 9 */
    MD4FF (d, a, b, c, block[ 9], MD4S12); /* 10 */
    MD4FF (c, d, a, b, block[10], MD4S13); /* 11 */
    MD4FF (b, c, d, a, block[11], MD4S14); /* 12 */
    MD4FF (a, b, c, d, block[12], MD4S11); /* 13 */
    MD4FF (d, a, b, c, block[13], MD4S12); /* 14 */
    MD4FF (c, d, a, b, block[14], MD4S13); /* 15 */
    MD4FF (b, c, d, a, block[15], MD4S14); /* 16 */

    /* round 2 */
    MD4GG (a, b, c, d, block[ 0], MD4S21); /* 17 */
    MD4GG (d, a, b, c, block[ 4], MD4S22); /* 18 */
    MD4GG (c, d, a, b, block[ 8], MD4S23); /* 19 */
    MD4GG (b, c, d, a, block[12], MD4S24); /* 20 */
    MD4GG (a, b, c, d, block[ 1], MD4S21); /* 21 */
    MD4GG (d, a, b, c, block[ 5], MD4S22); /* 22 */
    MD4GG (c, d, a, b, block[ 9], MD4S23); /* 23 */
    MD4GG (b, c, d, a, block[13], MD4S24); /* 24 */
    MD4GG (a, b, c, d, block[ 2], MD4S21); /* 25 */
    MD4GG (d, a, b, c, block[ 6], MD4S22); /* 26 */
    MD4GG (c, d, a, b, block[10], MD4S23); /* 27 */
    MD4GG (b, c, d, a, block[14], MD4S24); /* 28 */
    MD4GG (a, b, c, d, block[ 3], MD4S21); /* 29 */
    MD4GG (d, a, b, c, block[ 7], MD4S22); /* 30 */
    MD4GG (c, d, a, b, block[11], MD4S23); /* 31 */
    MD4GG (b, c, d, a, block[15], MD4S24); /* 32 */

    /* round 3 */
    MD4HH (a, b, c, d, block[ 0], MD4S31); /* 33 */
    MD4HH (d, a, b, c, block[ 8], MD4S32); /* 34 */
    MD4HH (c, d, a, b, block[ 4], MD4S33); /* 35 */
    MD4HH (b, c, d, a, block[12], MD4S34); /* 36 */
    MD4HH (a, b, c, d, block[ 2], MD4S31); /* 37 */
    MD4HH (d, a, b, c, block[10], MD4S32); /* 38 */
    MD4HH (c, d, a, b, block[ 6], MD4S33); /* 39 */
    MD4HH (b, c, d, a, block[14], MD4S34); /* 40 */
    MD4HH (a, b, c, d, block[ 1], MD4S31); /* 41 */
    MD4HH (d, a, b, c, block[ 9], MD4S32); /* 42 */
    MD4HH (c, d, a, b, block[ 5], MD4S33); /* 43 */
    MD4HH (b, c, d, a, block[13], MD4S34); /* 44 */
    MD4HH (a, b, c, d, block[ 3], MD4S31); /* 45 */
    MD4HH (d, a, b, c, block[11], MD4S32); /* 46 */
    MD4HH (c, d, a, b, block[ 7], MD4S33); /* 47 */
    MD4HH (b, c, d, a, block[15], MD4S34); /* 48 */
    
    /* add initial values */
    a += 0x67452301;
    b += 0xefcdab89;
    c += 0x98badcfe;
    d += 0x10325476;

    /* copy state to hash */
    workUnits[idx].uHash[0] = a;
    workUnits[idx].uHash[1] = a >> 8;
    workUnits[idx].uHash[2] = a >> 16;
    workUnits[idx].uHash[3] = a >> 24;
    workUnits[idx].uHash[4] = b;
    workUnits[idx].uHash[5] = b >> 8;
    workUnits[idx].uHash[6] = b >> 16;
    workUnits[idx].uHash[7] = b >> 24;
    workUnits[idx].uHash[8] = c;
    workUnits[idx].uHash[9] = c >> 8;
    workUnits[idx].uHash[10] = c >> 16;
    workUnits[idx].uHash[11] = c >> 24;
    workUnits[idx].uHash[12] = d;
    workUnits[idx].uHash[13] = d >> 8;
    workUnits[idx].uHash[14] = d >> 16;
    workUnits[idx].uHash[15] = d >> 24;
    workUnits[idx].uhLen = NTLM_HASH_LEN;

    /* calc lookup hash */
    for( j = 0; j < workUnits[idx].uhLen; j++ ) {
      int tmp;
      val = (val << 4) + ( workUnits[idx].uHash[j] & 0xff );
      if ( tmp = (val & 0xf0000000)) {
	val = val ^ (tmp >> 24);
	val = val ^ tmp;
      }
    }
    workUnits[idx].key = val % hashSize;
  }
}

/****
 *
 * process work units on the GPU
 *
 ****/

/*
 * Copy the batch of work units to the device, hash them with one thread
 * per unit, and copy the results back. Returns the number of units
 * processed; exits the process on any CUDA failure.
 */
unsigned long cudaProcessWorkUnits( workUnit_t *workUnits, unsigned long workUnitCount, uint32_t hashSize ) {
  cudaError_t err;

  /* an empty batch would produce a zero-sized grid, which is an invalid launch */
  if ( workUnitCount == 0 )
    return 0;

  if ( testIt ) {
    /* start the clock */
    cudaEventRecord(startEvent, 0);
  }

#ifdef DEBUG
  fprintf( stderr, "CUDA - Copying work units from host to device\n" );
#endif

  /* XXX the zero-copy (mapped host memory) path was never implemented, and
   * the kernel always reads dWorkUnits -- so copy explicitly regardless of
   * canMapMem, otherwise the kernel would operate on stale device data */
  if ( cudaMemcpy( dWorkUnits, workUnits, workUnitCount * sizeof( workUnit_t ), cudaMemcpyHostToDevice ) != cudaSuccess ) {
    fprintf( stderr, "CUDA:ERR - Unable to copy work units from host to device\n" );
    exit( EXIT_FAILURE );
  }

#ifdef DEBUG
  fprintf( stderr, "CUDA - Starting kernels\n" );
#endif

  /* one thread per work unit; integer ceiling-divide avoids the float
   * rounding error of ceil() on very large batch counts */
  dim3 block(256);
  dim3 grid((unsigned int)((workUnitCount + block.x - 1) / block.x));

  /* kick off cuda kernels */
  kernelProcessWorkUnits<<< grid, block >>>( dWorkUnits, workUnitCount, hashSize );

  /* kernel launches are asynchronous and return no status; check for
   * launch-configuration failures explicitly */
  if ( ( err = cudaGetLastError() ) != cudaSuccess ) {
    fprintf( stderr, "CUDA:ERR - Kernel launch failed [%s]\n", cudaGetErrorString( err ) );
    exit( EXIT_FAILURE );
  }

  /* wait for all kernels to finish */
  cudaThreadSynchronize();

  /* copy work units from device back to host */
  if ( cudaMemcpy( workUnits, dWorkUnits, workUnitCount * sizeof( workUnit_t ), cudaMemcpyDeviceToHost ) != cudaSuccess ) {
    fprintf( stderr, "CUDA:ERR - Unable to copy work units from device to host\n" );
    exit( EXIT_FAILURE );
  }

  if ( testIt ) {
    /* stop the clock */
    cudaEventRecord(stopEvent, 0);
    cudaEventSynchronize(stopEvent);

    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, startEvent, stopEvent);

    /* the original passed a float expression to %llu, which is undefined
     * behavior; cast the rate to a 64-bit integer and guard div-by-zero */
    if ( milliseconds > 0 )
      fprintf( stderr, "%llu c/s\n", (unsigned long long)( workUnitCount * 1000.0f / milliseconds ) );
  }

  /* done */
  return workUnitCount;
}


/****
 *
 * print CUDA GPU info
 *
 * shamelessly lifted from Multiforcer by Cryptohaze
 *
 ****/

/*
 * Print capability and memory information for one CUDA device.
 * Returns the device's multiprocessor count.
 */
int printCudaInfo( int device ) {
  unsigned int free = 0, total = 0; /* bytes; stay 0 if cuMemGetInfo() fails */
  CUresult res;
  CUdevice dev;
  CUcontext ctx;
  cudaDeviceProp deviceProp;

  cudaGetDeviceProperties(&deviceProp, device);

  /* the driver-API memory query needs a context on the device */
  cuDeviceGet(&dev,device);
  cuCtxCreate(&ctx, 0, dev);
  res = cuMemGetInfo(&free, &total);
  if(res != CUDA_SUCCESS)
    fprintf(stderr, "!!!! cuMemGetInfo failed! (status = %x)\n", res);
  cuCtxDetach(ctx);

  printf("CUDA Device Information:\n");
  printf("Device %d: \"%s\"\n", device, deviceProp.name);
  /* assumes 8 scalar processors per multiprocessor (G80/GT200-era parts) */
  printf("    Number of cores: %d\n", 8 * deviceProp.multiProcessorCount);
  printf("Supports mapped mem: %s\n", deviceProp.canMapHostMemory ? "Y" : "N" );
  /* clockRate is reported in kHz */
  printf("         Clock rate: %.2f GHz\n", deviceProp.clockRate * 1e-6f);
  /* free/total are unsigned int, so %u (the original %lu mismatched on LP64) */
  printf("     Memory (total): %u MB\n", (total / (1024 * 1024)));
  printf("      Memory (free): %u MB\n\n", (free / (1024 * 1024)));
  return deviceProp.multiProcessorCount;
}

/****
 *
 * get number of cores
 *
 ****/

int getCores( int device ) {
  /* the device argument is currently unused; cudaInit() cached the
   * primary device's core count in the file-global gpuCores */
  return gpuCores;
}

/****
 *
 * get free gpu mem
 *
 ****/

unsigned int getFreeMem( int device ) {
  /* the device argument is currently unused; cudaInit() cached the
   * primary device's free memory in the file-global gpuFreeMem */
  return gpuFreeMem;
}

