import sys

import numpy
from math import ceil

from disco import Disco, Params, result_iterator, func

def fun_map_init(input, params):
  # Per-map-task setup run once by the Disco worker before fun_map:
  # connects to memcached (IPC channel for sharing NN weights between
  # tasks) and imports pycuda for the GPU work done in fun_map.
  # `input` is unused; Disco's map_init signature requires it.
  # Add memcached support for IPC
  import pylibmc
  memc = pylibmc.Client(["127.0.0.1:11211"])

  if 'weights' not in params:
    # Load stored weights from memcached, if a previous run saved any.
    # memc.get returns None on a missing key; the original code passed
    # that straight to numpy.loads and crashed with a TypeError.
    cached = memc.get('nn_default/weights')
    if cached is not None:
      # Weights were serialized with numpy's pickle-based dumps().
      params['weights'] = numpy.loads(cached)

  params['memc'] = memc

  # Add GPU support for SIMD action: make the CUDA shared libraries
  # findable before importing pycuda (OS X DYLD path; harmless elsewhere).
  import os
  os.environ["DYLD_LIBRARY_PATH"] = ':'.join([
    "/usr/local/lib",
    "/opt/local/lib",
    "/usr/local/cuda/lib",
    "/Developer/CUDA/lib",
    ])

  # Import pycuda with the submodules fun_map needs; importing
  # pycuda.autoinit creates the CUDA context as a side effect.
  params['pycuda'] = __import__("pycuda", fromlist=["autoinit", "driver", "compiler", "gpuarray"])

  # GPU stats: free/total device memory in bytes.  `msg` is injected by
  # the Disco worker environment.
  free, total = params['pycuda'].driver.mem_get_info()
  msg("GPU status: {0}/{1}".format(free, total))


# Generator to initialize an MNIST-formatted image binary file,
# and return a subset of images
def mnist_img_reader(fd, size, fname):
  """Disco map_reader: decode one batch of images from an MNIST IDX file.

  Yields a single (imgs, num_imgs, rows, cols) tuple where imgs is a
  float32 array of shape (imgs_in_batch, rows*cols).  Disco serializes
  this function by source, so everything it needs must be local;
  struct is stdlib and therefore safe to import inside the function
  (the original referenced a bare `struct`, a NameError on workers).
  """
  import struct

  msg("Executed mnist_img_reader")

  if fd.tell() != 0:
    # Not positioned at the IDX header: the image dimensions are unknown,
    # so nothing can be decoded from this chunk.  (The original code fell
    # through and raised NameError on the undefined header fields.)
    msg("mnist_img_reader: fd not at header, skipping chunk")
    return

  # IDX header: magic, image count, rows, cols -- four big-endian int32s.
  magic, num_imgs, rows, cols = struct.unpack('>iiii', fd.read(4*4))
  # Basic checksum: 2051 is the IDX magic for unsigned-byte image files.
  assert magic == 2051, 'MNIST checksum failed'

  pixels_per_img = cols*rows

  # Hard-coded batch: 1/50th of the 60k-image MNIST training set.
  # TODO(review): derive from `size` (size // pixels_per_img) instead.
  imgs_in_batch = 60000 // 50
  shape = (imgs_in_batch, pixels_per_img)
  read_bytes = imgs_in_batch*pixels_per_img

  # Read imgs_in_batch*rows*cols pixels from the file as one flat byte
  # run, then reshape to one row per image and promote to float32 for
  # the GPU math downstream.
  imgs = numpy.fromfile(file=fd, dtype=numpy.uint8, count=read_bytes).reshape(shape).astype(numpy.float32)
  if len(imgs) > 0:
    # The generator has something to return
    yield (imgs, len(imgs), rows, cols)


def fun_map(img_data, params):
  """Disco map function: forward-propagate one batch of images through a
  fully connected net, one CUDA matrix multiply per layer.

  img_data -- the (imgs, num_imgs, rows, cols) tuple yielded by
              mnist_img_reader.
  params   -- dict populated by fun_map_init and the job submission:
              'pycuda', 'block_size', 'neuron_counts', optional 'weights'.
  Returns a single [(key, host_results)] pair for the reduce phase.
  """
  # Retain module import from map_init (params carries the pycuda module)
  pycuda = params['pycuda']
  # h: Host (CPU), d: Device (GPU).  driver.In/Out wrap host arrays so
  # pycuda copies them to/from the device around the kernel launch.
  htod = pycuda.driver.In
  dtoh = pycuda.driver.Out

  # Info from header
  imgs, num_imgs, rows, cols = img_data
  block_size = params.get('block_size', 16)

  # Divide-and-conquer matrix multiplication
  # 1: Store in stored memory a subblock for each
  #      A[:][block_size'th], B[block_size'th][:] element
  #      (each subblock corresponds to an element in the final matrix)
  # 2: Reduce two subblocks by taking the dot product
  #      along the Asub row and Bsub col
  # 3: Store the dot product as an element in result matrix
  # The kernel is the classic shared-memory tiled multiply C = A*B with
  # wA = width of A, wB = width of B.  NOTE(review): BLOCK_SIZE is
  # hard-coded to 16 in the CUDA source, so params['block_size'] must
  # stay 16 or the tile shape and launch block will disagree.
  matrix_mul_proto = pycuda.compiler.SourceModule("""
  #include <stdio.h>

  #define BLOCK_SIZE 16

  __global__ void
  matrix_mul( float* A, float* B, int wA, int wB, float* C )
  {
      int bx = blockIdx.x;
      int by = blockIdx.y;

      int tx = threadIdx.x;
      int ty = threadIdx.y;

      int aBegin = wA * BLOCK_SIZE * by;
      int aEnd   = aBegin + wA - 1;
      int aStep  = BLOCK_SIZE;

      int bBegin = BLOCK_SIZE * bx;
      int bStep  = BLOCK_SIZE * wB;

      float Csub = 0;

      for (int a = aBegin, b = bBegin;
               a <= aEnd;
               a += aStep, b += bStep) {

          __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
          __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];

          As[ty][tx] = A[a + wA * ty + tx];
          Bs[ty][tx] = B[b + wB * ty + tx];

          __syncthreads();

          for (int k = 0; k < BLOCK_SIZE; ++k)
              Csub += As[ty][k] * Bs[k][tx];

          __syncthreads();
      }

      int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
      // TODO: NN activation
      //C[c + wB * ty + tx] = 1/( 1 + expf(Csub));
      C[c + wB * ty + tx] = Csub;
  }
  """
  )

  msg("Executing fun_map: {0} {1}x{2} images ({3} pels)".format(num_imgs, rows, cols, imgs.size))

  krnl = matrix_mul_proto.get_function("matrix_mul")
  # NOTE(review): prepare() is set up here but the loop below uses the
  # unprepared krnl(...) __call__ path; the prepared_call variant is
  # commented out further down.  One of the two should be removed.
  krnl.prepare("PPiiP", block=(block_size, block_size, 1,))


  #imgs_gpu = pycuda.gpuarray.to_gpu(imgs)
  # Per-layer weight matrices, keyed by layer index; generated lazily
  # below when a layer has no stored weights.
  weights = params.get('weights', {})

  if params.has_key('neuron_counts'):
    # Forward pass: layer 0 input width is the flattened image size.
    neurons_prev = rows*cols
    results_prev = imgs
    for layer, neurons in enumerate(params['neuron_counts']):
      if not weights.has_key(layer):
        msg("Generate weights, {0} inputs, {1} outputs".format(neurons_prev, neurons))
        # Random-normal init, float32 to match the kernel's float* args.
        weights[layer] = numpy.random.randn(neurons_prev, neurons).astype(numpy.float32)

      # Output buffer for this layer: one row per image, one col per neuron.
      host_results = numpy.zeros((num_imgs,neurons), numpy.float32)
      #results = pycuda.gpuarray.to_gpu(host_results)

      msg("matrix_mul<< {0}, {1}>>".format((block_size, block_size, 1,),(neurons/block_size, num_imgs/block_size,)))

      # NOTE(review): no grid= is passed (both attempts are commented
      # out), so pycuda launches a single block -- presumably only a
      # block_size x block_size corner of each result is computed;
      # confirm.  Also the kernel declares wA/wB as int but uint16 is
      # passed here -- verify pycuda widens these correctly.
      krnl(
        htod(results_prev), htod(weights[layer]),
        numpy.uint16(neurons_prev), numpy.uint16(neurons),
        dtoh(host_results),
        #grid=(neurons/block_size, num_imgs/block_size),
        #grid=(neurons/block_size, num_imgs/block_size,),
        block=(block_size,block_size,1,),
        ) 
      msg("Layer {0} results: {1}".format(layer, host_results))
      """
      krnl.prepared_call((neurons_prev/block_size, num_imgs/block_size),
        htod(results_prev), htod(weights[layer]), neurons_prev, neurons,
        dtoh(results),
        ) 
      """
      # This layer's output feeds the next layer's input.
      neurons_prev = neurons
      results_prev = host_results
      
      
  #d_weights1 = pycuda.gpuarray.to_gpu(h_weights1)
  #d_weights2 = pycuda.gpuarray.to_gpu(h_weights2)


  # Layer 1
  """
  krnl(
    imgs_arr, weights1_gpu, rows*cols, neurons1, num_imgs,
    results1,
    block=(rows*cols/block_size, num_imgs/block_size),
    ) 

  # Layer 2
  krnl.prepared_call((neurons1/block_size, num_imgs/block_size),
    results1.gpudata, d_weights2.gpudata, neurons1, neurons2, num_imgs,
    results2.gpudata,
    ) 
  
  """
  msg("Completed map task in trivial secs")
  # NOTE(review): host_results is unbound (NameError) when
  # 'neuron_counts' is absent from params -- confirm the job always
  # supplies it, or add a guard.
  return [("it was fast", host_results)]


master = sys.argv[1]
print "Starting Disco job.."

print "Go to {0} to see status of the job.".format(master)
results = Disco(master).new_job(
  name = "pycuda_disco_test",
  input = ["/Users/paulreimer/Development/data_mining/datasets/train-images.idx3-ubyte"],
  required_modules = ['numpy'],
  map_init = fun_map_init,
  map_reader = mnist_img_reader,
  map = fun_map,
  reduce = func.nop_reduce,
  #status_interval = 0,
  params = {'block_size':16, 'neuron_counts':[128,10]}
  #nr_reduces = 1
  ).wait()

print "Job done. Results:"
for secs, result in result_iterator(results):
  print secs, result
