#!/usr/bin/env python

import sys, math, random
import src.crop as cr
from mpi4py import MPI
import numpy as np
from operator import itemgetter
from util import Timer

# MPI setup: all ranks share COMM_WORLD; rank 0 acts as the broadcast
# root during streaming and as the final target of the reduction below.
comm = MPI.COMM_WORLD
p = comm.size
rank = comm.rank
root = 0

# `step` indexes the current round of the binomial-tree reduction;
# `done` flags loop termination for both phases.
step = 1
done = False

# eps is the sketch accuracy parameter; k = int(1/eps) is the summary
# size bound used when merging partial results in merge_parallel().
eps = 0.005
crop = cr.CRoP(eps, rank, p)
k = int(1/eps)
# Streaming phase, timed as a whole.  Rank 0 loads the matrix and, for
# each index n, broadcasts the pair (column n of A, row n of B); every
# rank — root included — feeds the pair into its local CRoP sketch.
# NOTE(review): presumably crop.crop() accumulates the rank-1
# outer-product contribution of (A[:,n], B[n,:]) to A*B — confirm
# against src/crop.py.
with Timer() as t :
	if rank == 0 :
#		A = B = np.load('mat3x3.npy',mmap_mode='r')
		#A = B = np.memmap('stoc_mat_1000.npy',dtype="double", mode='r', shape=(1000,1000))
		# Square input matrix, memory-mapped; A and B alias the same array,
		# so this computes a sketch of A*A.
		A = B = np.load('../data/np_mat__10000_0.001.npy',mmap_mode='r')
		n = 0
		print "nnz ", np.count_nonzero(A)
		while n < len(A[0]) :
			# np.array(...) forces a contiguous copy out of the memmap
			# before broadcasting; A.T[n,:] is column n, B[n,:] is row n.
			data = ([np.array(A.T[n,:]),MPI.DOUBLE], [np.array(B[n,:]),MPI.DOUBLE])
			comm.bcast(data, root)
			crop.crop(data[0][0], data[1][0])
			n += 1
		# Sentinel string tells the workers the stream is exhausted.
		comm.bcast("Done", root)
	else :
#		print "proc: ", rank
		# Workers loop on the broadcast until the "Done" sentinel arrives,
		# feeding each (column, row) pair into their sketch.
		data = ""
		done = False
		while not done :
			data = comm.bcast(data, root)
			if data != "Done" :
				crop.crop(data[0][0], data[1][0])
			else :
				done = True

# Reset the termination flag for the reduction loop below and extract
# this rank's partial summary as a dict (key -> (key, count, error)).
done = False
result = crop.get_summary(type='dict')
#print "finished at %s\n%s" % (rank, result)
# Per-rank timing of the streaming phase (rank <TAB> seconds).
print "%s\t%.03f" % (rank,t.interval)

def merge_parallel(D1, D2, limit=None):
	"""Merge two CRoP summary dicts and prune the result to ~limit entries.

	Each dict maps key -> (key, count, error).  Keys present in both
	inputs have their count (index 1) and error (index 2) fields summed.
	The smaller dict is merged into the larger one (fewer updates); the
	larger dict is mutated in place and returned.

	Args:
		D1, D2: summary dicts; the larger of the two is mutated.
		limit: maximum summary size.  Defaults to the module-level ``k``
			(= int(1/eps)) for backward compatibility.  When the merged
			dict exceeds ``limit``, only entries whose count is >= the
			(limit+1)-th largest count are kept, so ties at the
			threshold survive and the result may slightly exceed limit.

	Returns:
		The merged (and possibly pruned) dict.
	"""
	if limit is None:
		limit = k  # module-level summary size bound
	# Iterate the smaller dict so the merge loop is as short as possible.
	if len(D1) > len(D2):
		D1, D2 = D2, D1
	for key, item in D1.items():
		if key in D2:
			# BUGFIX: original read D2[jey] (undefined name) here, which
			# raised NameError on any overlapping key.
			other = D2[key]
			D2[key] = (key, other[1] + item[1], other[2] + item[2])
		else:
			D2[key] = item
	# Prune back toward `limit` entries when the merge overflowed.
	if len(D2) > limit:
		ranked = sorted(D2.values(), key=itemgetter(1), reverse=True)
		kth = ranked[limit][1]
		D2.clear()
		for entry in ranked:
			if entry[1] >= kth:
				D2[entry[0]] = (entry[0], entry[1], entry[2])
	return D2

#print ("starting parallel reduction")
# Binomial-tree reduction of the per-rank summaries toward rank 0.
# At step s, every rank whose rank is not a multiple of 2**s sends its
# partial result to the partner 2**(s-1) below it and exits; surviving
# ranks receive, merge, and advance to the next step until lg(p) steps
# have completed.
# NOTE(review): the request returned by comm.isend is discarded without
# Wait(); completion relies on the send finishing eagerly — confirm this
# is safe for large summaries.  Also assumes p is a power of two:
# otherwise rank + 2**(step-1) may not exist and recv blocks forever.
while not done :
	if rank % 2 ** step != 0:
		# Sender role: ship summary plus current step, then exit the loop.
		send_msg = {"summary": result, "step": step}
		_dest = rank - 2 ** (step - 1)
#		print rank, "sending to ", _dest

		data = comm.isend(send_msg, dest=_dest)
		done = True
	else :
		# Receiver role: take the partner's partial summary and merge it
		# into the local one, then resume at the partner's step + 1.
		_source = rank + 2 ** (step - 1)
		recv_msg = comm.recv(source=_source)
#		print rank, "recv from %s at step %s" % (_source, str(step))
		step = int(recv_msg["step"]) + 1
		result = merge_parallel(result, recv_msg["summary"])
		if step <= math.log(p, 2):
#			print "step < lg p"
			if rank != root :
#				print "sending to next step"
				# Still more rounds to go and this rank is not the root,
				# so forward the merged summary down the tree.
				_dest = rank - 2 ** (step - 1)
				send_msg = {"summary": result, "step": step}
#				print rank, "sending to ", _dest
				data = comm.isend(send_msg, dest=_dest)
		else :
#			print "exiting at ", rank
			# All lg(p) rounds finished; the root now holds the full merge.
			done = True


# After the reduction, rank 0 holds the fully merged summary; report the
# master's total elapsed time (the commented lines reconstruct and print
# the dense result for debugging against np.dot).
if rank == 0 :
##	print "Exact\n ", np.dot(A,B)
##	print "Rank %s. \nData: %s" % (rank, result)
##	C = np.ndarray(shape=(len(A),len(A)))
##	for key,el in result.iteritems() :
##		C[key[0]][key[1]] = el[1]
##	print C
	print "master %s\t%.03f" % (rank,t.interval)
#else :
