#!/usr/bin/env python
# Algorithm for approximating matrix multiplication
# Cohen1999: Approximating Matrix Multiplication for Pattern Recognition Tasks
# Preprocessing
# calculates accumulated probabilities for each row in a matrix A
import bisect as bi
import numpy as np

def preprocess_sum(A) :
	"""Return the sum of every row of the dense matrix *A*.

	These row sums are the per-row weights later used when sampling
	columns/rows for the approximate matrix product.
	"""
	return [sum(row) for row in A]

def compute_Wij(A, Wij) :
	"""Per-row accumulated sampling probabilities for a dense matrix.

	Each entry of a row of A is weighted by the matching entry of the
	next layer's weight vector (``Wij[-1]``), accumulated left to right,
	and finally normalized by the row's total weight so the last
	accumulated value is 1.0.

	Returns:
		Wkj     -- list of per-row total weights
		p_accum -- list (one per row) of normalized accumulated weights,
		           suitable for bisection-based sampling (see sample()).
	"""
	Wkj = []
	p_accum = []

	for row in A :
		p_row = []    # accumulated (unnormalized) weights of this row
		row_sum = 0   # total weight of outgoing edges for this row
		_accum = 0.0
		for c_idx, val in enumerate(row) :
			# weight of the entry scaled by the next layer's row sum
			w_uv = val * Wij[-1][c_idx]
			row_sum += w_uv
			_accum += w_uv
			p_row.append(_accum)
		# normalize accumulated weights into a probability ramp
		p_accum.append([x / row_sum for x in p_row])
		Wkj.append(row_sum)

	return Wkj, p_accum
	
def process_csr_sum(A) :
	"""Return a list of the row sums of the sparse (CSR) matrix *A*.

	Replaces the per-row ``getrow(i).sum(1)[0,0]`` loop with a single
	vectorized ``A.sum(axis=1)`` call; the (n, 1) result is flattened
	into a plain Python list.
	"""
	return np.asarray(A.sum(axis=1)).ravel().tolist()

def compute_csr_Wij(A, Wij) :
	"""Per-row accumulated sampling probabilities for a CSR sparse matrix.

	Same computation as compute_Wij, but walks the CSR triplet arrays
	(data / indices / indptr) directly so only the stored entries are
	visited.

	Returns:
		Wkj     -- list of per-row total weights
		p_accum -- list of 2 x nnz(row) arrays: row 0 holds the normalized
		           accumulated weights, row 1 the matching column indices.
	"""
	Wkj = []
	# todo make accum to a dict (boundary)
	p_accum = []

	values = A.data
	col_idx = A.indices
	row_ptr = A.indptr

	for r_idx in range(len(row_ptr) - 1) :
		start, end = row_ptr[r_idx], row_ptr[r_idx + 1]
		prob = np.zeros((2, end - start), dtype=float)
		row_sum = 0.0  # total outgoing weight of this row
		_accum = 0.0
		for i, ptr_idx in enumerate(range(start, end)) :
			# entry weight scaled by the next layer's row sum
			w_uv = values[ptr_idx] * Wij[-1][col_idx[ptr_idx]]
			row_sum += w_uv
			_accum += w_uv
			prob[0, i] = _accum
			prob[1, i] = col_idx[ptr_idx]

		# normalize accumulated weights in place (vectorized; no-op on
		# empty rows since the slice has no elements)
		prob[0] /= row_sum
		p_accum.append(prob)
		Wkj.append(row_sum)
	return Wkj, p_accum

def compute_lil_Wij(A, Wij) :
	"""Per-row accumulated sampling probabilities from row/col/val triplets.

	NOTE(review): the original body accumulated a single running row,
	never stored a result and never returned -- it was unfinished.
	Completed here to mirror compute_Wij / compute_csr_Wij.

	Assumes A exposes parallel ``row`` / ``col`` / ``val`` sequences
	sorted by row index (COO-style triplets) -- TODO confirm against the
	actual sparse type used by callers; scipy's COO uses ``data``, not
	``val``.

	Returns:
		Wkj     -- list of per-row total weights (rows with entries only)
		p_accum -- list of normalized accumulated weight lists per row
	"""
	Wkj = []
	p_accum = []

	p_row = []     # accumulated weights of the row being built
	row_sum = 0    # total outgoing weight of the row being built
	_accum = 0.0
	r_prev = None  # row index currently being accumulated

	for i, j, v in zip(A.row, A.col, A.val) :
		if r_prev is not None and i != r_prev :
			# row boundary: normalize the finished row and reset
			p_accum.append([x / row_sum for x in p_row])
			Wkj.append(row_sum)
			p_row = []
			row_sum = 0
			_accum = 0.0
		r_prev = i
		# entry weight scaled by the next layer's row sum
		w_uv = v * Wij[-1][j]
		row_sum += w_uv
		_accum += w_uv
		p_row.append(_accum)

	if p_row :
		# flush the final row
		p_accum.append([x / row_sum for x in p_row])
		Wkj.append(row_sum)
	return Wkj, p_accum
		
def sample(seed, prob_accum):
	"""Map *seed* (a draw in [0, 1)) to a bucket index.

	Performs a binary search for the leftmost position where *seed*
	would be inserted into the sorted accumulated-probability list and
	returns that index as the sampled element.
	"""
	return bi.bisect_left(prob_accum, seed)