import numpy as np
from numba import jit,vectorize
import errors


def normalization_useEC(obj,op):
	"""Min-max normalize obj.value using extrema cached on obj (EC = extrema cache).

	op[1] is the output node. On the first call the column-wise min/max of
	obj.value are computed and stored as obj.ECmin/obj.ECmax; later calls
	reuse the cache so new data is scaled by the original range.
	"""
	out = op[1]
	try:
		# Probe the cache; raises when it is missing or not yet an array.
		# BUG FIX: the original probed the undefined global name `ECmin`,
		# which raised an uncaught NameError on every call.
		obj.ECmin[0]
	except (AttributeError,TypeError):
		obj.ECmax=obj.value.max(axis=0)
		obj.ECmin=obj.value.min(axis=0)
	# BUG FIX: parenthesize the denominator — min-max scaling is
	# (x-min)/(max-min), not (x-min)/max - min.
	out.inputValue((obj.value-obj.ECmin)/(obj.ECmax-obj.ECmin))
	return out

def normalization_MinMax(obj,op):
	"""Min-max normalize obj.value column-wise into [0, 1].

	op[1] is the output node; the scaled array is pushed into it via
	inputValue() and the node is returned.
	"""
	out = op[1]
	maxi=obj.value.max(axis=0)
	mini=obj.value.min(axis=0)
	# BUG FIX: the denominator must be (max-min); the original evaluated
	# (x-min)/max - min due to missing parentheses.
	out.inputValue((obj.value-mini)/(maxi-mini))
	return out

def normalization_ZScore(obj,op):
	"""Column-wise z-score normalization of obj.value, pushed into op[1]."""
	out = op[1]
	mu = np.mean(obj.value,axis=0)
	sigma = np.std(obj.value,axis=0)
	out.inputValue((obj.value-mu)/sigma)
	return out

def matmul(obj,op):
	"""Matrix product obj.value . op[1].value, pushed into op[2]."""
	rhs = op[1]
	out = op[2]
	product = np.dot(obj.value,rhs.value)
	out.inputValue(product)
	return out

def rmatmul(obj,op):
	"""Reflected matrix product op[1].value . obj.value, pushed into op[2]."""
	lhs = op[1]
	out = op[2]
	product = np.dot(lhs.value,obj.value)
	out.inputValue(product)
	return out

def add(obj,op):
	"""Element-wise sum of obj.value and op[1].value, pushed into op[2]."""
	rhs = op[1]
	out = op[2]
	out.inputValue(obj.value+rhs.value)
	return out

def addnum(obj,op):
	"""Add the plain number op[1] to obj.value, pushed into op[2]."""
	scalar = op[1]
	out = op[2]
	out.inputValue(obj.value+scalar)
	return out

def sub(obj,op):
	"""Element-wise difference obj.value - op[1].value, pushed into op[2]."""
	rhs = op[1]
	out = op[2]
	out.inputValue(obj.value-rhs.value)
	return out

def subnum(obj,op):
	"""Subtract the plain number op[1] from obj.value, pushed into op[2]."""
	scalar = op[1]
	out = op[2]
	out.inputValue(obj.value-scalar)
	return out

def rsub(obj,op):
	"""Reflected element-wise difference op[1].value - obj.value, pushed into op[2]."""
	lhs = op[1]
	out = op[2]
	out.inputValue(lhs.value-obj.value)
	return out

def rsubnum(obj,op):
	"""Reflected subtraction with a plain number: op[1] - obj.value, pushed into op[2]."""
	scalar = op[1]
	out = op[2]
	out.inputValue(scalar-obj.value)
	return out

def mul(obj,op):
	"""Element-wise product of obj.value and op[1].value, pushed into op[2]."""
	rhs = op[1]
	out = op[2]
	out.inputValue(obj.value*rhs.value)
	return out

def mulnum(obj,op):
	"""Multiply obj.value by the plain number op[1], pushed into op[2]."""
	scalar = op[1]
	out = op[2]
	out.inputValue(obj.value*scalar)
	return out


def simpleConv2d(obj,op):
	"""Naive forward 2-D convolution over an NCHW batch via shifted strided views.

	op layout: op[1]=kernels node (one 2-D kernel per output channel),
	op[2]=out node, op[3]=padding (int), op[4]=stride (int).

	Each input channel is convolved with out_div_in kernels, each kernel
	writing its own output channel (channel_out = channel_in*out_div_in
	+ kernelIndex), so output channels are never summed across input
	channels (depthwise-style, not a full dense conv).
	"""
	kernels,out,padding,stride = op[1],op[2],op[3],op[4]
	# zero-pad only the two spatial axes
	img = np.pad(obj.value,((0,0),(0,0),(padding,padding),(padding,padding)))
	out_div_in=out.shape[1]//obj.shape[1]	# kernels applied per input channel
	outCache = np.zeros(out.shape)
	row = 0
	col = 0
	for channel_in in range(obj.shape[1]):
		for kernelIndex in range(out_div_in):
			channel_out = channel_in*out_div_in+kernelIndex
			kernel = kernels.value[channel_out]
			# Accumulate one scaled, shifted, strided view of the image per
			# kernel element; (row, col) track the current kernel offset.
			for arow in kernel:
				for num in arow:
					# NOTE(review): both slice bounds use img.shape[2] (height);
					# the column bound presumably should use img.shape[3] (width),
					# so this looks correct only for square inputs — confirm.
					outCache[:,channel_out,np.newaxis] += (img[:,channel_in,np.newaxis]*num)[:,:,row:row+img.shape[2]-kernel.shape[0]+1:stride,col:col+img.shape[2]-kernel.shape[1]+1:stride]
					col += 1
				row += 1
				col = 0
			row = 0
	out.inputValue(outCache)
	out.comeFrom[4] = img	# padded input cached, presumably for the backward pass — verify
	return out







def conv2d_im2col(obj,op):
	"""Forward 2-D convolution of a single-channel NHW batch via im2col.

	op layout: op[1]=kernel node (kh x kw), op[2]=out node,
	op[3]=padding (int), op[4]=stride (int).

	The padded image is unfolded into a column matrix so the convolution
	becomes one matrix product. The padded image and the (k, col) pair are
	cached on out.comeFrom[5]/[6], presumably for the backward pass.
	"""
	kernel,out,padding,stride = op[1],op[2],op[3],op[4]
	N,H,W = obj.shape
	kh,kw = kernel.shape
	_,out_h,out_w = out.shape

	# zero padding of the spatial axes
	img = np.pad(obj.value,((0,0),(padding,padding),(padding,padding)),'constant')

	# col[n, y, x, :, :] is the kh x kw patch under output position (y, x)
	col = np.empty((N,out_h,out_w,kh,kw))
	for y in range(out_h):
		y_start = y*stride
		y_end = y_start+kh
		for x in range(out_w):
			x_start = x*stride
			x_end = x_start+kw
			col[:,y,x,:,:] = img[:,y_start:y_end,x_start:x_end]

	k = kernel.value.reshape((1,kh*kw))
	# BUG FIX: the kernel axes must be moved in front of the spatial axes
	# before flattening; a plain reshape of (N,out_h,out_w,kh,kw) to
	# (N,kh*kw,out_h*out_w) reinterprets the buffer in the wrong order and
	# scrambles the patches fed to the dot product.
	col = col.transpose(0,3,4,1,2).reshape((N,kh*kw,out_h*out_w))
	# np.dot of (1,kh*kw) with (N,kh*kw,L) sums over the kernel axis,
	# giving (1,N,L), which reshapes cleanly to (N,out_h,out_w).
	out.inputValue(np.dot(k,col).reshape((N,out_h,out_w)))
	out.comeFrom[5] = img
	out.comeFrom[6] = [k,col]
	return out

# NOTE: superseded naive conv2d implementation kept below for reference;
# the 'conv2d' entry in funcDict points at conv2d_im2col defined above.
# def conv2d(obj,op):
# 	kernels,out,padding,stride = op[1],op[2],op[3],op[4]
# 	img = np.zeros((obj.shape[0],
# 		obj.shape[1],
# 		obj.shape[2]+2*padding,
# 		obj.shape[3]+2*padding))
# 	img[:,
# 		:,
# 		padding:obj.shape[2]+padding,
# 		padding:obj.shape[3]+padding]=obj.value

# 	out_div_in=out.shape[1]//obj.shape[1]
# 	outCache = np.zeros(out.shape)
# 	out_row,out_col=0,0
# 	for channel_in in range(obj.shape[1]):
# 		for kernelIndex in range(out_div_in):
# 			channel_out = channel_in*out_div_in+kernelIndex
# 			kernel = kernels.value[channel_out]
# 			for row in range(0,img.shape[2]-kernel.shape[0]+1,stride):
# 				for col in range(0,img.shape[3]-kernel.shape[1]+1,stride):
# 					outCache[:,channel_out,out_row,out_col] = np.sum(kernel*img[:,
# 												channel_in,
# 												row:row+kernel.shape[0],
# 												col:col+kernel.shape[1]],axis=(1,2))
# 					out_col += 1
# 				out_row += 1
# 				out_col = 0
# 			out_row = 0 
# 	out.inputValue(outCache)
# 	out.comeFrom[5] = img
# 	return out

def maxPool2d(obj,op):
	"""Forward 2-D max pooling over the spatial axes of an NCHW batch.

	op layout: op[1]=out node (pre-sized), op[2]=poolSize (height, width).
	out is zeroed, then each pooling window's per-channel maximum is
	written to the corresponding output cell.
	"""
	out,poolSize = op[1],op[2]
	ph,pw = poolSize[0],poolSize[1]
	out.zeros()
	for out_row,row in enumerate(range(0,obj.shape[2],ph)):
		for out_col,col in enumerate(range(0,obj.shape[3],pw)):
			window = obj.value[:,:,row:row+ph,col:col+pw]
			out.value[:,:,out_row,out_col] = window.max(axis=(2,3))
	return out

def avgPool2d(obj,op):
	"""Forward 2-D average pooling over the spatial axes of an NCHW batch.

	op layout: op[1]=out node (pre-sized), op[2]=poolSize (height, width).
	out is zeroed, then each pooling window's per-channel mean is written
	to the corresponding output cell.
	"""
	out,poolSize = op[1],op[2]
	ph,pw = poolSize[0],poolSize[1]
	out.zeros()
	for out_row,row in enumerate(range(0,obj.shape[2],ph)):
		for out_col,col in enumerate(range(0,obj.shape[3],pw)):
			window = obj.value[:,:,row:row+ph,col:col+pw]
			out.value[:,:,out_row,out_col] = window.mean(axis=(2,3))
	return out

def reshape(obj,op):
	"""Reshape obj.value to the output node's shape and push it into op[1].

	BUG FIX: the original referenced the undefined name `self` (this is a
	free function, not a method), raising NameError on every call; the
	source node is `obj`.
	"""
	out = op[1]
	out.inputValue(obj.value.reshape(out.shape))
	return out

def ReLU(obj,op):
	"""Rectified linear unit: max(x, 0), computed as (x + |x|) / 2, pushed into op[1]."""
	out = op[1]
	v = obj.value
	out.inputValue((v+abs(v))*0.5)
	return out

def sigmoid(obj,op):
	"""Logistic sigmoid 1 / (1 + e^-x) of obj.value, pushed into op[1]."""
	out = op[1]
	neg_exp = np.exp(-obj.value)
	out.inputValue(1/(1+neg_exp))
	return out

def tanh(obj,op):
	"""Hyperbolic tangent (e^x - e^-x) / (e^x + e^-x) of obj.value, pushed into op[1]."""
	out = op[1]
	pos = np.exp(obj.value)
	neg = np.exp(-obj.value)
	numer = pos-neg
	denom = pos+neg
	out.inputValue(numer/denom)
	return out

def softplus(obj,op):
	"""Softplus log(1 + e^x) of obj.value, pushed into op[1].

	Caches e^x on obj.exp (original comment: used for differentiation).
	"""
	out = op[1]
	e = np.exp(obj.value)
	obj.exp = e
	out.inputValue(np.log(e+1))
	return out

def softsign(obj,op):
	"""Softsign x / (1 + |x|) of obj.value, pushed into op[1]."""
	out = op[1]
	v = obj.value
	out.inputValue(v/(1+abs(v)))
	return out

def MSELoss(obj,op):
	"""Element-wise squared error (prediction - target)^2, pushed into op[2].

	op[1] is the target node; no mean is taken here — the result keeps the
	input's shape.
	"""
	feature = op[1]
	out = op[2]
	diff = obj.value-feature.value
	out.inputValue(diff**2)
	return out


def softmax_crossEntropy(obj,op):
	"""Combined softmax + cross-entropy terms t * (log(sum_j e^x_j) - x), pushed into op[2].

	op[1] is the target node. Caches e^x on obj.exp and the row-wise sum
	(shape (N, 1)) on obj.expsum, which the rest of the framework can read
	back later.
	"""
	feature = op[1]
	out = op[2]
	obj.exp = np.exp(obj.value)
	obj.expsum = np.sum(obj.exp,axis=1,keepdims=True)
	out.inputValue(feature.value*(np.log(obj.expsum)-obj.value))
	return out






#template
def f(obj,op):
	"""Template placeholder for new operator functions; intentionally a no-op and not registered in funcDict."""
	pass




# Dispatch table: operation name -> forward implementation.
# Note that the plain 'normalization' key resolves to the min-max variant
# and 'conv2d' resolves to the im2col implementation.
funcDict = {'normalization_useEC':normalization_useEC,
			'normalization':normalization_MinMax,
			'normalization_ZScore':normalization_ZScore,
			'matmul':matmul,
			'rmatmul':rmatmul,
			'add':add,
			'addnum':addnum,
			'sub':sub,
			'subnum':subnum,
			'rsub':rsub,
			'rsubnum':rsubnum,
			'mul':mul,
			'mulnum':mulnum,
			'simpleConv2d':simpleConv2d,
			'conv2d':conv2d_im2col,
			'maxPool2d':maxPool2d,
			'avgPool2d':avgPool2d,
			'reshape':reshape,
			'ReLU':ReLU,
			'sigmoid':sigmoid,
			'tanh':tanh,
			'softplus':softplus,
			'softsign':softsign,
			'MSELoss':MSELoss,
			'softmax_crossEntropy':softmax_crossEntropy}











