import numpy as np
from time import time
import forward as fw
import backward as bw
import pandas as pd               



class Tensor(object):
	"""A node in a computation graph over numpy arrays.

	Combining Tensors with operators or the layer methods below does not
	compute anything immediately; it only records the operation on both
	ends of the new graph edge:

	* ``goTo``     -- outgoing operations, walked by :meth:`forward`
	* ``comeFrom`` -- the producing operation, walked by :meth:`backward`

	The actual math is dispatched through ``fw.funcDict`` / ``bw.funcDict``
	(the project's ``forward`` / ``backward`` modules).
	"""

	def __init__(self, shape, comeFrom=False, way=None):
		# current value (numpy array); set by an init method, inputValue(),
		# or the forward pass
		self.value = None
		self.shape = tuple(shape)
		self.size = 1
		for i in shape:
			self.size *= i
		# accumulated gradient dLoss/dSelf
		self.grad = 0
		# outgoing edges, consumed by the forward pass
		self.goTo = []
		# producing operation, consumed by the backward pass
		self.comeFrom = comeFrom
		# gradient-descent variant used by step()
		self.way = way
		self.momentum = 0  # first moment (Momentum / Adam)
		self.v = 0  # second moment (AdaGrad / RMSProp / Adam)
		# empirical constants for min-max normalization
		# BUG FIX: these were assigned to plain locals (ECmax/ECmin) and
		# silently discarded, so the attributes never existed
		self.ECmax = False
		self.ECmin = False

	def __str__(self):
		# printable representation showing the current value
		return '(Tensor: \n'+str(self.value)+')'

	def show_mean(self, axis=None):
		"""Print the mean of the value along *axis* (all elements by default)."""
		print(np.mean(self.value, axis=axis))

	def show_sum(self, axis=None):
		"""Print the sum of the value along *axis* (all elements by default)."""
		print(np.sum(self.value, axis=axis))

	def inputValue(self, value):
		"""Set this tensor's value from any array-like object."""
		self.value = np.array(value)

	def to_csv(self, filename):
		"""Save the value to *filename* as CSV.

		BUG FIX: the old code called the non-existent module function
		``pd.to_csv``; ``to_csv`` is a DataFrame method.  ``index=False``
		keeps :meth:`read_csv` a faithful round trip.
		"""
		pd.DataFrame(self.value).to_csv(filename, index=False)

	def read_csv(self, filename):
		"""Load the value from a CSV file written by :meth:`to_csv`."""
		self.value = pd.read_csv(filename).values

	def zeros(self):
		"""Initialize the value with all zeros."""
		self.value = np.zeros(self.shape)

	def ones(self):
		"""Initialize the value with all ones."""
		self.value = np.ones(self.shape)

	def random(self):
		"""Initialize the value with uniform random numbers in [0, 1)."""
		self.value = np.random.random(self.shape)

	def random_normal(self, avg=0, std=1):
		"""Initialize the value with normally distributed random numbers."""
		self.value = np.random.normal(avg, std, self.size).reshape(self.shape)

	def __add__(self, b):
		"""Element-wise addition with a Tensor or a scalar."""
		if isinstance(b, Tensor):
			# the result takes the larger operand's shape (broadcasting);
			# the two original branches were identical apart from this
			shape = self.shape if self.size >= b.size else b.shape
			out = Tensor(shape, comeFrom=['add', self, b])
			self.goTo.append(['add', b, out])
			b.goTo.append(['add', self, out])
		else:
			out = Tensor(self.shape, comeFrom=['addnum', self, b])
			self.goTo.append(['addnum', b, out])
		return out

	def __sub__(self, b):
		"""Element-wise subtraction of a Tensor or a scalar.

		BUG FIX: the old try/except appended a bogus 'sub' edge to
		``self.goTo`` before discovering (via AttributeError on
		``b.goTo``) that *b* was a plain number; that stale edge would
		crash the forward pass.
		"""
		if isinstance(b, Tensor):
			out = Tensor(self.shape, comeFrom=['sub', self, b])
			self.goTo.append(['sub', b, out])
			b.goTo.append(['rsub', self, out])
		else:
			out = Tensor(self.shape, comeFrom=['subnum', self, b])
			self.goTo.append(['subnum', b, out])
		return out

	def __mul__(self, b):
		"""Element-wise multiplication with a Tensor or a scalar.

		BUG FIX: same stale-edge problem as ``__sub__`` when *b* is a
		plain number.
		"""
		if isinstance(b, Tensor):
			out = Tensor(self.shape, comeFrom=['mul', self, b])
			self.goTo.append(['mul', b, out])
			b.goTo.append(['mul', self, out])
		else:
			out = Tensor(self.shape, comeFrom=['mulnum', self, b])
			self.goTo.append(['mulnum', b, out])
		return out

	def __radd__(self, a):
		"""scalar + Tensor.  BUG FIX: referenced the undefined name ``b``."""
		out = Tensor(self.shape, comeFrom=['addnum', self, a])
		self.goTo.append(['addnum', a, out])
		return out

	def __rsub__(self, a):
		"""scalar - Tensor.  BUG FIX: referenced the undefined name ``b``."""
		out = Tensor(self.shape, comeFrom=['rsubnum', self, a])
		self.goTo.append(['rsubnum', a, out])
		return out

	def __rmul__(self, a):
		"""scalar * Tensor.  BUG FIX: referenced the undefined name ``b``."""
		out = Tensor(self.shape, comeFrom=['mulnum', self, a])
		self.goTo.append(['mulnum', a, out])
		return out

	def matmul(self, b):
		"""Matrix multiplication self @ b."""
		out = Tensor((self.shape[0], b.shape[1]), comeFrom=['matmul', self, b])
		self.goTo.append(['matmul', b, out])
		# b is the right-hand operand, recorded as 'rmatmul'
		b.goTo.append(['rmatmul', self, out])
		return out

	def ReLU(self):
		"""ReLU activation."""
		out = Tensor(self.shape, comeFrom=['ReLU', self])
		self.goTo.append(['ReLU', out])
		return out

	def sigmoid(self):
		"""Sigmoid activation."""
		out = Tensor(self.shape, comeFrom=['sigmoid', self])
		self.goTo.append(['sigmoid', out])
		return out

	def tanh(self):
		"""Tanh activation."""
		out = Tensor(self.shape, comeFrom=['tanh', self])
		self.goTo.append(['tanh', out])
		return out

	def softsign(self):
		"""Softsign activation."""
		out = Tensor(self.shape, comeFrom=['softsign', self])
		self.goTo.append(['softsign', out])
		return out

	def softplus(self):
		"""Softplus activation."""
		out = Tensor(self.shape, comeFrom=['softplus', self])
		self.goTo.append(['softplus', out])
		return out

	def simpleConv2d(self, padding=0, kernels=None, kernelshape=(1, 2, 2), stride=1):
		"""2-D convolution over a batched multi-channel input.

		self.shape    = (batch_size, channel, height, width)
		kernels.shape = (out_channel, kernel_height, kernel_width);
		kernels.shape[0] must be a multiple of self.shape[1].
		When *kernels* is None a normally-initialized kernel Tensor of
		*kernelshape* is created.  (The old try/except duplicated the
		whole body; an explicit None check needs it only once.)
		"""
		if kernels is None:
			kernels = Tensor(kernelshape)
			kernels.random_normal(0, 1)
		out = Tensor((self.shape[0],
			kernels.shape[0],
			(self.shape[2]-kernels.shape[1]+2*padding)//stride+1,
			(self.shape[3]-kernels.shape[2]+2*padding)//stride+1),
			comeFrom=['simpleConv2d', self, kernels, padding, 0, stride])
		self.goTo.append(['simpleConv2d', kernels, out, padding, stride])
		return out

	def conv2d(self, stride=1, padding=0, kernel=None, kernelshape=(2, 2)):
		"""2-D convolution with a single 2-D kernel.

		Indexing implies a rank-3 input (presumably (batch, height,
		width) -- TODO confirm against fw.funcDict['conv2d']) and a
		rank-2 kernel.  When *kernel* is None a normally-initialized
		kernel Tensor of *kernelshape* is created.
		"""
		if kernel is None:
			kernel = Tensor(kernelshape)
			kernel.random_normal(0, 1)
		out = Tensor((self.shape[0],
			(self.shape[1]+2*padding-kernel.shape[0])//stride+1,
			(self.shape[2]+2*padding-kernel.shape[1])//stride+1),
			comeFrom=['conv2d', self, kernel, padding, stride, 0, None])
		self.goTo.append(['conv2d', kernel, out, padding, stride, None])
		return out

	def maxPool2d(self, poolSize=(2, 2)):
		"""Max pooling over the last two dimensions of a rank-4 input."""
		out = Tensor((self.shape[0], self.shape[1],
			self.shape[2]//poolSize[0], self.shape[3]//poolSize[1]),
			comeFrom=['maxPool2d', self, poolSize])
		self.goTo.append(['maxPool2d', out, poolSize])
		return out

	def avgPool2d(self, poolSize=(2, 2)):
		"""Average pooling over the last two dimensions of a rank-4 input."""
		out = Tensor((self.shape[0], self.shape[1],
			self.shape[2]//poolSize[0], self.shape[3]//poolSize[1]),
			comeFrom=['avgPool2d', self, poolSize])
		self.goTo.append(['avgPool2d', out, poolSize])
		return out

	def reshape(self, shape):
		"""Reshape to *shape* (total element count must match)."""
		out = Tensor(shape, comeFrom=['reshape', self])
		self.goTo.append(['reshape', out])
		return out

	def MSELoss(self, feature):
		"""Mean-squared-error loss against target *feature*."""
		out = Tensor(self.shape, comeFrom=['MSELoss', self, feature])
		self.goTo.append(['MSELoss', feature, out])
		return out

	def softmax_crossEntropy(self, feature):
		"""Softmax + cross-entropy loss against target *feature*."""
		out = Tensor(self.shape, comeFrom=['softmax_crossEntropy', self, feature])
		self.goTo.append(['softmax_crossEntropy', feature, out])
		return out

	def normalization_MinMax(self, useEC=True):
		"""Min-max normalization of a data set; not part of backprop.

		EC = empirical constants (see self.ECmax / self.ECmin).
		"""
		if useEC:
			out = Tensor(self.shape)
			self.goTo.append(['normalization_useEC', out])
		else:
			out = Tensor(self.shape)
			self.goTo.append(['normalization', out])
		return out

	def normalization_ZScore(self):
		"""Z-score normalization of a data set; not part of backprop."""
		out = Tensor(self.shape)
		self.goTo.append(['normalization_ZScore', out])
		return out

	def forward(self):
		"""Run the forward pass from this node through the whole graph."""
		for op in self.goTo:
			out = fw.funcDict[op[0]](self, op)
			out.forward()  # recurse into the next layer

	def forward_1step(self):
		"""Run only the operations directly attached to this node."""
		for op in self.goTo:
			out = fw.funcDict[op[0]](self, op)

	def backward(self):
		"""Back-propagate gradients from this node to its sources.

		Equivalent to the original try/except dance, but the graph-input
		case (no ``comeFrom``) returns early instead of relying on a
		caught UnboundLocalError.
		"""
		if not self.comeFrom:
			return
		objs = bw.funcDict[self.comeFrom[0]](self)
		try:
			for obj in objs:
				obj.backward()
		except TypeError:
			# the backward function returned a single node
			objs.backward()

	def step(self, lr, way=None, b1=0.9, b2=0.999):
		"""Apply one gradient-descent update to self.value in place.

		lr  -- learning rate
		way -- optimizer override: 'SGD' (also None), 'Adam', 'Momentum',
		       'RMSProp' or 'AdaGrad'; sticky once set
		b1  -- first-moment / decay factor
		b2  -- second-moment decay factor (Adam only)
		"""
		if way:
			self.way = way

		if self.way is None or self.way == 'SGD':
			self.value -= lr*self.grad

		elif self.way == 'Adam':
			self.momentum = b1*self.momentum+(1-b1)*self.grad
			self.v = b2*self.v+(1-b2)*(self.grad**2)
			self.value -= lr*self.momentum/(1e-8+self.v**0.5)

		elif self.way == 'Momentum':
			self.momentum = b1*self.momentum-lr*self.grad
			self.value += self.momentum

		elif self.way == 'RMSProp':
			self.v = b1*self.v + (1-b1)*(self.grad**2)
			self.value -= (lr*self.grad/(1e-8+self.v**0.5))

		elif self.way == 'AdaGrad':
			self.v = self.v+(self.grad)**2
			self.value -= lr*self.grad/(1e-8+self.v**0.5)

	def dropout(self, dropoutRate):
		"""Zero roughly size*dropoutRate elements of the value in place.

		NOTE: indices are drawn with replacement, so the number of
		elements actually zeroed can be smaller than requested.
		"""
		n = int(self.size*dropoutRate)
		cache = self.value.ravel()
		cache[np.random.randint(self.size, size=n)] = 0
		self.value = cache.reshape(self.shape)

	def zeroGrad(self):
		"""Reset the accumulated gradient of this tensor to zero."""
		self.grad = 0

def zeroGrad(*tensors):
	"""Reset the accumulated gradient of every tensor passed in.

	Convenience wrapper that simply calls each argument's own
	``zeroGrad`` method.
	"""
	for tensor in tensors:
		tensor.zeroGrad()










def timetest():
	"""Time two optimizers (SGD and Momentum) on the same tiny problem.

	Fits ``b`` in ``a.matmul(b) ~ y`` until the mean loss drops below
	1e-4 and prints the elapsed wall-clock time for each optimizer.
	Requires the forward/backward (fw/bw) modules at run time.
	"""
	st = time()
	print('SGD:')
	a = Tensor((2, 2))
	b = Tensor((2, 2))
	y = Tensor((2, 2))
	c = a.matmul(b)
	loss = c.MSELoss(y)
	# BUG FIX: float inputs -- step() subtracts in place, which numpy
	# rejects on int arrays (the __main__ demo already used floats)
	a.inputValue([
		[1., 2],
		[3, 4]])
	b.inputValue([
		[5., 6],
		[7, 8]])
	y.inputValue([
		[1., 1],
		[1, 1]])
	lr = 0.01
	l = 1
	# BUG FIX: the condition was inverted (`while l<=0.0001`) so the loop
	# never ran, and `zerograd(zeroall=True)` was an undefined name --
	# use the module-level zeroGrad() helper on the whole graph.
	while l > 0.0001:
		zeroGrad(a, b, y, c, loss)
		a.forward()
		loss.backward()
		b.step(lr)
		l = np.mean(loss.value)
	print(time()-st)

	st = time()
	print('Momentum:')
	a = Tensor((2, 2))
	b = Tensor((2, 2), way='Momentum')
	y = Tensor((2, 2))
	c = a.matmul(b)
	loss = c.MSELoss(y)
	a.inputValue([
		[1., 2],
		[3, 4]])
	b.inputValue([
		[5., 6],
		[7, 8]])
	y.inputValue([
		[1., 1],
		[1, 1]])
	lr = 0.01
	b1 = 0.3
	# BUG FIX: `l` still held the converged value from the SGD run, so
	# even with the corrected condition the second loop would be skipped
	l = 1
	while l > 0.0001:
		zeroGrad(a, b, y, c, loss)
		a.forward()
		loss.backward()
		b.step(lr, b1=b1)
		l = np.mean(loss.value)
	print(time()-st)



if __name__=='__main__':
	# Demo: fit b so that a.matmul(b) approximates y, using Momentum.
	a = Tensor((2, 2))
	b = Tensor((2, 2), way='Momentum')
	y = Tensor((2, 2))

	c = a.matmul(b)
	loss = c.MSELoss(y)

	a.inputValue([
		[1., 2],
		[3, 4]])
	b.inputValue([
		[5., 6],
		[7, 8]])
	y.inputValue([
		[1., 1],
		[1, 1]])
	lr = 0.01
	b1 = 0.7
	b2 = 0.5
	for i in range(1000000):
		# BUG FIX: the training statements were inside the i%10000 guard,
		# so only 100 of the 1,000,000 iterations did any work; train on
		# every iteration and only print progress every 10000 steps.
		# Also `zerograd(zeroall=True)` was an undefined name -- use the
		# module-level zeroGrad() helper on the whole graph.
		zeroGrad(a, b, y, c, loss)
		a.forward()
		loss.backward()
		b.step(lr, b1=b1, b2=b2)
		if i % 10000 == 0:
			print(np.mean(loss.value))








	




























