import numpy as np
from time import time
import sys
sys.path.append("..")
import tensor as ts
from tensor import Tensor
import nn
from optimizer import Adam

class A:
	"""Minimal wrapper class demonstrating operator overloading via ``__mul__``."""

	def __init__(self, value):
		# Payload that the ``*`` operator acts on.
		self.value = value

	def __mul__(self, other):
		"""Return the product of the two wrapped values (a plain number, not an A)."""
		return self.value * other.value

def func(*args, arg=False):
	"""Print the positional-args tuple and the keyword-only flag, one per line.

	Demo of ``*args`` plus a keyword argument that must follow it; returns None.
	"""
	# Single print call with a newline separator — same output as two prints.
	print(args, arg, sep="\n")

if __name__ == '__main__':
	# Smoke test: one linear layer + softmax cross-entropy on random data,
	# optimized with Adam for a few steps. Uses the project-local Tensor API.
	inputs = Tensor((60, 784))   # batch of 60 flattened 28x28-style samples
	targets = Tensor((60, 10))   # 10-way targets (random here, so loss is noise)
	criterion = nn.softmax_crossEntropy()
	inputs.random()
	targets.random()
	logits = nn.linear(784, 10)(inputs)
	loss = criterion(logits, targets)
	optimizer = Adam(0.2)

	for step in range(10):
		optimizer.zeroGrad()
		# NOTE(review): forward is driven from the input tensor while backward
		# starts at the loss — presumably the graph propagates from `inputs`;
		# confirm against the Tensor API.
		inputs.forward()
		loss.backward()
		optimizer.step()
		loss.show_mean()

	# a = np.array([
	# [[[1.,2,3,4],
	# [5,6,7,8],
	# [9,1,2,3],
	# [4,5,6,7]],
	# [[1.,2,3,4],
	# [5,6,7,8],
	# [9,1,2,3],
	# [4,5,6,7]]],


	# [[[1.,2,3,4],
	# [5,6,7,8],
	# [9,1,2,3],
	# [4,5,6,7]],
	# [[1.,2,3,4],
	# [5,6,7,8],
	# [9,1,2,3],
	# [4,5,6,7]]],


	# [[[1.,2,3,4],
	# [5,6,7,8],
	# [9,1,2,3],
	# [4,5,6,7]],
	# [[1.,2,3,4],
	# [5,6,7,8],
	# [9,1,2,3],
	# [4,5,6,7]]]
	# ])

	
	# A = Tensor(a.shape)
	# A.inputValue(a)
	# A.dropout(0.5)
	# print(A)
	# out = A.maxPool2d()
	# A.forward()
	# out.grad = np.ones(out.shape)*2
	# out.backward()

	# print(A.grad)	


	







