import numpy as np
import scipy.linalg as linalg
import imp

def gradientDescent(x, y, stupenPolynomu, presnost, alpha):
    """Fit a polynomial of degree ``stupenPolynomu`` to data by batch gradient descent.

    Model: y = theta[0]*x^0 + theta[1]*x^1 + ... + theta[n]*x^n = np.dot(theta, X).
    The x values are standardized (zero mean, unit std) internally so one learning
    rate works across all powers; the returned ``theta`` is mapped back to the
    original basis via a pseudo-inverse of the un-standardized Vandermonde matrix.

    Parameters
    ----------
    x, y : 1-D array-like
        Data points.
    stupenPolynomu : int
        Polynomial degree n.
    presnost : float
        Stop once the per-iteration error decrease falls below this value.
    alpha : float
        Learning rate.

    Returns
    -------
    theta : ndarray, shape (iterations+1, n+1)
        Coefficients in the original basis, one row per iteration.
    E : list of float
        Half-mean-squared error at each iteration.
    it : int
        Number of iterations performed.
    thetaS : ndarray
        Coefficients in the standardized basis, one row per iteration.
    XS : ndarray, shape (n+1, len(x))
        Standardized Vandermonde matrix (row i is xS**i).

    Usage::

        theta, E, it, thetaS, XS = gradientDescent(x, y, 3, 1e-6, 0.5)
    """
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)

    # Standardize x so the descent is well conditioned.
    xS = (x - np.mean(x)) / np.std(x)

    n = stupenPolynomu + 1
    m = len(xS)

    # Vandermonde matrices: row i holds x**i (original and standardized).
    X = np.vstack([x ** i for i in range(n)])
    XS = np.vstack([xS ** i for i in range(n)])

    THETA = [np.random.randn(n)]  # one coefficient row per iteration
    E = []
    it = 0

    def countError():
        # Half mean squared error of the current fit (standardized basis).
        # Reads `it` from the enclosing scope, so it always scores THETA[it].
        return 1.0 / (2 * m) * np.sum((np.dot(THETA[it], XS) - y) ** 2)

    E.append(countError())
    while True:
        it += 1
        # Simultaneous (vectorized) batch-gradient update of all coefficients.
        residual = np.dot(THETA[it - 1], XS) - y
        THETA.append(THETA[it - 1] - (alpha * 1.0 / m) * np.dot(XS, residual))
        E.append(countError())

        if E[it] > E[it - 1]:
            print('algoritmus diverguje, nastavte mensi alpha')
            break

        if (E[it - 1] - E[it]) < presnost:
            print('Splnena presnost. Chyba: ' + str(round(E[it], 2)))
            break

    thetaS = np.array(THETA)  # cast list of rows into a 2-D array

    # Map standardized-basis coefficients back to the original basis:
    # the predictions thetaS @ XS are re-expressed against X via its pseudo-inverse.
    theta = np.dot(np.dot(thetaS, XS), linalg.pinv(X))

    return theta, E, it, thetaS, XS

def gradientDescentMultivar(X, y, presnost, alpha):
    """Multivariate linear regression fitted by batch gradient descent.

    Model: y = theta[0]*X[0] + theta[1]*X[1] + ... = np.dot(theta, X),
    where each row X[i] is one feature. Row X[0] is left unstandardized
    (it is expected to be the bias row of ones); every other row is
    standardized to zero mean and unit std before descending, and the
    result is mapped back to the original basis via a pseudo-inverse.

    Parameters
    ----------
    X : ndarray, shape (n_features, n_samples)
        Feature matrix, one feature per row; X[0] should be all ones.
    y : 1-D array-like, length n_samples
        Target values.
    presnost : float
        Stop once the per-iteration error decrease falls below this value.
    alpha : float
        Learning rate.

    Returns
    -------
    theta : ndarray, shape (iterations+1, n_features)
        Coefficients in the original basis, one row per iteration.
    E : list of float
        Half-mean-squared error at each iteration.
    it : int
        Number of iterations performed.
    thetaS : ndarray
        Coefficients in the standardized basis, one row per iteration.
    XS : ndarray
        Standardized feature matrix.

    Usage::

        theta, E, it, thetaS, XS = gradientDescentMultivar(X, y, 1e-6, 0.5)
    """
    # Standardize every feature row except X[0] (assumed bias/ones row).
    XS = np.zeros(X.shape)
    XS[0] = X[0]
    for i in range(1, X.shape[0]):
        XS[i] = (X[i] - np.mean(X[i])) / np.std(X[i])

    m = XS.shape[1]
    THETA = [np.random.rand(X.shape[0])]  # first theta drawn uniformly at random
    E = []
    it = 0

    def countError(XS, y, THETA, it):
        # Half mean squared error of THETA[it] on the standardized features.
        return 1.0 / (2 * m) * np.sum((np.dot(THETA[it], XS) - y) ** 2)

    E.append(countError(XS, y, THETA, it))

    while True:
        it += 1
        # Simultaneous (vectorized) batch-gradient update of all coefficients.
        residual = np.dot(THETA[it - 1], XS) - y
        THETA.append(THETA[it - 1] - (alpha * 1.0 / m) * np.dot(XS, residual))
        E.append(countError(XS, y, THETA, it))

        if E[it] > E[it - 1]:
            print('algoritmus diverguje, nastavte mensi alpha')
            break

        if (E[it - 1] - E[it]) < presnost:
            print('Splnena presnost. Chyba: ' + str(round(E[it], 2)) + '. Iteraci: ' + str(it))
            break

    thetaS = np.array(THETA)  # cast list of rows into a 2-D array

    # Map standardized-basis coefficients back to the original basis:
    # the predictions thetaS @ XS are re-expressed against X via its pseudo-inverse.
    theta = np.dot(np.dot(thetaS, XS), linalg.pinv(X))

    return theta, E, it, thetaS, XS
	
def puleniIntervalu(x, y, presnost, krok):
    """Fit a FIRST-degree polynomial y = a*x + b by interval-halving search.

    Starting from a random theta = [a, b], each iteration probes a step in
    each parameter separately: if the probe does not lower the error, that
    parameter's step is halved and its sign flipped. The combined step in
    both parameters is then taken unconditionally. The search stops when the
    error change between iterations drops below ``presnost``.

    Parameters
    ----------
    x, y : 1-D array-like
        Data points.
    presnost : float
        Convergence tolerance on the error change.
    krok : sequence of two floats
        Initial step sizes [step_a, step_b]; the caller's sequence is NOT modified.

    Returns
    -------
    theta : list
        Parameter pair [a, b] for every iteration (last entry is the result).
    E : list of float
        Half-mean-squared error at each iteration.
    it : int
        Number of iterations performed.

    Usage::

        theta, E, it = puleniIntervalu(x, y, 1e-6, [1.0, 1.0])
    """
    # Work on a local copy: the original mutated the caller's list in place.
    krok = list(krok)

    theta = [np.random.rand(2)]  # random starting point [a, b]
    E = []
    it = 0

    def countError():
        # Half mean squared error of the latest theta entry (reads `it`).
        return 1.0 / (2 * len(x)) * sum(((theta[it][0] * x + theta[it][1]) - y) ** 2)

    E.append(countError())

    while True:
        it += 1

        # Probe a step in the slope only; on failure halve and reverse it.
        theta.append([theta[it - 1][0] + krok[0], theta[it - 1][1]])
        if countError() >= E[it - 1]:
            krok[0] *= -0.5

        # Probe a step in the intercept only; on failure halve and reverse it.
        theta[it] = [theta[it - 1][0], theta[it - 1][1] + krok[1]]
        if countError() >= E[it - 1]:
            krok[1] *= -0.5

        # Take the combined step (with possibly updated steps) unconditionally.
        theta[it] = [theta[it - 1][0] + krok[0], theta[it - 1][1] + krok[1]]
        E.append(countError())

        if abs(E[it] - E[it - 1]) < presnost:
            break

    return theta, E, it
