import numpy as np
import pickle
from sys import argv
import utils as ut
import math
#we have mu_u, mu_v, rho_u, rho_v, which are np.arrays of size N_u*K or N_v*K

#Call to the reading function. Return a np.array of size N_u*N_v


#RMSE function
def RMSE(x , muU , muV , muAlpha , muBeta) :
	"""Root-mean-square error over the observed entries of x.

	x       : (N_u, N_v) rating matrix; entries equal to -1 mark missing cells.
	muU     : (K, N_u) posterior means of the user factors (one column per user).
	muV     : (K, N_v) posterior means of the item factors (one column per item).
	muAlpha : (N_u,) user bias means.
	muBeta  : (N_v,) item bias means.

	Returns sqrt(mean((x[i,j] - muAlpha[i] - muBeta[j] - <muU[:,i], muV[:,j]>)^2))
	over observed cells only. Returns NaN when no cell is observed (the
	previous looped version raised ZeroDivisionError in that case).
	"""
	mask = (x != -1)
	count = np.count_nonzero(mask)
	if count == 0:
		# RMSE is undefined with zero observations; avoid dividing by zero.
		return float('nan')
	# Full prediction matrix in one vectorized expression:
	# pred[i, j] = muAlpha[i] + muBeta[j] + dot(muU[:, i], muV[:, j])
	pred = muAlpha[:, np.newaxis] + muBeta[np.newaxis, :] + np.dot(muU.T, muV)
	resid = (x - pred)[mask]
	return math.sqrt(np.dot(resid, resid) / count)


#X = read()
# Script entry: expects exactly one CLI argument, the path to a pickled
# 2-D rating matrix where -1 marks missing entries.
script , filename = argv
# NOTE(review): text mode "r" only works for protocol-0 (ASCII) pickles
# under Python 2; a binary pickle would need "rb" -- confirm the dump format.
infile = open(filename , "r")
print "Loading pickled dump file"
# SECURITY: pickle.load can execute arbitrary code from the file; only
# feed it trusted dumps.
x = pickle.load(infile)
infile.close()

# Restrict to a 50x50 sub-matrix (presumably to keep the run small -- TODO confirm).
x = x[0:50,0:50]

N_u = x.shape[0]
N_v = x.shape[1]

print N_u , N_v

#Scale the X matrix by the maximum value to avoid overflow
# Manual scan for the maximum entry. The scan includes the -1 "missing"
# sentinels, which is harmless as long as at least one rating exceeds -1.
m = x[0,0]
for i in range(0,N_u) : 
	for j in range(0,N_v) : 
		if x[i,j] > m : 
			m = x[i,j]
print "Max value = " , m
print "Scaling by max"
# Divide every observed entry by the maximum; -1 sentinels are left untouched.
# NOTE(review): if the pickled matrix has an integer dtype this is floor
# division and will zero out most entries -- confirm x is float.
for i in range(0,N_u) : 
	for j in range(0,N_v) : 
		if x[i,j] != -1 :
			x[i,j] = x[i,j]/m


# Latent dimensionality of the factor model.
K = 20
# Batch-size parameter for the active-learning selection.
# NOTE(review): reuses the name `m` that previously held the max rating.
m = 5


# Variational posterior state, initialised to all ones.
# Factor matrices are stored K-by-N: one column per user / per item.
muU, sigSqU = np.ones((K, N_u)), np.ones((K, N_u))
muV, sigSqV = np.ones((K, N_v)), np.ones((K, N_v))

# Per-user bias posteriors (mean and variance).
muAlpha = np.ones(N_u)
sigSqAlpha = np.ones(N_u)

# Per-item bias posteriors (mean and variance).
muBeta = np.ones(N_v)
sigSqBeta = np.ones(N_v)

# Gamma-style hyper-posterior parameters, one pair per latent dimension.
zeta1 = np.ones(K)
zeta2 = np.ones(K)


#PARAMETERS
eta = 2.5e-7           # learning rate
t = 0.9                # decay factor applied to eta after every epoch
SigmaSqEpsilon = 1e-6  # variance of the observation noise
tauSqAlpha = 1         # prior scale for the alpha (user) bias
tauSqBeta = 1          # prior scale for the beta (item) bias
kappa1 = 1e-6
kappa2 = 1e-6

# Pools of already-selected user / item indices.
A_u, A_v = [], []

epoch = 0

# Covariance accumulators used by the (currently disabled) active selection.
Cov_u = np.zeros((N_u, N_u))
Cov_v = np.zeros((N_v, N_v))


# Main loop: one round of subset selection followed by one sweep of
# variational updates over that subset. Currently capped at a single epoch.
while(epoch < 1):
	print "Epoch = " , epoch
	#What's the complementary of our subset? (indices not yet selected)
	nA_u = [i for i in range(N_u) if i not in A_u]
	nA_v = [i for i in range(N_v) if i not in A_v]


	#Selection of the best subset using active learning
	#S_u = ut.select_active(A_u, K, N_u, muU, sigSqU, Cov_u, nA_u)
	#S_v = ut.select_active(A_v, K, N_v, muV, sigSqV, Cov_v, nA_v)
	# Variance-based selection; semantics live in utils.select_variance
	# (not visible in this file -- presumably returns a list of indices).
	S_u = ut.select_variance(A_u , K , muU , sigSqU)
	S_v = ut.select_variance(A_v , K , muV , sigSqV)
	
		
	#NOW TIME TO DO THE UPDATES
	print "Updates!!!"
	###########################################################################################################################################
	# Coordinate-style updates, one latent dimension k at a time.
	for k in range(0,K) : 
		#First update muU and sigSqU
		for i in S_u : 
			sumMuj = 0
			sumSigj = 0
			muUi = muU[:,i]
			for j in S_v :
				muVj = muV[:,j]
				if x[i,j] != -1 : 
					# Accumulate the natural-gradient-like terms over
					# observed (i, j) cells only.
					# NOTE(review): sumMuj is accumulated with a minus sign
					# here but the analogous muBeta term below uses plus --
					# confirm the intended sign against the derivation.
					sumMuj = sumMuj - (1/SigmaSqEpsilon) * ((x[i,j] - muAlpha[i] - muBeta[j] - np.dot(muUi,muVj)) * muV[k,j] + muU[k,i] * sigSqV[k,j])
					sumSigj = sumSigj + (1/SigmaSqEpsilon) * (muV[k,j] * muV[k,j] + sigSqV[k,j])
			# NOTE(review): the U update uses a unit prior term (bare muU[k,i])
			# while the V update below scales by zeta1/zeta2 -- presumably the
			# prior on U is fixed; confirm the asymmetry is intentional.
			muU[k,i] = muU[k,i] + eta * (muU[k,i] + sumMuj)
			sigSqU[k,i] = (1-eta) * sigSqU[k,i] + eta * (1/(1 + sumSigj))
	
		#Now update muV and sigSqV
		for j in S_v : 
			sumMui = 0
			sumSigi = 0
			muVj = muV[:,j]
			for i in S_u : 
				muUi = muU[:,i]
				if x[i,j] != -1 : 
					sumMui = sumMui - (1/SigmaSqEpsilon) * ((x[i,j] - muAlpha[i] - muBeta[j] - np.dot(muUi,muVj)) * muU[k,i] + muV[k,j] * sigSqU[k,i])
					sumSigi = sumSigi + (1/SigmaSqEpsilon) * (muU[k,i] * muU[k,i] + sigSqU[k,i])
			muV[k,j] = muV[k,j] + eta * (((muV[k,j] * zeta1[k])/zeta2[k]) + sumMui)
			sigSqV[k,j] = (1-eta) * sigSqV[k,j] + eta * (1/((zeta1[k]/zeta2[k]) + sumSigi))

		# Update the per-dimension hyper-parameters zeta1/zeta2 from the
		# freshly updated V column means.
		sumZ1 = 0
		sumZ2 = 0
		for j in S_v :
			# NOTE(review): K/2 is Python 2 integer (floor) division -- with
			# K = 20 this gives 10 exactly, but an odd K would truncate.
			sumZ1 = sumZ1 + K/2
			sumZ2 = sumZ2 + muV[k,j] * muV[k,j]
		zeta1[k] = (1-eta) * zeta1[k] + eta * (kappa1 + sumZ1)
		zeta2[k] = (1-eta) * zeta2[k] + eta * (kappa2 + 0.5 * sumZ2) 
			
	###########################################################################################################################################
		
	#Now perform the updates for muAlpha and sigSqAlpha
	for i in S_u : 
		muUi = muU[:,i]
		sumMui = 0
		sumSigi = 0
		for j in S_v : 
			muVj = muV[:,j]
			if x[i,j] != -1 : 
				sumMui = sumMui - (1/SigmaSqEpsilon) * (x[i,j] - muAlpha[i] - muBeta[j] - np.dot(muUi,muVj))
				sumSigi = sumSigi + (1/SigmaSqEpsilon)
		# Debug output around the alpha update.
		print "\n\nNon Updated muAlpha_i = ", muAlpha[i]
		muAlpha[i] = muAlpha[i] + eta * ((muAlpha[i]/tauSqAlpha) + sumMui)
		print "SumMui = ",sumMui
		print "Updated muAlpha_i = ", muAlpha[i]
		sigSqAlpha[i] = (1-eta) * sigSqAlpha[i] + eta * (1/(sumSigi + (1/math.sqrt(tauSqAlpha))))
	
	#Now perform the updates for muBeta and sigSqBeta
	for j in S_v : 
		muVj = muV[:,j]
		sumMuj = 0
		sumSigj = 0
		for i in S_u : 
			muUi = muU[:,i]
			if x[i,j] != -1 : 
				# NOTE(review): accumulated with plus here, unlike the minus
				# used for sumMui in the alpha update -- verify the signs.
				sumMuj = sumMuj + (1/SigmaSqEpsilon) * (x[i,j] - muAlpha[i] - muBeta[j] - np.dot(muUi,muVj))
				sumSigj = sumSigj + (1/SigmaSqEpsilon)
		muBeta[j] = muBeta[j] + eta * ((muBeta[j]/tauSqBeta) + sumMuj)
		sigSqBeta[j] = (1-eta) * sigSqBeta[j] + eta * (1/(sumSigj + (1/math.sqrt(tauSqBeta))))
				
	
	#updating A: fold this round's selections into the seen pools.
	A_u.extend(S_u)
	A_v.extend(S_v)
	
	# Decay the learning rate and report progress.
	eta = t * eta
	epoch = epoch + 1
	err = RMSE(x , muU , muV , muAlpha , muBeta)
	print "RMSE = ", err




