import numpy as np
import math

#TODO: handle the case where fewer than K candidate variables are left to select.
def select_variance(A_u, K, mu, Phi):
	"""Select the K coordinates with the largest total variance.

	Parameters
	----------
	A_u : iterable of int
		Indices of already-active coordinates; they are excluded from
		selection by zeroing their score.
	K : int
		Number of coordinates to select.
	mu : unused here; kept for signature parity with select_active.
	Phi : 2-D array
		Per-sample variances; the score of a coordinate is its column sum.

	Returns
	-------
	ndarray of int
		Indices of the K selected coordinates.
	"""
	delta_u = np.sum(Phi, axis=0)
	# Zero the score of already-active coordinates so they cannot win
	# (variances are assumed non-negative, so 0 is the floor).
	for i in A_u:
		delta_u[i] = 0.

	# BUG FIX: argsort is ascending, so taking the first K used to return
	# exactly the zeroed (already active) coordinates. Pick the K LARGEST
	# scores instead.
	S_u = np.argsort(delta_u)[::-1][:K]
	return S_u
	
	
#TODO: handle the case where fewer than K candidate variables are left to select.
#TODO: optimize the conditional-variance coefficient computation.
def select_active(A, K, N, mu, Phi, Cov, nA):
	"""Select the K best coordinates to activate among the candidates in nA.

	Each candidate y is scored by the ratio of its conditional variance
	given the active set A to its conditional variance given the other
	inactive candidates (a mutual-information-style criterion); the K
	highest-scoring candidates are returned.

	Parameters
	----------
	A : list of int
		Currently active coordinate indices (may be empty).
	K : int
		Number of coordinates to select.
	N : int
		Total number of coordinates (Cov is N x N).
	mu, Phi : per-coordinate Gaussian means / diagonal variances, indexable
		by coordinate (mu[i], Phi[i] are 1-D arrays).
	Cov : ndarray, shape (N, N)
		Affinity matrix; REFRESHED IN PLACE with pairwise KL_divergence
		values before scoring.
	nA : list of int
		Candidate (inactive) coordinate indices.

	Returns
	-------
	ndarray of int
		Indices of the K selected coordinates.
	"""
	# Refresh the affinity matrix. KL_divergence is symmetric in (i, j)
	# (its internal sums are commutative), so fill both triangles at once.
	for i in range(N):
		for j in range(i, N):
			Cov[i, j] = Cov[j, i] = KL_divergence(mu[i], Phi[i], mu[j], Phi[j], K)

	# Grade every coordinate.
	delta = []
	for y in range(N):
		if y not in nA:
			# Not a candidate: score 0 so it is never picked.
			delta.append(0.)
			continue

		# Candidates other than y (first occurrence of y removed, as before).
		current_nA = list(nA)
		current_nA.remove(y)

		# Numerator: conditional variance of y given the active set A
		# (1 when A is empty). pinv guards against singular sub-blocks.
		if len(A) == 0:
			up = 1.
		else:
			Sigma_yA = Cov[y, A]
			up = 1. - np.dot(Sigma_yA, np.dot(np.linalg.pinv(Cov[np.ix_(A, A)]), Sigma_yA))

		# Denominator: conditional variance of y given the other candidates.
		if len(current_nA) == 0:
			down = 1.
		else:
			Sigma_ynA = Cov[y, current_nA]
			down = 1. - np.dot(Sigma_ynA, np.dot(np.linalg.pinv(Cov[np.ix_(current_nA, current_nA)]), Sigma_ynA))

		delta.append(up / down)

	# BUG FIX: ascending argsort used to select the zeroed non-candidates;
	# pick the K LARGEST scores instead.
	S = np.argsort(delta)[::-1][:K]
	return S
	
def KL_divergence(mu_i, Phi_i, mu_j, Phi_j, K):
	"""Return exp of the symmetrized KL divergence of two diagonal Gaussians.

	Each Gaussian has mean mu_* and diagonal covariance with diagonal Phi_*
	(length-K 1-D arrays of positive variances). The exponent is the
	Jeffreys divergence 1/2*[KL(i||j) + KL(j||i)]; identical Gaussians
	yield exactly 1.0.

	Parameters
	----------
	mu_i, Phi_i : 1-D arrays, mean and variances of the first Gaussian.
	mu_j, Phi_j : 1-D arrays, mean and variances of the second Gaussian.
	K : int, the dimensionality (length of the arrays).
	"""
	# Trace terms: tr(Sigma_i^-1 Sigma_j) + tr(Sigma_j^-1 Sigma_i),
	# elementwise for diagonal covariances.
	traces = 1/2*(np.sum(np.divide(Phi_j, Phi_i)) + np.sum(np.divide(Phi_i, Phi_j)))

	# Mahalanobis terms. For a diagonal covariance this is sum(delta^2/Phi):
	# O(K) instead of building a K x K np.diag matrix (old code was O(K^2)).
	delta = mu_i - mu_j
	quadra = 1/2*(np.dot(delta, np.divide(delta, Phi_j)) + np.dot(delta, np.divide(delta, Phi_i)))

	# The log-determinant contributions of the two KL directions cancel, so
	# this term is analytically zero; kept for numerical parity with the
	# original. NOTE(review): log(sum(Phi)) is not the diagonal log-det
	# (that would be sum(log(Phi))) -- harmless only because the two halves
	# cancel exactly.
	determ = 1/2*(math.log(np.sum(Phi_i)/np.sum(Phi_j)) + math.log(np.sum(Phi_j)/np.sum(Phi_i)))

	return math.exp(traces + quadra + determ - K)