import numpy as np
import math
from numpy.linalg import eigvalsh
from numpy.linalg import norm
from numpy import tile
from scipy.linalg import block_diag
from sympy import symarray,solve
import os


def agentFunction(K_f):
	"""Load cached per-node data into module globals from .npy files on disk.

	Args:
		K_f: integer file-id; used as the first %d of the cache filenames.

	Side effects: rebinds the globals w, W and bias_v_n when the cache exists.

	NOTE(review): globals are only overwritten when "./1w_%d_%d.npy" exists;
	otherwise w/W/bias_v_n keep their zero-initialized module-level values.
	Also assumes the matching 2W_* and vnbias_* files exist whenever the
	1w_* file does — np.load would raise FileNotFoundError otherwise; confirm.
	"""
	global w,W
	global N,K
	global bias_v_n
	global B,u_b
	
	if os.path.exists("./1w_%d_%d.npy"%(K_f,N))==True:
		w=np.load("./1w_%d_%d.npy"%(K_f,N))
		W=np.load("./2W_%d_%d.npy"%(K_f,N))
		bias_v_n=np.load("./vnbias_%d_%f_%d.npy"%(B,u_b,N))


# Partition nodes with high task similarity into the same zone
def Lipschitz_Convexity():
	"""Scan every node's Gram matrix W_n^T W_n and return curvature bounds.

	Returns:
		M:   largest eigenvalue over all nodes (smoothness constant),
		     floored at the initial value 0.
		m:   smallest eigenvalue over all nodes (strong-convexity constant),
		     capped at the initial value 100000.
		N_m: per-node list of smallest eigenvalues.
	"""
	global W
	M, m = 0, 100000
	N_m = []
	for node in range(N):
		# eigvalsh returns eigenvalues in ascending order
		spectrum = eigvalsh(W[node].T @ W[node])
		largest, smallest = spectrum[-1], spectrum[0]
		N_m.append(smallest)
		M = max(M, largest)
		m = min(m, smallest)
	return M, m, N_m


def graph(num_zone):
	"""Build graph matrices for the complete graph on num_zone vertices.

	Returns:
		(laplacian, signless_laplacian, degree) as dense numpy arrays,
		i.e. D - A, D + A and D for the all-to-all adjacency A.
	"""
	adjacency = np.ones((num_zone, num_zone)) - np.eye(num_zone)
	degree = np.diag(adjacency.sum(axis=1))
	laplacian = degree - adjacency
	signless = degree + adjacency
	return laplacian, signless, degree


def hypergraph(num_node,num_zone,N_m):
	"""Assign nodes to zones as evenly as possible.

	Args:
		num_node: number of agent nodes.
		num_zone: number of zones (partitions).
		N_m: per-node values; currently unused, kept for interface
		     compatibility (a similarity-based reordering of A_N could
		     use it).

	Returns:
		C_L: (num_node, num_zone) 0/1 indicator matrix; column l marks the
		     nodes of zone l. The first num_node % num_zone zones get one
		     extra node.
		A_N: (num_node, num_node) identity (nodes kept in original order).
	"""
	A_N = np.eye(num_node)
	
	base = num_node // num_zone
	n_large = num_node - base * num_zone
	zone_sizes = [base + 1] * n_large + [base] * (num_zone - n_large)
	
	C_L = np.zeros((num_node, num_zone))
	start = 0
	for zone, size in enumerate(zone_sizes):
		C_L[start:start + size, zone] = 1.0
		start += size
	
	return C_L, A_N


def graph_const(num_zone):
	"""Return (l_max, l_min) spectral-bound constants for num_zone zones:
	2*(num_zone - 1) and num_zone respectively."""
	return 2 * (num_zone - 1), num_zone


def penalty_const(num_node,num_zone):
	"""Return the penalty parameters (tau, rho) and the per-node
	strong-convexity list N_m.

	Args:
		num_node: number of agent nodes (unused by the current hard-coded
		          choice; kept for interface compatibility).
		num_zone: number of zones (unused likewise).

	Both penalties are currently fixed to 1; earlier theory-derived
	formulas based on the Lipschitz/convexity bounds and the graph
	constants were abandoned during tuning and have been removed.
	"""
	# Still needed for its N_m return value, which callers rely on.
	M, m, N_m = Lipschitz_Convexity()
	
	rho = tau = 1
	print(tau)
	print(rho)

	return tau, rho, N_m


def func_value(v_vector):
	"""Global least-squares objective: sum over nodes of
	0.5 * ||w_n - W_n v_n||^2 for the stacked iterate v_vector (N, K)."""
	total = 0.0
	for node in range(N):
		residual = w[node, :] - np.dot(W[node, :, :], v_vector[node, :])
		total += 0.5 * norm(residual) ** 2
	return total


def agent_v_update(z_vector,x_vector):
	"""Agent-side primal update.

	For each node n, solve the K x K linear system
		(W_n^T W_n + tau*I) v_n = W_n^T w_n + tau*(A_N^T C_L z)_n - x_n.

	Args:
		z_vector: (L, K) coordinator consensus variables.
		x_vector: (N, K) agent dual variables.

	Returns:
		(N, K) array of updated local models.
	"""
	global A_N,C_L
	global tau
	
	# Broadcast each node's zone variable down to the node level.
	v_z_vector=np.dot(np.dot(A_N.T,C_L),z_vector)

	v_new_update=np.zeros((N,K))
	for n in range(0,N):
		W_n=np.dot(W[n,:,:].T,W[n,:,:])
		rhs=np.dot(W[n,:,:].T,w[n,:])+tau*v_z_vector[n,:]-x_vector[n,:]
		# Solve numerically with np.linalg.solve instead of sympy's
		# symbolic solver: identical solution (the system matrix is
		# PSD + tau*I, hence nonsingular for tau > 0), orders of
		# magnitude faster, and no reliance on solution-dict key order.
		v_new_update[n,:]=np.linalg.solve(W_n+tau*np.eye(K),rhs)
	
	return v_new_update


def agent_x_update(v_new_vector,z_new_vector,x_vector):
	"""Agent dual update: x += tau * (v - A_N^T C_L z), stepping the
	multipliers toward agreement between local models and zone variables."""
	node_level_z = A_N.T @ C_L @ z_new_vector
	return x_vector + tau * (v_new_vector - node_level_z)


# Aggregate with CTM (coordinate-wise trimmed mean) first, then partition.
# CTM performs well when the number of nodes is large.
def aggregation_rule(v_new_vector,b):
	"""Coordinate-wise trimmed mean (CTM).

	For every coordinate k, the b largest and b smallest entries across
	nodes are replaced by the mean of the remaining entries, defending
	against Byzantine outliers. Modifies v_new_vector in place and
	returns it; b == 0 disables trimming.
	"""
	if b == 0:
		return v_new_vector
	
	for k in range(K):
		order = v_new_vector[:, k].argsort()
		highest = order[-b:][::-1]
		lowest = order[:b]
		
		survivors = np.delete(v_new_vector[:, k], np.concatenate((highest, lowest)))
		replacement = np.mean(survivors)
		
		v_new_vector[highest, k] = replacement
		v_new_vector[lowest, k] = replacement
	
	return v_new_vector


def coordinator_u_update(v_new_vector,z_new_vector,u_vector):
	"""Coordinator dual update: u += tau * (C_L^T A_N v - C_L^T C_L z),
	i.e. the gap between zone-aggregated agent models and the (zone-size
	weighted) consensus variables."""
	zone_sum_of_v = C_L.T @ A_N @ v_new_vector
	weighted_z = C_L.T @ C_L @ z_new_vector
	return u_vector + tau * (zone_sum_of_v - weighted_z)


def coordinator_z_update(v_new_vector,z_vector,u_vector,r_vector):
	"""Coordinator consensus-variable update.

	Each row l satisfies a linear equation with a *scalar* coefficient:
		(tau*S[l,l] + 2*rho*D_L[l,l]) * z_l
			= tau*(C_L^T A_N v)_l + u_l + rho*(UL_L z)_l - r_l,
	where S = C_L^T C_L holds the zone sizes on its diagonal. The row is
	therefore obtained by direct division — no need for sympy's symbolic
	solver (which also printed each symbolic equation as a side effect;
	that debug output has been dropped).

	Args:
		v_new_vector: (N, K) aggregated agent models.
		z_vector:     (L, K) previous consensus variables.
		u_vector:     (L, K) coordinator dual variables.
		r_vector:     (L, K) consensus-residual multipliers.

	Returns:
		(L, K) array of updated consensus variables.
	"""
	global L,K
	global UL_L,C_L,D_L
	global rho
	
	# Renamed from `N` to avoid shadowing the global node count.
	zone_sizes=np.dot(C_L.T,C_L)
	
	z_v_vector=np.dot(np.dot(C_L.T,A_N),v_new_vector)
	z_z_vector=np.dot(UL_L,z_vector)
	
	z_new_update=np.zeros((L,K))
	for l in range(0,L):
		denom=tau*zone_sizes[l,l]+2*rho*D_L[l,l]
		z_new_update[l,:]=(tau*z_v_vector[l,:]+u_vector[l,:]+rho*z_z_vector[l,:]-r_vector[l,:])/denom
	
	return z_new_update


def coordinator_r_update(z_new_vector,r_vector):
	"""Residual multiplier update: r += rho * (OL_L z), penalizing
	disagreement between zones through the Laplacian OL_L."""
	global OL_L
	disagreement = np.dot(OL_L, z_new_vector)
	return r_vector + rho * disagreement


def convergence_experiments(t,r,K_f,L,b):
	"""Run the attack-free ADMM-style consensus experiment and save results.

	Args:
		t, r: multipliers applied to the base penalty constants tau and rho.
		K_f: file-id of the loaded data set (used only in the output name).
		L: number of zones (coordinator blocks).
		b: trimming parameter (used only in the output file name here).

	Side effects: rebinds the module globals tau, rho, OL_L, UL_L, D_L,
	A_N, C_L; prints progress; writes a .npz file with the primal iterate
	history and objective values.
	"""
	global tau,rho
	global OL_L,UL_L,D_L
	global v_n,x_n
	global A_N,C_L
	
	tau,rho,N_m=penalty_const(N,L)
	tau=t*tau
	rho=r*rho
	
	C_L,A_N=hypergraph(N,L,N_m)
	OL_L,UL_L,D_L=graph(L)
	
	print("%f_%f_%d_%d"%(tau,rho,L,K_f))

	honest_func=np.zeros(iteration)
	
	# Iterate histories, indexed (node-or-zone, coordinate, iteration).
	honest_v_n=np.zeros((N,K,iteration))
	honest_z_l=np.zeros((L,K,iteration))
	honest_x_n=np.zeros((N,K,iteration))
	honest_u_l=np.zeros((L,K,iteration))
	honest_r_l=np.zeros((L,K,iteration))
	
	for k in range(0,iteration-1):
		print("%d\n"%(k))
		honest_v_n[:,:,k+1]=agent_v_update(honest_z_l[:,:,k],honest_x_n[:,:,k])
		
		honest_func[k+1]=func_value(honest_v_n[:,:,k+1])
		print("%f\n"%(honest_func[k+1]))
		
		# Stop once the objective change drops below 1e-15.
		if abs(honest_func[k+1]-honest_func[k])<10**(-15):
			break;
		
		honest_z_l[:,:,k+1]=coordinator_z_update(honest_v_n[:,:,k+1],honest_z_l[:,:,k],honest_u_l[:,:,k],honest_r_l[:,:,k])
		
		honest_x_n[:,:,k+1]=agent_x_update(honest_v_n[:,:,k+1],honest_z_l[:,:,k+1],honest_x_n[:,:,k])
		
		honest_u_l[:,:,k+1]=coordinator_u_update(honest_v_n[:,:,k+1],honest_z_l[:,:,k+1],honest_u_l[:,:,k])
		
		honest_r_l[:,:,k+1]=coordinator_r_update(honest_z_l[:,:,k+1],honest_r_l[:,:,k])
		
	np.savez("./convergence%d_%f_%f_%d_%d_%d.npz"%(b,t,r,L,K_f,N),honest_v_n=honest_v_n,honest_func=honest_func)


def robust_experiments(B,t,r,K_f,u_b,L,b):
	"""Run the Byzantine-attack experiment with CTM-based robust aggregation.

	Args:
		B: number of Byzantine nodes (used only in the output file name).
		t, r: multipliers applied to the base penalty constants tau and rho.
		K_f: file-id of the loaded data set (used only in the output name).
		u_b: bias magnitude parameter (used only in the output file name).
		L: number of zones (coordinator blocks).
		b: CTM trimming parameter passed to aggregation_rule.

	Side effects: rebinds the module globals tau, rho, OL_L, UL_L, D_L,
	A_N, C_L; prints progress; writes a .npz file with the honest primal
	iterates and objective values.
	"""
	global tau,rho
	global OL_L,UL_L,D_L
	global v_n,x_n
	global A_N,C_L

	tau,rho,N_m=penalty_const(N,L)
	tau=t*tau
	rho=r*rho

	C_L,A_N=hypergraph(N,L,N_m)
	OL_L,UL_L,D_L=graph(L)
	
	print("%f_%f_%d_%d"%(tau,rho,L,K_f))
	
	honest_func=np.zeros(iteration)
	
	# Iterate histories, indexed (node-or-zone, coordinate, iteration).
	honest_v_n=np.zeros((N,K,iteration))
	honest_z_l=np.zeros((L,K,iteration))
	honest_x_n=np.zeros((N,K,iteration))
	honest_u_l=np.zeros((L,K,iteration))
	honest_r_l=np.zeros((L,K,iteration))
	
	for k in range(0,iteration-1):
		
		print("%d\n"%(k))
		honest_v_n[:,:,k+1]=agent_v_update(honest_z_l[:,:,k],honest_x_n[:,:,k])
		
		honest_func[k+1]=func_value(honest_v_n[:,:,k+1])
		print("%f\n"%(honest_func[k+1]))
		
		# Stop once the objective change drops below 1e-15.
		if abs(honest_func[k+1]-honest_func[k])<10**(-15):
			break;
		
		# Inject the precomputed Byzantine bias, then trim it back out
		# with CTM before the coordinator sees the models.
		byzantine_v_n=honest_v_n[:,:,k+1]+bias_v_n[:,:,k+1]
		
		ctm_v_n=aggregation_rule(byzantine_v_n,b)
		
		honest_z_l[:,:,k+1]=coordinator_z_update(ctm_v_n,honest_z_l[:,:,k],honest_u_l[:,:,k],honest_r_l[:,:,k])
		
		honest_x_n[:,:,k+1]=agent_x_update(honest_v_n[:,:,k+1],honest_z_l[:,:,k+1],honest_x_n[:,:,k])
		
		honest_u_l[:,:,k+1]=coordinator_u_update(ctm_v_n,honest_z_l[:,:,k+1],honest_u_l[:,:,k])
		
		honest_r_l[:,:,k+1]=coordinator_r_update(honest_z_l[:,:,k+1],honest_r_l[:,:,k])
	
	np.savez("./robust%d_%d_%f_%f_%f_%d_%d_%d.npz"%(b,B,u_b,t,r,L,K_f,N),honest_v_n=honest_v_n,honest_func=honest_func)

# ---------------------------------------------------------------------------
# Script configuration and entry point.
# NOTE(review): `global` statements at module scope are no-ops in Python;
# they are kept verbatim as declarations of intent only.
# ---------------------------------------------------------------------------
t=1  # multiplier applied to the base tau
r=1  # multiplier applied to the base rho

global N,K,L
N=1000  # number of agent nodes
K=3  # dimension of each local model

global iteration
iteration=4000  # maximum number of outer iterations

global w,W
w=np.zeros((N,K))  # per-node targets; overwritten by agentFunction if cached
W=np.zeros((N,K,K))  # per-node design matrices; overwritten likewise

global bias_v_n
bias_v_n=np.zeros((N,K,iteration))  # Byzantine bias used by robust_experiments

global B,u_b
u_b=0.1  # bias-magnitude parameter (appears in the cache/output filenames)
B=int(0.3*N)  # number of Byzantine nodes: 30% of all nodes
b=B  # CTM trimming parameter — trims as many entries as there are attackers



K_f=300  # file-id selecting which cached data set to load
agentFunction(K_f)



L=1  # number of zones (single-coordinator setting)
# convergence_experiments(t,r,K_f,L,b)
robust_experiments(B,t,r,K_f,u_b,L,b)
# convergence_experiments(t,r,K_f,L,0)