import random
from judge_perfor import Performance_Estimation_Strategy
from ma_config import _CONV_Space, _POOL_Space

import torch.nn

# Cities and their distance matrix (NOTE(review): num_cities appears unused in this file)
num_cities = 20

# Algorithm parameter settings
pop_size = 20              # population size
num_generations = 20       # number of generations to evolve
Cros_prob = 0.5            # crossover threshold (see crossover())
Muta_prob = 0.5            # mutation threshold (see mutation())
Local_search_prob = 0.5    # mutation threshold used during local search
Local_search_itr = 3       # local-search iterations per cell
Data_compression_rate = 0.5
Training_set_prop = 0.5
Neural_nodes = 4           # nodes encoded per cell
Initial_conv_chan = 16
Batch_size = 64
Epoch = 1
in_channel = 16            # channels passed to _CONV_Space
out_channel = 16           # channels passed to _CONV_Space

# Single-letter operator codes: 'a'-'d' are convolution ops, 'e'-'h' are pooling ops
_CONV_STRING = {'a', 'b', 'c', 'd'}
_POOL_STRING = {'e', 'f', 'g', 'h'}


# Initialize the population
def init_population(pop_size, Neural_nodes, Con_Search_space, Pool_Search_space):
	"""Build the initial population.

	Each individual is a pair of cells; each cell is a flat encoding list with
	one segment of length (node_i + 2) per node.

	Args:
		pop_size: number of individuals to create.
		Neural_nodes: number of nodes encoded per cell.
		Con_Search_space: convolution search space (provides random_select / random_select_NoFA).
		Pool_Search_space: pooling search space (provides random_select).

	Returns:
		list: pop_size individuals, each a two-cell pair
		(used downstream as [normal_cell, reduce_cell]).
	"""
	# Fixes: the original shadowed the outer loop variable `i` in the inner
	# node loop, and carried dead locals (S_flow, the manual `s` counter).
	population = []
	for _ in range(pop_size):
		inits = []
		for _cell in range(2):
			temp_flows = []
			for node_i in range(Neural_nodes):
				temp_flows.extend(get_conv_encode_flows(node_i, Conv_Space=Con_Search_space))
			# guarantee every node segment carries at least one conv operator
			new_flows = check_reassign_flow(temp_flows, Neural_nodes, Con_Search_space)
			# optionally turn one remaining 'FA' slot per node into a pooling operator
			true_flows = get_pool_encode_flows(new_flows, Neural_nodes, Pool_Space=Pool_Search_space)
			inits.append(true_flows)
		population.append(inits)
	return population

def get_pool_encode_flows(flows, Neural_nodes, Pool_Space):
	"""For every node segment, replace one randomly chosen 'FA' slot (if any)
	with a pooling operator drawn from Pool_Space. Mutates and returns flows."""
	start = 0
	for node in range(Neural_nodes):
		end = start + node + 2
		fa_slots = [idx for idx in range(start, end) if flows[idx] == 'FA']
		if fa_slots:
			flows[random.choice(fa_slots)] = Pool_Space.random_select()[0]
		start = end
	return flows

def get_conv_encode_flows(node_i, Conv_Space):
	"""Encode one node: a segment of node_i + 2 slots holding a single conv
	operator at a random position and 'FA' (empty) everywhere else."""
	# operator is drawn before the position, matching the original call order
	conv_op = Conv_Space.random_select()[0]
	length = node_i + 2
	segment = ['FA'] * length
	segment[random.randrange(length)] = conv_op
	return segment

# make sure every node segment carries at least one flow
def check_reassign_flow(flows, nodes, Con_Search_space):
	"""Ensure each node segment holds at least one operator: if a segment is
	entirely 'FA', overwrite one random slot with a non-FA conv operator.
	Mutates and returns flows."""
	start = 0
	for node in range(nodes):
		end = start + node + 2
		if all(flows[k] == 'FA' for k in range(start, end)):
			flows[random.randrange(start, end)] = Con_Search_space.random_select_NoFA()[0]
		start = end
	return flows


def calculate_fitness(population, Neural_nodes, _conv, _pool):
	"""Score every individual with the performance-estimation strategy
	(1 epoch, final-evaluation flag off) and return the scores in order."""
	return [
		Performance_Estimation_Strategy(individual, Neural_nodes, 1, _conv, _pool, False)
		for individual in population
	]

def separate_c_p(OF):
	"""Split a mixed cell encoding into a conv-only list and a pool-only list.

	A position holding a conv code keeps it in OF_C (with 'FA' in OF_P) and
	vice versa; 'FA' positions stay 'FA' in both outputs.
	NOTE: codes outside both sets are silently dropped, shortening the outputs.
	"""
	conv_codes = {'a', 'b', 'c', 'd'}   # mirrors module-level _CONV_STRING
	pool_codes = {'e', 'f', 'g', 'h'}   # mirrors module-level _POOL_STRING
	OF_C, OF_P = [], []
	for op in OF:
		if op == 'FA':
			OF_C.append('FA')
			OF_P.append('FA')
		elif op in conv_codes:
			OF_C.append(op)
			OF_P.append('FA')
		elif op in pool_codes:
			OF_C.append('FA')
			OF_P.append(op)
	return OF_C, OF_P

def count_fa(OF):
	"""Return the indices of every 'FA' (empty) slot in the encoding."""
	positions = []
	for idx, op in enumerate(OF):
		if op == 'FA':
			positions.append(idx)
	return positions

# Crossover
def crossover(parent1, parent2, Cros_prob, nodes=None):
	"""Node-segment crossover, performed in place.

	For each node segment a random value in [0, 1] is rounded to one decimal
	(re-drawn on a tie with Cros_prob); when it exceeds Cros_prob the two
	parents swap that whole segment.

	Args:
		parent1, parent2: flat cell encodings; mutated in place.
		Cros_prob: crossover threshold in [0, 1].
		nodes: number of node segments; defaults to the module-level
			Neural_nodes for backward compatibility (the original read the
			global implicitly, which hid the dependency).

	Returns:
		tuple: (parent1, parent2) — the same, now possibly mutated, lists.
	"""
	if nodes is None:
		nodes = Neural_nodes
	start = 0
	for i in range(nodes):
		end = start + i + 2
		# draw a one-decimal value, avoiding an exact tie with the threshold
		rounded_value = Cros_prob
		while rounded_value == Cros_prob:
			rounded_value = round(random.uniform(0, 1), 1)
		if rounded_value > Cros_prob:
			# swap this node's entire segment between the parents
			for j in range(start, end):
				parent1[j], parent2[j] = parent2[j], parent1[j]
		start = end
	return parent1, parent2
# Mutation
def mutation(OF, Muta_prob, _operator, Neural_nodes, FA_INDEX):
	"""Mutate a cell encoding in place, node segment by node segment.

	For every occupied (non-'FA') slot a random one-decimal value is drawn
	(re-drawn on a tie with Muta_prob). Above the threshold the slot's
	operator is replaced with a fresh non-FA one; otherwise the segment's
	operators are relocated via move_flow, restricted to the empty positions
	listed in FA_INDEX.
	"""
	start = 0
	for node in range(Neural_nodes):
		end = start + node + 2
		for slot in range(start, end):
			if OF[slot] == 'FA':
				continue
			roll = Muta_prob
			while roll == Muta_prob:
				roll = round(random.uniform(0, 1), 1)
			if roll > Muta_prob:
				# change the flow's operator
				OF[slot] = _operator.random_select_NoFA()[0]
			else:
				# change the flow's location within the segment
				OF = move_flow(OF, start, node, FA_INDEX)
		start = end
	return OF

def move_flow(OF, temp, i, FACOUNT_INDEX):
	"""Randomly relocate operators inside one node segment.

	Pairs up random ('FA' slot, occupied slot) choices within the segment of
	node `i` starting at index `temp`; a pair is swapped only when the empty
	slot's index appears in FACOUNT_INDEX (positions that are safe to occupy).
	Mutates OF in place and returns it.
	"""
	segment = range(temp, temp + i + 2)
	empty_slots = [k for k in segment if OF[k] == 'FA']
	filled_slots = [k for k in segment if OF[k] != 'FA']

	while empty_slots and filled_slots:
		fa_pos = random.choice(empty_slots)
		flow_pos = random.choice(filled_slots)
		if fa_pos in FACOUNT_INDEX:
			OF[flow_pos], OF[fa_pos] = OF[fa_pos], OF[flow_pos]
		empty_slots.remove(fa_pos)
		filled_slots.remove(flow_pos)
	return OF

def merge_c_p(OF_C, OF_P):
	"""Merge a conv-only and a pool-only encoding back into one cell list.

	At each position at most one of the two inputs may hold an operator;
	both holding one is a position clash and raises ValueError.
	"""
	merged = []
	for idx in range(len(OF_C)):
		c_op, p_op = OF_C[idx], OF_P[idx]
		if c_op == 'FA' and p_op == 'FA':
			merged.append('FA')
		elif c_op != 'FA' and p_op != 'FA':
			print('位置重合')
			print("重合字符为")
			print(OF_C)
			print(OF_P)
			print("重合位置为")
			print(idx)
			raise ValueError("发生了位置重合错误")
		else:
			merged.append(c_op if p_op == 'FA' else p_op)
	return merged

def merge_n_r(individual, flag, OF):
	"""Rebuild a two-cell individual with OF substituted for `flag`.

	`flag` must be one of the individual's two cells; OF takes its position
	while the other cell is kept unchanged.

	Raises:
		ValueError: if `flag` is not found in `individual`.
	"""
	if flag not in individual:
		raise ValueError("Parameter not found in list")
	if individual[0] == flag:
		return [OF, individual[1]]
	return [individual[0], OF]


def split_columns(data):
	"""Split a two-column table into its columns.

	Args:
		data: list of rows, each with at least two elements.

	Returns:
		tuple: (list of first-column values, list of second-column values).
	"""
	first, second = [], []
	for row in data:
		first.append(row[0])
		second.append(row[1])
	return first, second

def roulette_wheel_selection(population, fitness_values, num_selections):
	"""Roulette-wheel (fitness-proportionate) selection, with replacement.

	Args:
		population: list of individuals.
		fitness_values: non-negative fitness per individual, same order.
		num_selections: number of individuals to draw.

	Returns:
		list: exactly num_selections selected individuals.

	Raises:
		ZeroDivisionError: if the fitness values sum to zero.
	"""
	total_fitness = sum(fitness_values)

	# cumulative selection probabilities
	cumulative_probs = []
	cumulative_sum = 0.0
	for fitness in fitness_values:
		cumulative_sum += fitness / total_fitness
		cumulative_probs.append(cumulative_sum)

	selected_individuals = []
	for _ in range(num_selections):
		rand = random.random()
		for i, cumulative_prob in enumerate(cumulative_probs):
			if rand <= cumulative_prob:
				selected_individuals.append(population[i])
				break
		else:
			# BUG FIX: float rounding can leave the last cumulative value
			# slightly below 1.0, in which case the original appended nothing
			# and returned fewer than num_selections individuals
			selected_individuals.append(population[-1])
	return selected_individuals

def elitism_selection(population, fitness_scores, elite_size):
	"""Elitist selection: keep the top elite_size individuals by fitness.

	Args:
		population: list of individuals.
		fitness_scores: fitness per individual, same order.
		elite_size: number of elites to keep.

	Returns:
		tuple: (elite fitness values, elite individuals), best first.
	"""
	# BUG FIX: sort on fitness only. The original sorted bare (fitness,
	# individual) tuples, which falls through to comparing the individuals
	# themselves on fitness ties and can raise for non-comparable individuals.
	ranked = sorted(zip(fitness_scores, population), key=lambda pair: pair[0], reverse=True)
	elites = ranked[:elite_size]
	elite_fitness = [pair[0] for pair in elites]
	elite_individuals = [pair[1] for pair in elites]
	return elite_fitness, elite_individuals

def tournament_selection(population, fitness_scores, tournament_size, n):
	"""Tournament selection: run n tournaments and keep each winner.

	Args:
		population: list of individuals.
		fitness_scores: fitness per individual, same order.
		tournament_size: contestants sampled (without replacement) per round.
		n: number of winners to return.

	Returns:
		tuple: (winner fitness values, winner individuals).
	"""
	winners = []
	for _ in range(n):
		# BUG FIX: the original sampled from range(tournament_size,
		# len(population)), permanently excluding the first tournament_size
		# individuals from every tournament; all individuals are eligible.
		contestant_indices = random.sample(range(len(population)), tournament_size)
		contestants = [(fitness_scores[idx], population[idx]) for idx in contestant_indices]
		# keep the fittest contestant of this round
		winners.append(max(contestants, key=lambda pair: pair[0]))
	winner_fitness = [pair[0] for pair in winners]
	winner_individuals = [pair[1] for pair in winners]
	return winner_fitness, winner_individuals

# Global search: crossover + conv-side mutation over two roulette-picked parents
def globle_search(population, all_fitness, _conv,Muta_prob,Neural_nodes):
	"""Produce offspring via crossover and conv-operator mutation.

	Two parents are picked by roulette selection; for each of their two cells
	the cells are crossed over, split into conv/pool parts, the conv part is
	mutated, and the parts are merged back. Cell index 0 feeds Normal_cell,
	the other index feeds Reduce_cell.

	NOTE(review): crossover() swaps segments in place, so the selected
	population members themselves are modified here — confirm intended.
	NOTE(review): uses the module-level Cros_prob rather than a parameter.

	Returns:
		list: offspring individuals, each a [normal_cell, reduce_cell] pair.
	"""
	OFs = roulette_wheel_selection(population, all_fitness, 2)
	Init_cell = []
	Normal_cell = []
	Reduce_cell = []
	for i in range(len(OFs[0])):
		OF_1, OF_2 = crossover(OFs[0][i], OFs[1][i], Cros_prob)
		OF_1_C, OF_1_P = separate_c_p(OF_1)
		OF_2_C, OF_2_P = separate_c_p(OF_2)
		# collect the 'FA' indices of the pool part so that the following
		# mutation cannot move a conv operator onto a pooling operator's slot
		FAP1_INDEX = count_fa(OF_1_P)
		FAP2_INDEX = count_fa(OF_2_P)

		NEW_OF_1_C = mutation(OF_1_C, Muta_prob, _conv, Neural_nodes, FAP1_INDEX)
		NEW_OF_2_C = mutation(OF_2_C, Muta_prob, _conv, Neural_nodes, FAP2_INDEX)
		OF_1 = merge_c_p(NEW_OF_1_C, OF_1_P)
		OF_2 = merge_c_p(NEW_OF_2_C, OF_2_P)
		if i ==0:
			Normal_cell.append(OF_1)
			Normal_cell.append(OF_2)
		else:
			Reduce_cell.append(OF_1)
			Reduce_cell.append(OF_2)
	# pair the i-th normal cell with the i-th reduce cell to form an offspring
	for i in range(len(Normal_cell)):
		Init_cell.append([Normal_cell[i],Reduce_cell[i]])
	return Init_cell

# Local search: pool-side mutation hill-climbing on each cell of an individual
def local_search(individual, itro,Local_search_prob,_pool,_conv,Neural_nodes):
	"""Refine an individual by mutating the pooling side of each cell.

	For each cell: split into conv/pool parts, then itro times mutate the pool
	part (restricted to slots where the conv part is 'FA'), merge back, rebuild
	the individual, re-score it, and keep it when fitness does not decrease.

	NOTE(review): mutation() mutates OF_P in place, so successive iterations
	compound on the previous mutation rather than restarting — confirm intended.
	NOTE(review): fite_ness_best is recomputed from OF_P_best at the start of
	each cell's loop, re-evaluating the current best individual every time.

	Returns:
		tuple: (best individual found, its fitness).
	"""
	OF_P_best = individual
	fite_ness_best = None
	for indiv in individual:
		OF_C,OF_P = separate_c_p(indiv)
		# conv-side 'FA' slots: the only positions pool operators may move into
		FAC_INDEX = count_fa(OF_C)
		fite_ness_best = Performance_Estimation_Strategy(OF_P_best,Neural_nodes,1,_conv,_pool,False)
		for i in range(itro):
			OF_P_temp = mutation(OF_P, Local_search_prob, _pool, Neural_nodes, FAC_INDEX)
			OF_P_temp = merge_c_p(OF_C, OF_P_temp)
			new_indivi = merge_n_r(individual,indiv,OF_P_temp)
			fite_ness_temp=Performance_Estimation_Strategy(new_indivi,Neural_nodes,1,_conv,_pool,False)
			# accept ties as well as improvements
			if fite_ness_temp >= fite_ness_best:
				OF_P_best = new_indivi
				fite_ness_best = fite_ness_temp
	return OF_P_best,fite_ness_best

def get_new_population(population, fitness, size):
	"""Assemble the next working population from elites plus tournament winners.

	Half of `size` individuals come from elitism; tournament_selection is then
	asked for `size` winners using tournaments of size // 2 contestants.
	NOTE(review): the result therefore holds size // 2 + size individuals,
	not `size` — confirm this growth is intended.

	Returns:
		tuple: (new population list, matching fitness list).
	"""
	half = int(size / 2)
	elite_fitness, elite_individuals = elitism_selection(population, fitness, half)
	tour_fitness, tour_individuals = tournament_selection(population, fitness, half, size)

	next_population = elite_individuals + tour_individuals
	next_fitness = elite_fitness + tour_fitness
	return next_population, next_fitness

# Main routine
def memetic_algorithm(pop_size, Neural_nodes, num_generations):
	"""Run the memetic search and return the final population with its fitness.

	Each generation: global search (crossover + mutation) produces offspring,
	local search refines them, the refined offspring join the cumulative
	candidate pool, and elitism + tournament selection pick the next working
	population. After all generations the final population is re-evaluated
	more thoroughly (5 epochs, final-evaluation flag on).

	Args:
		pop_size: number of individuals in the initial population.
		Neural_nodes: number of nodes encoded per cell.
		num_generations: number of evolutionary generations.

	Returns:
		tuple: (final population, list of its re-evaluated fitness values).
	"""
	_conv = _CONV_Space(in_channel, out_channel)
	_pool = _POOL_Space()

	population = init_population(pop_size, Neural_nodes, _conv, _pool)
	# BUG FIX: the original bound population/new_population/original_pop to the
	# SAME list object, so appends to the archive silently grew all three;
	# take independent copies instead
	new_population = list(population)
	original_pop = list(population)  # cumulative archive of every candidate seen

	all_fitness = calculate_fitness(new_population, Neural_nodes, _conv, _pool)
	new_fitness = list(all_fitness)

	for generation in range(num_generations):
		print(f"generation is : {generation}")
		individuals = globle_search(new_population, new_fitness, _conv, Muta_prob, Neural_nodes)
		print(f"globle individuals is : {individuals}")
		for individual in individuals:
			OF_P_best, fitness_best = local_search(individual, Local_search_itr, Local_search_prob, _pool, _conv, Neural_nodes)
			original_pop.append(OF_P_best)
			all_fitness.append(fitness_best)
		# select the next working population from the whole archive
		new_population, new_fitness = get_new_population(original_pop, all_fitness, pop_size)
		print(f"new_population is : {new_population}")
		print(f"new_fitness is : {new_fitness}")

	# final, more thorough evaluation of the surviving individuals
	my_fitness = []
	for individual in new_population:
		my_fitness.append(Performance_Estimation_Strategy(individual, Neural_nodes, 5, _conv, _pool, True))
	# BUG FIX: the original computed my_fitness and then discarded it
	return new_population, my_fitness

# Run the algorithm
if __name__ == "__main__":
	memetic_algorithm(pop_size, Neural_nodes, num_generations)
