# a=10
# print(a)
#
# b=10
# c=20
# b,c=c,b
# print(b,c)

#打印
# for m in range(1,10):
#     print(m)


# a=[1,3,5,7]
# a.append(12)
# print(a)
# a.append('hhhhhh')
# print(a)
# a.insert(3,'hello')
# print(a)

# c=[2,4,6,8]
# print(5 in c)
# print(6 in c)
# print(1 not in c)
# print(2 not in c)
#
# print(c.index(6))
# print(c.index(8))

# a=[1,2,3,4]
# print(a[0])
# print(a[-1]) #取倒数第x个数字
# a.append(10)
# print(a)
# str='hello'
# str +='skdksfdwef'
# print(str)

# print(2==3)
# print(1>0)

# x1='hello world'
# x1+=' what???'
# x1+=' happened???'
# x1+=' haoa'
# print(x1)

# List={'小黑','小红','小白','小蓝','小绿'}
# print(List)

# x={1:3,2:1,3:1} #定义字典
# y={1,3,4}       #定义集合
# print(x.keys()-y)

# alist=[24,5,4,2]
# print(alist.insert(1,13))
# s= {1:'s',2:'d',3:'v'}
# print(alist.extend(s))
# print(alist)
#extend()用法，将多个元素值追加到列表末尾，可以是字符串/列表/元组/字典/集合
# print(alist.remove(4))  #remove(x)删除元素x
# print(f'before:alist:{alist}')
# print(alist.pop(1))     #pop(x)删除下标x对应元素
# print(f'after:alist:{alist}')
# s='hello'
# print(s[1])

# import heapq
# import copy
# def manhattan_distance(state, goal_map):
#     #计算曼哈顿距离
#     distance = 0
#     for i in range(len(state)):
#         for j in range(len(state[0])):
#             value = state[i][j]
#             if value != 0:
#                 goal_pos = goal_map[value]
#                 distance += abs(i - goal_pos[0]) + abs(j - goal_pos[1])
#
#     return distance
#
# def index_mapping_2d(lst):
#
#     index_map = {}
#     for i in range(len(lst)):
#         for j in range(len(lst[i])):
#             val = lst[i][j]
#             index_map[val] = (i, j)
#     return index_map
#
# def find_element_in_2d_list(arr, target):
#
#     for i in range(len(arr)):
#         for j in range(len(arr[i])):
#             if arr[i][j] == target:
#                 return i, j
#     return None
#
#
# def AStar(initial_state, goal_state):
#     print("__开始进行AStar搜索__")
#     goal_map = index_mapping_2d(goal_state)
#     frontier = []  # 待探索节点（边界），以空的优先队列实现，成本低的先探索
#     heapq.heappush(frontier, (0, initial_state))
#     came_from = dict()  # 记录每个节点的前驱节点
#     min_cost = dict()  # 记录目前位置探索过的节点的最小成本
#     initial_state_tuple = tuple(map(tuple, initial_state))
#     came_from[initial_state_tuple] = None  # 起始状态的前驱状态设置为None
#     min_cost[initial_state_tuple] = 0  # 到达起始状态的成本设置为0
#     zero_move_direcs = [[-1, 0], [1, 0], [0, -1], [0, 1]]  # 0的移动方向
#     while frontier:  # 进行探索，直到 frontier 中没有待探索的状态
#         _, current = heapq.heappop(frontier)  # 探索优先级最高的状态
#         if current == goal_state:
#             break
#         # 遍历当前状态的相邻状态
#         current_state_tuple = tuple(map(tuple, current))  # 当前状态转为tuple以便哈希
#         x_zero, y_zero = find_element_in_2d_list(current, 0)  # 找到0所在位置
#         for direc in zero_move_direcs:
#             # 计算下一个状态0所在的位置
#             new_x_zero = x_zero + direc[0]
#             new_y_zero = y_zero + direc[1]
#             # 检查该状态0的位置是否合法
#             if new_x_zero < 0 or new_y_zero < 0 or new_x_zero >= len(initial_state) or new_y_zero >= len(
#                     initial_state[0]):
#                 continue
#             # 计算从起始状态到next状态的成本，这里由于0不管往哪个方向移动成本都一致，所以next状态成本直接+1即可
#             new_cost = min_cost[current_state_tuple] + 1
#             next = copy.deepcopy(current)
#             # 将0移动到下一个状态的位置
#             next[new_x_zero][new_y_zero], next[x_zero][y_zero] = next[x_zero][y_zero], next[new_x_zero][new_y_zero]
#             next_state_tuple = tuple(map(tuple, next))
#             if next_state_tuple not in min_cost or new_cost < min_cost[next_state_tuple]:
#                 # 更新next状态的成本
#                 min_cost[next_state_tuple] = new_cost
#                 # 使用曼哈顿距离计算next的启发式估计成本（initial到next的准确成本 + next到goal的估计成本）
#                 priority_cost = new_cost + manhattan_distance(next, goal_map)
#                 # 将next状态以计算出的启发式估计成本加入优先队列中
#                 heapq.heappush(frontier, (priority_cost, next))
#                 came_from[next_state_tuple] = tuple(map(tuple, current))
#     return came_from
#
# def build_path(initial_state, goal_state, came_from):
#     # 将二维列表转换为元组
#     initial_state_tuple = tuple(map(tuple, initial_state))
#     goal_state_tuple = tuple(map(tuple, goal_state))
#     current_tuple = goal_state_tuple
#     path = []
#     have_solution = True
#
#     # 回溯找到路径
#     while current_tuple != initial_state_tuple:
#         if goal_state_tuple not in came_from:
#             have_solution = False
#             print("无解")
#             break
#         else:
#             path.append(current_tuple)
#             current_tuple = came_from[current_tuple]
#
#     # 如果有解，则输出路径
#     if have_solution:
#         path.append(initial_state_tuple)
#         path.reverse()
#         step = 0
#         for state in path:
#             print(str(step))
#             step += 1
#             for row in state:
#                 print(row)
#             print()
#
# # 初始状态
# start_state = [
#     [2,8,3],
#     [1,6,4],
#     [7,0,5]
# ]
# # 目标状态
# goal_state = [
#     [1,2,3],
#     [8,0,4],
#     [7,6,5]
# ]
# # 使用A*算法求解路径
# came_from = AStar(start_state, goal_state)
# # 进行路径构建
# build_path(start_state, goal_state, came_from)
#
# ##################################################################################################################
# import numpy as np
# import tensorflow as tf
# from tensorflow import keras
# from tensorflow.keras import layers
# import matplotlib.pyplot as plt
#
# # 设置随机种子
# tf.random.set_seed(42)
#
# # 超参数
# latent_dim = 100
# num_epochs = 10000
# batch_size = 128
# sample_interval = 1000
#
# # 加载 MNIST 数据集
# (X_train, _), (_, _) = keras.datasets.mnist.load_data()
# X_train = X_train / 255.0  # 归一化到 [0, 1]（NOTE(review): 生成器输出为 tanh，且后文用 0.5*x+0.5 反归一化，疑似应归一化到 [-1, 1]，即 (X_train - 127.5) / 127.5 — 待确认）
# X_train = np.expand_dims(X_train, axis=-1)  # 增加通道维度
#
# # 构建生成器
# def build_generator():
#     model = keras.Sequential()
#     model.add(layers.Dense(256, activation='relu', input_dim=latent_dim))
#     model.add(layers.Dense(512, activation='relu'))
#     model.add(layers.Dense(1024, activation='relu'))
#     model.add(layers.Dense(28 * 28 * 1, activation='tanh'))
#     model.add(layers.Reshape((28, 28, 1)))
#     return model
#
# # 构建判别器
# def build_discriminator():
#     model = keras.Sequential()
#     model.add(layers.Flatten(input_shape=(28, 28, 1)))
#     model.add(layers.Dense(512, activation='relu'))
#     model.add(layers.Dense(256, activation='relu'))
#     model.add(layers.Dense(1, activation='sigmoid'))  # 输出真实概率
#     return model
#
# # 实例化生成器和判别器
# generator = build_generator()
# discriminator = build_discriminator()
#
# # 构建 GAN
# discriminator.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# discriminator.trainable = False  # 冻结判别器的参数
#
# gan_input = layers.Input(shape=(latent_dim,))
# generated_image = generator(gan_input)
# gan_output = discriminator(generated_image)
#
# gan = keras.Model(gan_input, gan_output)
# gan.compile(loss='binary_crossentropy', optimizer='adam')
#
# # 训练 GAN
# for epoch in range(num_epochs):
#     # ---------------------
#     # 训练判别器
#     # ---------------------
#     idx = np.random.randint(0, X_train.shape[0], batch_size)
#     real_images = X_train[idx]
#
#     noise = np.random.normal(0, 1, (batch_size, latent_dim))
#     fake_images = generator.predict(noise)
#
#     real_labels = np.ones((batch_size, 1))
#     fake_labels = np.zeros((batch_size, 1))
#
#     discriminator_loss_real = discriminator.train_on_batch(real_images, real_labels)
#     discriminator_loss_fake = discriminator.train_on_batch(fake_images, fake_labels)
#     discriminator_loss = 0.5 * np.add(discriminator_loss_real, discriminator_loss_fake)
#
#     # ---------------------
#     # 训练生成器
#     # ---------------------
#     noise = np.random.normal(0, 1, (batch_size, latent_dim))
#     generator_loss = gan.train_on_batch(noise, real_labels)
#
#     # 打印损失和保存生成图像
#     if epoch % sample_interval == 0:
#         print(f"Epoch: {epoch}, D Loss: {discriminator_loss[0]:.4f}, G Loss: {generator_loss:.4f}")
#         noise = np.random.normal(0, 1, (25, latent_dim))
#         generated_images = generator.predict(noise)
#         generated_images = 0.5 * generated_images + 0.5  # 反归一化到 [0, 1]
#
#         # 绘制生成图像
#         plt.figure(figsize=(5, 5))
#         for i in range(generated_images.shape[0]):
#             plt.subplot(5, 5, i + 1)
#             plt.imshow(generated_images[i, :, :, 0], cmap='gray')
#             plt.axis('off')
#         plt.tight_layout()
#         plt.show()
#
# #生成对抗网络

# import numpy as np
# import matplotlib.pyplot as plt
#
# # 目标函数
# def objective_function(x, y):
#     return ((6.452 * (x + 0.125 * y) * (np.cos(x) - np.cos(2 * y)) ** 2) / np.sqrt(
#         (0.8 + (x - 4.2) ** 2 + 2 * (y - 7)) ** 2)) + 3.226 * y
#
# # 适应度函数
# def fitness_function(x, y):
#     return -objective_function(x, y)
#
# # 遗传算法框架
# def genetic_algorithm(population_size, generations, crossover_rate, mutation_rate, search_range):
#     # 初始化种群
#     population = np.random.uniform(low=search_range[0], high=search_range[1], size=(population_size, 2))
#
#     best_fitness_history = []
#     best_individual_history = []
#
#     for generation in range(generations):
#         # 计算适应度
#         fitness_values = np.array([fitness_function(x, y) for x, y in population])
#
#         # Check for NaN values and handle them
#         if np.isnan(fitness_values).any() or np.ptp(fitness_values) == 0:
#             print(f"Warning: Invalid fitness values encountered in generation {generation}.")
#             break
#
#         # 选择操作：使用适应度函数正规化版本作为选择概率
#         normalized_fitness = (fitness_values - np.min(fitness_values)) / (
#                     np.max(fitness_values) - np.min(fitness_values))
#
#         # Check for NaN values after normalization
#         if np.isnan(normalized_fitness).any():
#             print(f"Warning: NaN values encountered in normalized fitness in generation {generation}.")
#             break
#
#         # Continue with the selection operation
#         selection_probabilities = normalized_fitness / np.sum(normalized_fitness)
#
#         # 修正选择操作
#         selected_indices = np.random.choice(np.arange(len(population)), size=population_size, replace=True,
#                                             p=selection_probabilities)
#         selected_population = population[selected_indices]
#
#         # 交叉操作：单点交叉
#         crossover_indices = np.random.choice(population_size, size=population_size // 2, replace=False)
#         crossover_pairs = selected_population[crossover_indices]
#         crossover_points = np.random.rand(population_size // 2, 1)
#
#         # 修正交叉操作
#         crossover_offspring = np.zeros_like(crossover_pairs)
#         for i in range(crossover_pairs.shape[0]):
#             crossover_offspring[i] = crossover_pairs[i, 0] * (1 - crossover_points[i]) + crossover_pairs[i, 1] * \
#                                      crossover_points[i]
#
#         # 变异操作：均匀变异
#         mutation_mask = np.random.rand(population_size, 2) < mutation_rate
#         mutation_offspring = selected_population + mutation_mask * np.random.uniform(low=-0.5, high=0.5,
#                                                                                      size=(population_size, 2))
#
#         # 合并新一代种群
#         population = np.concatenate([crossover_offspring, mutation_offspring], axis=0)
#
#         # 保留最优个体
#         best_index = np.argmax(fitness_values)
#         best_fitness = fitness_values[best_index]
#         best_individual = population[best_index]
#
#         best_fitness_history.append(best_fitness)
#         best_individual_history.append(best_individual)
#
#     return best_fitness_history, best_individual_history
#
# # 表2 不同的种群规模的GA运行结果
# population_sizes = [10,20,30,40,50]
#
# # 初始化表2
# table2 = np.zeros((len(population_sizes), 4))
#
# for i, population_size in enumerate(population_sizes):
#     best_fitness_history, best_individual_history = genetic_algorithm(population_size, generations=100,
#                                                                       crossover_rate=0.8, mutation_rate=0.01,
#                                                                       search_range=[0, 10])
#
#     # 计算平均适应度
#     average_fitness = np.mean([fitness_function(x, y) for x, y in best_individual_history])
#
#     # 打印结果
#     print(f"种群规模: {population_size}")
#     print(f"最佳适应度: {best_fitness_history[-1]}")
#     print(f"平均适应度: {average_fitness}")
#     print(f"最佳个体: {best_individual_history[-1]}")
#     print("\n")
#
#     # 将结果填入表2
#     table2[i, 0] = best_fitness_history[-1]
#     table2[i, 1] = average_fitness
#     table2[i, 2:] = best_individual_history[-1]
#
# # 打印表2
# print("表2 不同的种群规模的GA运行结果")
# print("种群规模\t最佳适应度\t平均适应度\t最佳个体")
# for i in range(len(population_sizes)):
#     print(f"{population_sizes[i]}\t{table2[i, 0]}\t{table2[i, 1]}\t{table2[i, 2:]}")
# print("\n")


