"""
    @brief:Adaptive Particle filter algorithm
    @Editor:CJH
    @Date:2025/6/4
"""

import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans


# True gas-source position (only available in simulation).
class ParticleFilter:
    """Adaptive particle filter that estimates a gas-source position on a 2D map.

    Particles are (x, y) pixel positions hypothesizing the source location.
    Weights are updated from gas mass-fraction measurements through a Farrell
    plume model, and the filter tracks the densest particle region as its
    current source estimate (``self.peak_``).
    """

    def __init__(self, terminal, particle_nums=5000, canvas_size=(900, 540)):
        """
        Args:
            terminal: environment handle providing grid_map,
                obstacle_map_normalized, mass_fraction_map, quality_scores_map,
                box_min_step, element_collection and path_dict.
            particle_nums: number of particles maintained by the filter.
            canvas_size: (H, W) canvas size in pixels.
        """
        self.peak_ratio = None  # fraction of particles inside the densest bin
        self.weights, self.particles = None, None
        self.particle_history = None
        self.peak_ = (0, 0)  # current densest-particle position, (x, y) pixels
        self.terminal = terminal
        # NOTE(review): canvas_size is unpacked as (H, W), not (W, H) — keep
        # callers consistent with this convention.
        self.H, self.W = canvas_size
        self.N = particle_nums  # particle count
        self.K_ratio = 2  # measurement-noise influence coefficient
        self.sigma = 20  # plume spread parameter for the Farrell model
        # Initial cluster target: center of the grid map as (x, y).
        self.clusters_target = [int(terminal.grid_map.shape[1] / 2), int(terminal.grid_map.shape[0] / 2)]

    def initialize_particles(self):
        """Scatter N particles uniformly over free (non-obstacle) cells.

        Does nothing when no obstacle map is available yet.
        """
        if self.terminal.obstacle_map_normalized is not None:
            # Cells with a normalized obstacle value below 10 count as free space.
            free_cells = np.where(self.terminal.obstacle_map_normalized < 10)
            indices = np.random.choice(len(free_cells[0]), size=self.N)
            # Stack as (x, y): column index first, row index second.
            self.particles = np.vstack((free_cells[1][indices], free_cells[0][indices])).T
            self.weights = np.ones(self.N) / self.N
            self.particle_history = [self.particles.copy()]

    def compute_entropy(self):
        """Return the Shannon entropy of the particle cloud, normalized to [0, 1].

        1.0 corresponds to a uniform spread over the W x H histogram bins.
        """
        hist, _, _ = np.histogram2d(self.particles[:, 0], self.particles[:, 1],
                                    bins=(self.W, self.H))
        hist = hist / np.sum(hist)
        entropy = -np.sum(hist * np.log(hist + 1e-10))  # epsilon guards log(0)
        max_entropy = np.log(self.W * self.H)  # entropy of the uniform distribution
        return entropy / max_entropy

    def farrell_concentration(self, r, s, C_max, sigma):
        """Farrell plume model: Gaussian decay of concentration with distance.

        Args:
            r: sensing position (x, y).
            s: hypothesized source position (x, y).
            C_max: peak concentration at the source.
            sigma: spread (diffusion) parameter.

        Returns:
            Predicted concentration at r for a source located at s.
        """
        d = np.hypot(r[0] - s[0], r[1] - s[1])
        return C_max * np.exp(-(d ** 2) / (2 * (sigma ** 2)))

    def likelihood(self, z, r, s, C_max, sigma, sigma_m, lambda_penalty=0.5):
        """Likelihood of measuring z at position r given source hypothesis s.

        Combines a Gaussian sensor model with a soft penalty that discourages
        predicted concentrations above C_max, and strongly down-weights grid
        cells already flagged in the quality-score map.
        """
        pred_concentration = self.farrell_concentration(r, s, C_max, sigma)
        # Gaussian sensor model with adaptive measurement noise sigma_m.
        gaussian_term = np.exp(-((z - pred_concentration) ** 2) / (2 * sigma_m ** 2)) / np.sqrt(
            2 * np.pi * sigma_m ** 2)
        # Penalty is 1 while pred/C_max <= 1 and decays once the prediction
        # exceeds the physically plausible maximum.
        penalty = np.exp(-lambda_penalty * max(0, (pred_concentration / C_max - 1) * (1 + sigma_m)) ** 2)
        # Pixel position -> (row, col) grid index; cells with a positive quality
        # score were already judged unlikely to contain the source.
        grid_idx = tuple(np.array(s / self.terminal.box_min_step, dtype=np.int32)[::-1])
        if self.terminal.quality_scores_map[grid_idx] > 0:
            penalty *= 0.1

        return gaussian_term * penalty

    def resample(self):
        """Multinomial resampling by weight, then jitter ~10% of the particles.

        The jitter fights sample impoverishment after resampling; weights are
        reset to uniform afterwards.
        """
        indices = np.random.choice(np.arange(self.N), size=self.N, p=self.weights)
        self.particles = self.particles[indices]
        for i in range(self.N):
            x, y = self.particles[i]
            if np.random.rand() < 0.1:  # perturb ~10% of the particles
                dx, dy = np.random.normal(0, 10, 2)  # jitter scale in pixels
                # Clamp the perturbed position to the canvas.
                new_x = max(0, min(self.W - 1, int(x + dx)))
                new_y = max(0, min(self.H - 1, int(y + dy)))
                self.particles[i] = (new_x, new_y)

        self.weights = np.ones(self.N) / self.N

    def get_cluster_centers(self, particles, num_clusters):
        """Cluster the particle cloud with KMeans.

        Returns:
            (cluster_centers, cluster_ratios): the cluster centers and the
            fraction of particles assigned to each cluster. Two empty lists
            when there are fewer particles than requested clusters.
        """
        if len(particles) < num_clusters:
            return [], []

        kmeans = KMeans(n_clusters=num_clusters)
        kmeans.fit(particles)
        labels = kmeans.labels_
        # Fraction of the particle cloud captured by each cluster.
        cluster_counts = np.bincount(labels, minlength=num_clusters)
        cluster_ratios = cluster_counts / len(particles)

        return kmeans.cluster_centers_, cluster_ratios

    def greedy_policy(self, current_grid_pos):
        """Pick the 8-neighborhood move that gets closest to the peak estimate.

        Args:
            current_grid_pos: current position in grid coordinates (x, y).

        Returns:
            Index into the fixed step-choice table of the move whose resulting
            pixel position minimizes the distance to ``self.peak_``.
        """
        step_choice = [[0, -1], [0, 1], [-1, 0], [1, 0], [1, -1], [-1, -1], [1, 1], [-1, 1]]
        score = []
        for choice in step_choice:
            next_pos = (np.array(current_grid_pos) + choice) * self.terminal.box_min_step
            score.append(np.linalg.norm(np.array(self.peak_) - next_pos))
        # argmin of the raw distances; the former normalize-then-argmin was
        # redundant and produced NaNs when every distance was zero.
        return np.argmin(score)

    def get_dense_particle_position(self, particles, bin_size=50):
        """Locate the densest histogram bin of the particle cloud.

        Updates ``self.peak_`` with that bin's center and ``self.peak_ratio``
        with the fraction of all N particles inside it.

        Args:
            particles: iterable of (x, y) particle positions.
            bin_size: requested bin edge length in pixels; the effective bin
                size differs slightly when W or H is not a multiple of it.
        """
        print("update dense peak!!!")
        particles = np.asarray(particles)
        hist, x_edges, y_edges = np.histogram2d(
            particles[:, 0], particles[:, 1],
            bins=[int(self.W / bin_size), int(self.H / bin_size)],
            range=[[0, self.W], [0, self.H]]
        )
        max_idx = np.unravel_index(np.argmax(hist, axis=None), hist.shape)
        # Bin center from the actual edges: the previous "edge + bin_size / 2"
        # was biased whenever W or H was not an exact multiple of bin_size.
        dense_x = (x_edges[max_idx[0]] + x_edges[max_idx[0] + 1]) / 2
        dense_y = (y_edges[max_idx[1]] + y_edges[max_idx[1] + 1]) / 2

        peak_count = hist[max_idx]
        self.peak_ = (int(dense_x), int(dense_y))
        self.peak_ratio = peak_count / self.N

    def particle_filter(self, z_t, current_pixel_pos):
        """One filter step: reweight all particles by the measurement, resample.

        Args:
            z_t: gas mass fraction measured at the robot's current position.
            current_pixel_pos: robot position in pixel coordinates (x, y).
        """
        # Largest mass fraction observed so far (lowest measured resistance).
        max_mass_thresh = np.max(self.terminal.mass_fraction_map)
        # Adaptive measurement noise: strong readings shrink sigma_m, so the
        # sensor is trusted more near the source.
        normalized_value = z_t / max_mass_thresh
        sigma_m = np.exp(-(self.K_ratio * normalized_value) ** 2)
        for i in range(self.N):
            self.weights[i] *= self.likelihood(z_t, current_pixel_pos, self.particles[i], max_mass_thresh,
                                               self.sigma, sigma_m)
        total = np.sum(self.weights)
        if not np.isfinite(total) or total <= 0:
            # All likelihoods underflowed (or went non-finite): reset to a
            # uniform distribution instead of propagating NaNs into resampling.
            self.weights = np.ones(self.N) / self.N
        else:
            self.weights /= total
        self.resample()

    def visualize(self):
        """Plot the map, robots, particle clouds, cluster centers and paths."""
        plt.figure(figsize=(10, 8))
        # Blank canvas plus the best-known source position.
        plt.imshow(np.zeros((self.H, self.W)), cmap='gray', origin='lower')
        known_max_pos = np.unravel_index(np.argmax(self.terminal.mass_fraction_map),
                                         self.terminal.mass_fraction_map.shape)[::-1] * self.terminal.box_min_step

        plt.scatter(known_max_pos[0], known_max_pos[1], c='red', marker='x', s=200, label='True Source')
        # Robot positions.
        for bot in self.terminal.element_collection.values():
            plt.scatter(bot.x, bot.y, c='green', marker='o', s=200, label=f'BOT-{bot.name} Source')
        # Initial and final particle clouds.
        initial_particles = self.particle_history[0]
        final_particles = self.particle_history[-1]

        clusters, ratios = self.get_cluster_centers(final_particles, 5)
        clusters = np.array(clusters, np.int32)

        # Cluster centers annotated with the share of particles they hold.
        for peak, ratio in zip(clusters, ratios):
            plt.scatter(peak[0], peak[1], c='yellow', marker='x', s=250)
            plt.text(peak[0], peak[1] + 10, f'{ratio:.2f}', color='yellow', fontsize=18)

        # Robot paths, converted from grid to pixel coordinates.
        for direction, path in self.terminal.path_dict.items():
            if path:  # skip empty paths
                x, y = zip(*path)
                x = np.array(x) * self.terminal.box_min_step[0]
                y = np.array(y) * self.terminal.box_min_step[1]
                print(x, y)
                plt.plot(x, y, marker='o',
                         label=direction)

        plt.scatter(initial_particles[:, 0], initial_particles[:, 1], c='blue', alpha=0.3, s=10,
                    label='Initial Particles')
        plt.scatter(final_particles[:, 0], final_particles[:, 1], c='green', alpha=0.5, s=10,
                    label='Final Particles')

        plt.title('Particle Filter with Distance Decay Model')
        plt.xlabel('X')
        plt.ylabel('Y')
        plt.gca().invert_yaxis()  # image-style coordinates: y grows downward
        plt.grid(True)
        plt.legend()
        plt.show()
