"""
    @brief:Particle filter algorithm
    @Editor:CJH
    @Date:2025/4/24
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans

import cv2
import torch

from utils import Map_init4, DRQN2
from collections import deque

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


# Ground-truth gas source position (used for simulation)
class ParticleFilter:
    def __init__(self, terminal, particle_nums=5000, canvas_size=(900, 540)):
        self.peak_ = (100, 100)
        # 构建一些机器人
        self.terminal = terminal
        self.H, self.W = canvas_size
        self.N = particle_nums  # 粒子数量
        self.sigma = 20  # 扩散速度参数
        self.sigma_m = 0.8  # 测量噪声标准差
        self.clusters_target = [int(terminal.grid_map.shape[1] / 2), int(terminal.grid_map.shape[0] / 2)]
        # 初始化粒子
        self.terminal.map_init(rf"D:\Gas_detector\data\gas_mass_data\export{15}.csv", update_=True, random_flip=0)
        self.particles, self.weights = self.initialize_particles()
        self.particle_history = [self.particles.copy()]
        # 初始化DRQN
        self.agent = DRQN2.DQNAgent(state_size=10, action_size=len(terminal.action_dict.keys()),
                                    sequence_length=10, batch_size=128)
        # # 读取模型，进行测试fwrite
        self.agent.model_read(r"D:\Gas_detector\weights\25-07-02\exp-1")

    # 初始化粒子
    def initialize_particles(self):
        if self.terminal.obstacle_map_normalized is not None:
            free_cells = np.where(self.terminal.obstacle_map_normalized < 10)  # 自由区域的坐标
            indices = np.random.choice(len(free_cells[0]), size=self.N)
            particles = np.vstack((free_cells[1][indices], free_cells[0][indices])).T
            weights = np.ones(self.N) / self.N
            return particles, weights

    def compute_entropy(self):
        # 计算粒子分布的熵
        hist, _, _ = np.histogram2d([p[0] for p in self.particles], [p[1] for p in self.particles],
                                    bins=(self.W, self.H))
        hist = hist / np.sum(hist)
        entropy = -np.sum(hist * np.log(hist + 1e-10))
        max_entropy = np.log(self.W * self.H)  # 均匀分布的最大熵
        return entropy / max_entropy

    # 法雷尔烟羽扩散模型
    def farrell_concentration(self, r, s, C_max, sigma):
        d = np.sqrt((r[0] - s[0]) ** 2 + (r[1] - s[1]) ** 2)
        return C_max * np.exp(- (d ** 2) / (2 * (sigma ** 2)))

    # 改进的似然函数
    def likelihood(self, z, r, s, C_max, sigma, sigma_m, lambda_penalty=0.5):
        # 计算每一个粒子可能的预测值，并向预测值高的靠拢
        # pred_concentration = self.concentration(r, s, C_max, sigma)
        pred_concentration = self.farrell_concentration(r, s, C_max, sigma)
        gaussian_term = np.exp(-((z - pred_concentration) ** 2) / (2 * sigma_m ** 2)) / np.sqrt(
            2 * np.pi * sigma_m ** 2)
        # 设置惩罚函数，限制pred_concentration≤Cmax，则比值 ≤1，并惩罚检测过的区域
        penalty = np.exp(-lambda_penalty * max(0, (pred_concentration / C_max - 1) * (1 + sigma_m)) ** 2)
        if terminal.quality_scores_map[tuple(np.array(s / terminal.box_min_step, dtype=np.int32)[::-1])] > 0:
            penalty *= 0.2
        # print(penalty, '惩罚值')

        return gaussian_term * penalty

    # 重采样
    def resample(self):
        # visited_non_target = self.mark_non_source()
        indices = np.random.choice(np.arange(self.N), size=self.N, p=self.weights)
        self.particles = self.particles[indices]
        # 添加扰动
        # 移除非目标位置的粒子
        for i in range(self.N):
            x, y = self.particles[i]
            if np.random.rand() < 0.1:  # 20% 的粒子添加扰动
                dx, dy = np.random.normal(0, 15, 2)  # 扰动范围可调整
                new_x = max(0, min(self.W - 1, int(x + dx)))
                new_y = max(0, min(self.H - 1, int(y + dy)))
                self.particles[i] = (new_x, new_y)

        self.weights = np.ones(self.N) / self.N

    # def mark_non_source(self):
    #     # Mark grid cells that are definitely not the gas source
    #     visited_non_source = []
    #     max_mass_thresh = np.max(self.terminal.mass_fraction_map)  # max mass fraction, i.e. lowest measured resistance
    #     quality_scores_map = self.terminal.grid_map * self.terminal.mass_fraction_map
    #     indexes = np.array(
    #         np.where(np.bitwise_and(quality_scores_map > 0, quality_scores_map < max_mass_thresh * 0.9)))[::-1].T
    #     # Convert each grid cell to an xyxy rectangle in pixel coordinates
    #     for index in indexes:
    #         visited_non_source.append(
    #             [*(index * self.terminal.box_min_step), *(index + np.array([1, 1])) * self.terminal.box_min_step])
    #     return visited_non_source

    @staticmethod
    def get_cluster_centers(particles, num_clusters):
        """使用 KMeans 聚类获取粒子分布的中心，并返回每个聚类的比例"""
        if len(particles) < num_clusters:
            return [], []

        kmeans = KMeans(n_clusters=num_clusters)
        kmeans.fit(particles)
        cluster_centers = kmeans.cluster_centers_

        # 获取每个粒子的聚类标签
        labels = kmeans.labels_

        # 统计每个聚类的粒子数量
        cluster_counts = np.bincount(labels, minlength=num_clusters)

        # 计算每个聚类的比例
        cluster_ratios = cluster_counts / len(particles)
        print(cluster_centers)
        print(cluster_ratios)

        return cluster_centers, cluster_ratios

    # def get_cluster_centers(self, particles, num_clusters):
    #     """使用 KMeans 聚类获取粒子分布的中心"""
    #     if len(particles) < num_clusters:
    #         return []
    #     kmeans = KMeans(n_clusters=num_clusters)
    #     kmeans.fit(particles)
    #     return kmeans.cluster_centers_

    def greedy_policy(self, bot, target_source_visual):
        step_choice = [[0, -1], [0, 1], [-1, 0], [1, 0], [1, -1], [-1, -1], [1, 1], [-1, 1]]
        score = []
        # action = list(self.action_dict.keys())[action]
        for index, choice in enumerate(step_choice):
            next_pos = (np.array(bot.current_grid_pos) + choice) * self.terminal.box_min_step

            Prior_distance = np.linalg.norm(np.array(target_source_visual) - next_pos)
            score.append(Prior_distance)
            # distance.append(np.linalg.norm(np.array(Prior_known_max_pos) - next_pos))
        probabilities = np.array(score) / np.sum(score)
        # return np.random.choice(len(probabilities), p=probabilities)
        return np.argmin(probabilities)

    def bot_controller(self):
        # 初始化，粒子环境
        initial_state = terminal.reset(bot_elements=Bot_elements)[0]
        state_sequence = deque(maxlen=10)
        for _ in range(10):
            state_sequence.append(initial_state)
            # 初始化隐状态
        hidden = self.agent.q_network.init_hidden(1)  # batch_size=1
        while True:
            cv2.imshow("mass", cv2.resize(self.terminal.mass_fraction_map, (600, 900)))
            key = cv2.waitKey(60)
            # 机器人仿真控制
            for bot in self.terminal.element_collection.values():
                if len(self.particle_history) % 3 == 0:
                    self.peak_ = self.get_dense_particle_position(self.particle_history[-1])
                state_tensor = torch.FloatTensor(np.array(state_sequence)).unsqueeze(0).to(
                    device)  # [1, seq_len, 2, h, w]
                with torch.no_grad():
                    q_values, hidden = self.agent.q_network(state_tensor, hidden)
                    action = q_values.argmax().item()  # 选择 Q 值最大的动作

                if key == 119:
                    print(q_values)

                    next_state, _, _ = terminal.step('upper', f"{bot.name}")
                elif key == 115:
                    print(q_values)

                    next_state, _, _ = terminal.step('lower', f"{bot.name}")
                elif key == 100:
                    print(q_values)

                    next_state, _, _ = terminal.step('right', f"{bot.name}")
                elif key == 97:
                    print(q_values)

                    next_state, _, _ = terminal.step('left', f"{bot.name}")
                elif key == 113:
                    print(terminal.reset(Bot_elements))
                    for _ in range(5):
                        state_sequence.append(initial_state)
                        # 初始化隐状态
                    hidden = self.agent.q_network.init_hidden(1)  # batch_size=1
                    self.particles, self.weights = self.initialize_particles()
                elif key == 99:
                    print("DRQN引导")

                    next_state, _, _ = terminal.step(action, f"{bot.name}")
                    print('q_values', q_values)
                elif key == 122:
                    print("粒子滤波引导")
                    action = self.greedy_policy(bot, self.peak_)
                    next_state, _, _ = terminal.step(action, f"{bot.name}")
                elif key == 98:
                    self.visualize()
                    # self.terminal.fit_map()
                    action = self.greedy_policy(bot, self.peak_)
                    next_state, _, _ = terminal.step(action, f"{bot.name}", True)
                else:
                    continue
                state_sequence.append(next_state)
                # 可视化机器人的位置
                self.particle_filter(bot)
                # 使用聚类k-means，可视化粒子簇中心
                self.particle_history.append(self.particles)
                self.terminal.draw_elements_on_canvas(csv_path=rf'D:\Gas_detector\data\gas_mass_data\export{15}.csv',
                                                      random_flip=0)

            # if self.terminal.element_collection["Bot1"].stride_left < 50 and self.terminal.element_collection[
            #     "Bot1"].stride_left % 20 == 0:

    # 作为粒子滤波的输出，粒子簇中心
    def PF_Output(self, element_name):
        grid_map = np.zeros(self.terminal.grid_map.shape)
        self.particle_filter(self.terminal.element_collection[element_name])
        # 使用聚类k-means，可视化粒子簇中心
        self.particle_history.append(self.particles)
        # 达到一定的步长才更新，或者机器人很靠近目标点也更新
        if len(self.particle_history) % 5 == 0 and len(self.particle_history) > 0:
            self.peak_ = self.get_dense_particle_position(self.particle_history[-1])
            self.clusters_target = (self.peak_ / self.terminal.box_min_step).astype(np.uint8)
        if np.linalg.norm(
                np.array(self.peak_) - np.array(
                    self.terminal.element_collection[element_name].current_pixel_pos)) < 150:
            self.peak_ = self.get_dense_particle_position(self.particle_history[-1])
            self.clusters_target = (self.peak_ / self.terminal.box_min_step).astype(np.uint8)

        """将粒子映射到栅格地图，每个栅格的值是范围内粒子权重的和"""
        # 创建栅格地图，维度为 height × width (y × x)
        # 遍历所有粒子，将权重累加到对应栅格
        # 计算缩放因子
        x_scale = self.terminal.grid_map.shape[1] / self.W
        y_scale = self.terminal.grid_map.shape[0] / self.H
        # 遍历所有粒子，将权重累加到对应栅格
        for (x, y), weight in zip(self.particles, self.weights):
            # 将粒子位置映射到栅格索引
            grid_x = min(max(int(x * x_scale), 0), self.terminal.grid_map.shape[1] - 1)
            grid_y = min(max(int(y * y_scale), 0), self.terminal.grid_map.shape[0] - 1)
            # 累加权重到对应栅格
            grid_map[grid_y, grid_x] += weight
        # self.visualize()
        return grid_map, self.clusters_target

    def get_dense_particle_position(self, particles, bin_size=100):
        """
        获取粒子分布最密集的位置，并计算峰值占比
        Returns:
            (x, y): 最密集位置
            peak_ratio: 最大峰值占总粒子的比例
        """
        x_coords = [p[0] for p in particles]
        y_coords = [p[1] for p in particles]
        # 直方图统计
        hist, x_edges, y_edges = np.histogram2d(
            x_coords, y_coords,
            bins=[int(self.W / bin_size), int(self.H / bin_size)],
            range=[[0, self.W], [0, self.H]]
        )
        max_idx = np.unravel_index(np.argmax(hist, axis=None), hist.shape)
        max_x_bin = x_edges[max_idx[0]]
        max_y_bin = y_edges[max_idx[1]]
        dense_x = max_x_bin + bin_size / 2
        dense_y = max_y_bin + bin_size / 2

        # 计算最大峰值占总粒子的比例
        peak_count = hist[max_idx]
        peak_ratio = peak_count / self.N
        # print(peak_ratio, '最大峰值占总粒子的比例')

        return int(dense_x), int(dense_y)

    # 粒子滤波主函数
    def particle_filter(self, bot):
        # 更新粒子滤波

        max_mass_thresh = np.max(self.terminal.mass_fraction_map)  # 最大的质量分数，相当于最低的测量阻值
        # 模拟机器人移动（随机移动）
        # 机器人当前的位置的气体质量分数
        # z_t 为机器人当前位置的浓度+随机噪声
        z_t = self.terminal.mass_fraction_map[
            bot.current_grid_pos[1], bot.current_grid_pos[0]]

        # sigma=self.sigma
        a = 3
        normalized_value = z_t / max_mass_thresh
        self.sigma_m = np.exp(-(a * normalized_value) ** 2)
        print(self.sigma_m)

        # z_t = z_t * np.exp(np.random.normal(0, self.sigma_m))
        # print(self.sigma_m, "sigma扩散系数")
        # print(z_t, '添加噪声')
        # 通过似然函数更新权重
        for i in range(self.N):
            self.weights[i] *= self.likelihood(z_t, bot.current_pixel_pos, self.particles[i], max_mass_thresh,
                                               self.sigma,
                                               self.sigma_m)
        # 归一化权重
        self.weights /= np.sum(self.weights)

        self.resample()

    def visualize(self):
        plt.figure(figsize=(10, 8))
        # 绘制地图和真实源
        plt.imshow(np.zeros((self.H, self.W)), cmap='gray', origin='lower')
        known_max_pos = np.unravel_index(np.argmax(self.terminal.mass_fraction_map),
                                         self.terminal.mass_fraction_map.shape)[::-1] * self.terminal.box_min_step

        plt.scatter(known_max_pos[0], known_max_pos[1], c='red', marker='x', s=200, label='True Source')
        # 可视化机器人的位置
        for bot in self.terminal.element_collection.values():
            plt.scatter(bot.x, bot.y, c='green', marker='o', s=200, label=f'BOT-{bot.name} Source')
        # 绘制初始和最终粒子
        initial_particles = self.particle_history[0]
        final_particles = self.particle_history[-1]

        clusters, ratios = self.get_cluster_centers(final_particles, 10)
        clusters = np.array(clusters, np.int32)

        # 绘制聚类中心并标注比例
        for peak, ratio in zip(clusters, ratios):
            plt.scatter(peak[0], peak[1], c='yellow', marker='x', s=250)
            plt.text(peak[0], peak[1] + 10, f'{ratio:.2f}', color='yellow', fontsize=18)

        # plt.scatter(self.peak_[0], self.peak_[1], c='white', marker='o', s=700, label='visual Source')

        # clusters = np.array(self.get_cluster_centers(clusters, 1), np.int32)
        # for peak in clusters:
        #     plt.scatter(peak[0], peak[1], c='white', marker='x', s=700, label='visual Source')

        plt.scatter(initial_particles[:, 0], initial_particles[:, 1], c='blue', alpha=0.3, s=10,
                    label='Initial Particles')
        plt.scatter(final_particles[:, 0], final_particles[:, 1], c='green', alpha=0.5, s=10,
                    label='Final Particles')

        plt.title('Particle Filter with Distance Decay Model')
        plt.xlabel('X')
        plt.ylabel('Y')
        plt.gca().invert_yaxis()  # 反转 y 轴
        plt.grid(True)
        plt.legend()
        plt.show()
        # plt.close()

        # plt.figure(figsize=(10, 8))

        # quality_scores_map = np.array(self.terminal.grid_map * self.terminal.mass_fraction_map)
        # plt.imshow(quality_scores_map / np.max(quality_scores_map), cmap='jet', vmin=0,
        #            vmax=np.max(self.terminal.mass_fraction_map))  # cmap 指定颜色映射
        # plt.colorbar(label='Value')  # 添加颜色条
        # plt.title('Heatmap Example')  # 设置标题
        # plt.xlabel('X-axis')  # X 轴标签
        # plt.ylabel('Y-axis')  # Y 轴标签s


# 运行
if __name__ == "__main__":
    Bot_elements = {"Bot1": Map_init4.BOT("Bot1", (15, 35), 300)}
    terminal = Map_init4.Terminal(canvas_size=(900, 600), split_x_nums=30, split_y_nums=45,
                                  obstacle_list=[(14, 17, 30, 16), (15, 9, 16, 0), (0, 25, 5, 26), (12, 17, 13, 35),
                                                 (5, 33, 6, 45), (0, 7, 11, 6)])
    terminal.render(Bot_elements)
    PF = ParticleFilter(terminal=terminal, particle_nums=3000, canvas_size=(900, 600))
    # particle_history = particle_filter()
    PF.bot_controller()
    # PF.PF_Output()
    # PF.visualize(particle_history)
