"""
    @brief:DRQN2 & Particle filter algorithm
    @Editor:CJH
    @Date:2025/6/5
"""

from utils import DRQN2, Map_init4, PF_Agent
import numpy as np

import torch
import os
import logging
import time
from collections import deque

# Run inference on the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# NOTE: without a `filename` argument basicConfig installs a stderr
# StreamHandler and the `filemode` argument is ignored, so the no-op
# filemode="a" has been dropped.
logging.basicConfig(level=logging.INFO, format='%(levelname)s %(asctime)s [%(filename)s:%(lineno)d] %(message)s',
                    datefmt='%Y.%m.%d. %H:%M:%S')
# Timestamp strings for naming runs, e.g. "25-06-05" and "14-30".
date = time.strftime('%y-%m-%d', time.localtime(time.time()))
daytime = time.strftime('%H-%M', time.localtime(time.time()))

# Directory for log files. NOTE(review): it is created but never passed to
# logging.basicConfig(filename=...) — presumably the intended target for the
# log output; confirm and wire it up if so.
logfile_path = "log data"
# exist_ok avoids the race between the existence check and the creation.
os.makedirs(logfile_path, exist_ok=True)


class Agent:
    """Gas-source-seeking agent combining a DRQN policy with a particle filter.

    An RRT local planner proposes candidate branches, the DRQN scores them,
    the scores are weighted by each branch's normalized information entropy,
    and a particle filter tracks the estimated source location from the gas
    measurements collected along the way.
    """

    def __init__(self, model_path):
        """Build the map, the particle filter and the DRQN agent.

        Args:
            model_path: Path to the directory holding the DRQN weights,
                loaded later by ``DRQN_agent.model_read``.
        """
        # 1. Map initialization
        self.model_path = model_path
        # Whether the loaded concentration map is randomly flipped.
        self.random_flip = 0
        # Distance (pixels) under which the robot counts as having reached
        # the particle-filter peak.
        self.closest = 25
        # Dead-end threshold: the more uniform the diffused concentration,
        # the higher this should be; lower it when a single sharp peak exists.
        self.BI_thresh = 0.1
        obstacle_list = [(14, 17, 30, 16), (15, 9, 16, 0), (0, 25, 8, 26), (16, 17, 17, 35),
                         (7, 33, 8, 45), (0, 8, 11, 9)]
        canvas_size = (900, 600)
        self.terminal = Map_init4.Terminal(canvas_size=canvas_size, split_x_nums=30, split_y_nums=45,
                                           obstacle_list=obstacle_list)
        # 2. Particle-filter initialization
        self.PF_agent = PF_Agent.ParticleFilter(self.terminal, particle_nums=3000, canvas_size=canvas_size)
        # 3. DRQN initialization
        self.DRQN_agent = DRQN2.DQNAgent(state_size=10, action_size=len(self.terminal.action_dict.keys()),
                                         sequence_length=10)

    def initial_state(self, csv_path):
        """Reset robot, map, particle filter and DRQN state for one episode.

        Args:
            csv_path: CSV file describing the gas-concentration map.

        Returns:
            Tuple ``(initial_state, state_sequence)`` where ``state_sequence``
            is a deque of length ``sequence_length`` pre-filled with copies of
            the initial observation.
        """
        # 1. Spawn the robot at a random grid cell (x drawn from columns,
        #    y from rows of the grid map).
        start_point = (
            np.random.randint(0, self.terminal.grid_map.shape[1] - 1),
            np.random.randint(0, self.terminal.grid_map.shape[0] - 1))
        self.Bot_elements = {"Bot1": Map_init4.BOT("Bot1", start_point, 1500)}
        # 2. Map initialization (starts the threaded visualization).
        self.terminal.render(self.Bot_elements)
        self.terminal.map_init(csv_path, update_=True, random_flip=self.random_flip)
        # 3. Particle-filter initialization.
        self.PF_agent.initialize_particles()
        # 4. DRQN state initialization.
        initial_state = self.terminal.reset(bot_elements=self.Bot_elements)[0]
        state_sequence = deque(maxlen=self.DRQN_agent.sequence_length)
        self.DRQN_agent.model_read(self.model_path)
        # Pad the whole sequence with the initial observation.
        for _ in range(self.DRQN_agent.sequence_length):
            state_sequence.append(initial_state)
        return initial_state, state_sequence

    def update_PF(self):
        """Feed the current gas measurement into the particle filter."""
        current_pixel_pos = self.terminal.element_collection['Bot1'].current_pixel_pos
        current_grid_pos = self.terminal.element_collection['Bot1'].current_grid_pos
        # True gas measurement at the robot's cell (map is indexed row=y, col=x).
        z_t = self.terminal.mass_fraction_map[current_grid_pos[1], current_grid_pos[0]]
        self.PF_agent.particle_filter(z_t, current_pixel_pos)
        self.PF_agent.particle_history.append(self.PF_agent.particles)

    def evaluate_policy_offline(self, csv_path):
        """Run one evaluation episode on the map described by ``csv_path``.

        At each decision step the RRT local planner proposes candidate
        branches; each branch's Q-value is weighted by its normalized
        information entropy and the best branch is executed primitive action
        by primitive action, updating the particle filter after every move.
        Returns as soon as the environment signals ``done``, or after the
        robot's stride budget is exhausted.
        """
        # Reset robot/map/PF/DRQN state and load the selected map.
        initial_state, state_sequence = self.initial_state(csv_path)
        # Initial recurrent hidden state for a batch of one.
        hidden = self.DRQN_agent.q_network.init_hidden(1)
        for t in range(self.Bot_elements['Bot1'].total_stride):
            state_tensor = torch.FloatTensor(np.array(state_sequence)).unsqueeze(0).to(
                device)  # [1, seq_len, 2, h, w]
            with torch.no_grad():
                q_values, hidden = self.DRQN_agent.q_network(state_tensor, hidden)
            self.terminal.RRT_local_planner(self.terminal.element_collection["Bot1"].current_grid_pos)
            # Accumulate an information score per candidate branch; cells whose
            # quality score is falsy (0 / missing) fall back to the global
            # concentration maximum.
            information_entropy = []
            for key in self.terminal.path_dict.keys():
                entropy = 0
                for step in self.terminal.path_dict[key]:
                    score = self.terminal.quality_scores_map[tuple(step)[::-1]]
                    if not score:
                        entropy += np.max(self.terminal.mass_fraction_map)
                    else:
                        entropy += score
                information_entropy.append(entropy)
            entropy_arr = np.array(information_entropy, dtype=np.float64)
            spread = entropy_arr.max() - entropy_arr.min()
            if spread > 0:
                normalized_entropy = (entropy_arr - entropy_arr.min()) / spread
            else:
                # All branches are equally informative: avoid the 0/0 division
                # (which previously produced NaNs and silently degraded the
                # argmax to index 0) and let the Q-values decide alone.
                normalized_entropy = np.ones_like(entropy_arr)
            print("information_entropy", normalized_entropy)
            print("q_values", q_values)
            # Weight each branch's Q-value by its entropy score.
            choices = torch.tensor(normalized_entropy).to(device) * q_values.squeeze()

            cluster_centers, cluster_ratios = self.PF_agent.get_cluster_centers(self.PF_agent.particle_history[-1], 5)
            # Center of the most populated cluster = current source estimate.
            self.PF_agent.peak_ = np.array(cluster_centers[np.array(cluster_ratios).argmax()], dtype=np.int32)
            print('choices', choices)
            # NOTE(review): disabled fallback — when the best branch score is
            # below BI_thresh (or the robot is within `closest` px of the PF
            # peak) the original design switched to PF-guided navigation:
            # if torch.max(choices).item() > self.BI_thresh or np.linalg.norm(
            #         np.array(self.terminal.element_collection['Bot1'].current_pixel_pos) - np.array(
            #             self.PF_agent.peak_)) < self.closest:
            branch = choices.argmax()  # branch with the highest weighted Q-value
            for primitive_action in self.terminal.path_2_action(branch):
                next_state, _, done = self.terminal.step(primitive_action, "Bot1", view=True)
                self.update_PF()  # fold the new measurement into the PF
                # Update the DRQN state sequence after the move.
                state_sequence.append(next_state)
                self.terminal.draw_elements_on_canvas(csv_path=csv_path,
                                                      random_flip=self.random_flip)
                if done:
                    return
            # else:
            #     print("particle-filter guidance")
            #     self.PF_agent.visualize()  # visualize the estimated goal
            #     while np.linalg.norm(np.array(self.terminal.element_collection['Bot1'].current_pixel_pos) - np.array(
            #             self.PF_agent.peak_)) > self.closest:
            #         action = self.PF_agent.greedy_policy(self.terminal.element_collection['Bot1'].current_grid_pos)
            #         next_state, _, done = self.terminal.step(action, "Bot1", view=True)
            #         self.update_PF()
            #         self.terminal.draw_elements_on_canvas(csv_path=csv_path,
            #                                               random_flip=self.random_flip)
            #         if done:
            #             return

            # 在移动后更新DRQN状态


if __name__ == "__main__":
    agent = Agent(model_path=r"D:\Gas_detector\weights\25-07-02\exp-1")
    for i in range(5):
        agent.evaluate_policy_offline(csv_path=rf'D:\Gas_detector\data\gas_mass_data_time\export{13}.csv')
