"""
    @brief:Reinforcement learning DQN train method
    @Editor:CJH
    @Date:2025/3/12
"""
import random

from utils import DQN, Map_init2
import Particle_Filter_Agent

import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import torch
import os
import logging
import time

# Configure the root-logger format. NOTE(review): without a ``filename``
# argument, ``filemode`` is ignored by basicConfig (root output goes to the
# console); file output is attached per-logger in record_log() below.
logging.basicConfig(level=logging.INFO, format='%(levelname)s %(asctime)s [%(filename)s:%(lineno)d] %(message)s',
                    datefmt='%Y.%m.%d. %H:%M:%S',
                    filemode="a")
# Timestamps captured once at import time; used in the log file name.
date = time.strftime('%y-%m-%d', time.localtime(time.time()))
daytime = time.strftime('%H-%M', time.localtime(time.time()))

# Directory for the log files; exist_ok avoids the check-then-create race.
logfile_path = "log data"
os.makedirs(logfile_path, exist_ok=True)


def record_log(name_process):
    """Return a logger named *name_process* that also writes to a dated file.

    Args:
        name_process: logger name; also embedded in the log file name.

    Returns:
        logging.Logger: the named logger with a formatted FileHandler attached.
    """
    logger = logging.getLogger(name_process)
    # Guard: calling this twice with the same name must not stack duplicate
    # handlers (which would write every record multiple times).
    if not logger.handlers:
        handler = logging.FileHandler(
            f"{logfile_path}/{name_process}-{date}-{daytime}.log")
        # basicConfig's format does not apply to explicitly created handlers,
        # so attach an equivalent formatter here to keep file logs consistent.
        handler.setFormatter(logging.Formatter(
            '%(levelname)s %(asctime)s [%(filename)s:%(lineno)d] %(message)s',
            datefmt='%Y.%m.%d. %H:%M:%S'))
        logger.addHandler(handler)
    return logger


device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


def run_episode(terminal, agent, PF_Filter=None):
    """Run one DQN training episode and return its accumulated return.

    Args:
        terminal: grid-world environment (Map_init2.Terminal).
        agent: DQN agent exposing reset_epsilon / take_action /
            store_experience / train.
        PF_Filter: optional particle filter; currently unused, kept for
            interface compatibility.

    Returns:
        The episode return (``ep_return``) of robot "Bot1".
    """
    # Reset exploration rate, then spawn the robot at a random grid cell with
    # a random step/battery budget in [10, 30].
    agent.reset_epsilon()
    bot_elements = {"Bot1": Map_init2.BOT("Bot1", (
        random.randint(0, terminal.grid_map.shape[1] - 1),
        random.randint(0, terminal.grid_map.shape[0] - 1)),
                                          random.randint(10, 30))}
    terminal.reset(bot_elements=bot_elements)
    # NOTE(review): a random map index (randint(4, 9)) used to be drawn here
    # but was never used — the path hard-codes export9, presumably a debugging
    # leftover. Confirm whether training should randomize over maps.
    csv_path = r"D:\Gas_detector\data\gas_mass_data\export9.csv"
    terminal.map_init(csv_path, update_=True)
    # Channel 1: the robot's current-position grid map.
    current_position = np.array(terminal.grid_map)
    # Channel 2: mass-fraction map at the robot position with Gaussian noise,
    # normalized to [0, 1]; the epsilon avoids division by zero on a zero map.
    quality_scores = np.array(terminal.grid_map * terminal.Gaussian_mass_map)
    quality_scores_map = quality_scores / (np.max(quality_scores) + 1e-8)
    # Stack into the state tensor, shape (2, H, W).
    state = np.stack([current_position, quality_scores_map])
    while True:
        terminal.draw_elements_on_canvas(csv_path=csv_path)  # visualization
        action = agent.take_action(state, terminal, "Bot1")
        next_state, reward, done = terminal.step(action, "Bot1")
        agent.store_experience(state, action, reward, next_state, done)
        agent.train()
        state = next_state
        if done:
            break
    return terminal.element_collection["Bot1"].ep_return


# 策略评估
def evaluate_policy(terminal, agent, num_episodes=5, max_steps=50):
    """Evaluate the agent's greedy policy (no exploration).

    Args:
        terminal: grid-world environment (Map_init2.Terminal).
        agent: DQN agent; its ``q_network`` is queried directly.
        num_episodes: number of evaluation episodes to average over.
        max_steps: hard cap on steps per episode.

    Returns:
        Mean episode return over the evaluation episodes.
    """
    total_rewards = []
    # NOTE(review): a random map index (randint(4, 9)) used to be drawn per
    # episode but was never used — export9 is hard-coded; confirm the intended
    # evaluation map before randomizing.
    csv_path = r"D:\Gas_detector\data\gas_mass_data\export9.csv"
    for _ in range(num_episodes):
        # Spawn the robot at a random cell with a random budget in [10, 30].
        bot_elements = {"Bot1": Map_init2.BOT("Bot1", (
            random.randint(0, terminal.grid_map.shape[1] - 1),
            random.randint(0, terminal.grid_map.shape[0] - 1)),
                                              random.randint(10, 30))}
        terminal.reset(bot_elements=bot_elements)
        agent.reset_epsilon()  # kept for parity with the training reset
        terminal.map_init(csv_path, update_=True)
        # Channel 1: robot current-position map.
        current_position = np.array(terminal.grid_map)
        # Channel 2: noisy mass-fraction map, normalized to [0, 1].
        quality_scores = np.array(terminal.grid_map * terminal.Gaussian_mass_map)
        quality_scores_map = quality_scores / (np.max(quality_scores) + 1e-8)
        # State tensor, shape (2, H, W).
        state = np.stack([current_position, quality_scores_map])
        for _step in range(max_steps):
            # Greedy action: argmax over Q-values, batch dimension added.
            state_tensor = torch.FloatTensor(state).unsqueeze(0).to(device)
            with torch.no_grad():
                q_values = agent.q_network(state_tensor)
                action = q_values.argmax().item()
            terminal.draw_elements_on_canvas(csv_path=csv_path)  # visualization
            next_state, reward, done = terminal.step(action, "Bot1")
            state = next_state
            if done:
                break
        total_rewards.append(terminal.element_collection["Bot1"].ep_return)
    return np.mean(total_rewards)


if __name__ == "__main__":
    logger = record_log("DQN")
    reward_list = []  # per-episode returns within the current epoch
    times = []        # epoch indices that were logged/plotted
    # Environment with one robot starting at (0, 0) with a budget of 100.
    Bot_elements = {"Bot1": Map_init2.BOT("Bot1", (0, 0), 100)}

    terminal = Map_init2.Terminal(bot_elements=Bot_elements, canvas_size=(750, 450), split_x_nums=15, split_y_nums=23)
    terminal.render()
    # State has 2 channels (position map and mass-score map) over the grid.
    action_n = len(terminal.action_dict.keys())
    agent = DQN.DQNAgent(shape=terminal.grid_map.shape, action_size=action_n, batch_size=128)
    epoch = 50         # number of training iterations (outer loop)
    num_episodes = 50  # episodes per iteration
    best_avg_reward = -10000  # best epoch-average reward seen so far
    total_reward_list = []
    total_loss_list = []
    print("start to train model")
    for i in range(epoch):
        with tqdm(total=int(num_episodes), desc="Iteration %d" % i) as pbar:
            for episode in range(num_episodes):
                # Periodically sync the target network with the online one.
                if episode % 10 == 0 and episode > 2:
                    agent.update_target_network()
                reward_list.append(run_episode(terminal, agent))
                pbar.update(1)
            logger.info(f"\n val test avg rewards={evaluate_policy(terminal, agent)}")

        if i > 1:
            times.append(i)
            avg_reward = np.average(reward_list)
            logger.info(f"avg rewards={avg_reward}")
            # NOTE(review): this logs the reward *variance* under the label
            # "loss" while the plotted list stores the std — confirm intent.
            logger.info(f"loss={np.var(reward_list)}")

            total_reward_list.append(avg_reward)
            total_loss_list.append(np.std(reward_list))
            agent.model_save()
            # Keep a separate checkpoint of the best-performing epoch.
            if best_avg_reward < avg_reward:
                best_avg_reward = avg_reward
                agent.best_model_save()
            reward_list = []

    plt.figure()  # fresh canvas for the training curves
    plt.plot(times, total_reward_list, label="avg reward")
    plt.plot(times, total_loss_list, label="reward std")
    plt.legend()
    plt.show()
