import random
from utils import DRQN, Map_init2
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import torch
import os
import logging
import time
from collections import deque

# Root-logger configuration. NOTE: basicConfig's ``filemode`` argument is only
# honored when a ``filename`` is also supplied, so the original dead
# ``filemode="a"`` has been dropped — console output is unchanged.
logging.basicConfig(level=logging.INFO,
                    format='%(levelname)s %(asctime)s [%(filename)s:%(lineno)d] %(message)s',
                    datefmt='%Y.%m.%d. %H:%M:%S')
# Timestamps captured once at import; used to name per-run log files.
date = time.strftime('%y-%m-%d', time.localtime(time.time()))
daytime = time.strftime('%H-%M', time.localtime(time.time()))

# Directory that holds all run logs; created idempotently on first import.
logfile_path = "log data"
os.makedirs(logfile_path, exist_ok=True)


def record_log(name_process):
    """Return a named logger that also appends records to a per-run log file.

    Args:
        name_process: logger name; also embedded in the log file name.

    Returns:
        A ``logging.Logger`` with a formatted ``FileHandler`` attached.

    The file handler is given an explicit formatter because
    ``logging.basicConfig`` only configures the root handler — without it the
    log file would contain bare messages with no level/time/location.
    """
    logger = logging.getLogger(name_process)
    # Guard against stacking duplicate file handlers on repeated calls with
    # the same name (getLogger returns the same object each time).
    if not logger.handlers:
        handler = logging.FileHandler(
            f"{logfile_path}/{name_process}-{date}-{daytime}.log")
        handler.setFormatter(logging.Formatter(
            '%(levelname)s %(asctime)s [%(filename)s:%(lineno)d] %(message)s',
            datefmt='%Y.%m.%d. %H:%M:%S'))
        logger.addHandler(handler)
    return logger


device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


def run_episode(terminal, agent, sequence_length=10):
    """Run one training episode with a freshly spawned bot on a random map.

    Args:
        terminal: Map_init2.Terminal environment instance.
        agent: DRQN agent exposing take_action / store_experience / train.
        sequence_length: length of the state window fed to the recurrent net.

    Returns:
        The episode return accumulated by "Bot1".
    """
    agent.reset_epsilon()
    agent.hidden = agent.q_network.init_hidden(1)  # reset the recurrent hidden state
    # Spawn the bot at a random grid cell with a random third parameter (5-15).
    bot_elements = {"Bot1": Map_init2.BOT("Bot1", (
        random.randint(1, terminal.grid_map.shape[1] - 1),
        random.randint(1, terminal.grid_map.shape[0] - 1)),
        random.randint(5, 15))}
    terminal.reset(bot_elements=bot_elements)
    # BUGFIX: map_random was computed but never used — the path hard-coded
    # export1, so only one of the 9 maps was ever loaded. Use it for both
    # map_init and the per-step canvas redraw so they stay consistent.
    map_random = random.randint(1, 9)
    map_csv = rf"D:\Gas_detector\data\gas_mass_data\export{map_random}.csv"
    terminal.map_init(map_csv, update_=True)

    # Sliding window of the last ``sequence_length`` states for the DRQN.
    state_sequence = deque(maxlen=sequence_length)
    # Channel 0: the bot's current position/occupancy grid.
    current_position = np.array(terminal.grid_map)
    # Channel 1: gas mass-fraction map (with Gaussian noise), normalized to [0, 1].
    quality_scores = np.array(terminal.grid_map * terminal.Gaussian_mass_map)
    quality_scores_map = quality_scores / (np.max(quality_scores) + 1e-8)
    state = np.stack([current_position, quality_scores_map])

    # Pad the window with copies of the initial state so the network always
    # sees a full-length sequence from the first step.
    for _ in range(sequence_length):
        state_sequence.append(state)

    episode_sequence = []  # transitions (s, a, r, s', done) collected this episode
    while True:
        terminal.draw_elements_on_canvas(csv_path=map_csv)
        action = agent.take_action(np.array(state_sequence), terminal, "Bot1")
        next_state, reward, done = terminal.step(action, "Bot1")
        state_sequence.append(next_state)
        episode_sequence.append((state, action, reward, next_state, done))
        # Once enough transitions exist, store the most recent window as one
        # sequence experience for recurrent replay.
        if len(episode_sequence) >= sequence_length:
            agent.store_experience(episode_sequence[-sequence_length:])

        agent.train()
        state = next_state
        if done:
            break
    return terminal.element_collection["Bot1"].ep_return


def evaluate_policy(terminal, agent, num_episodes=5, max_steps=50, sequence_length=10):
    """Evaluate the current greedy policy over several fresh episodes.

    Args:
        terminal: Map_init2.Terminal environment instance.
        agent: DRQN agent whose q_network is queried greedily (argmax Q).
        num_episodes: number of evaluation episodes to average over.
        max_steps: step cap per episode if ``done`` is never reached.
        sequence_length: length of the state window fed to the recurrent net.

    Returns:
        Mean episode return of "Bot1" across the evaluation episodes.
    """
    total_rewards = []
    for _ in range(num_episodes):
        # Fresh random bot spawn for each evaluation episode.
        bot_elements = {"Bot1": Map_init2.BOT("Bot1", (
            random.randint(1, terminal.grid_map.shape[1] - 1),
            random.randint(1, terminal.grid_map.shape[0] - 1)),
            random.randint(5, 15))}
        terminal.reset(bot_elements=bot_elements)
        # BUGFIX: map_random was computed but never used — the path hard-coded
        # export1. Use it so evaluation actually samples all 9 maps.
        map_random = random.randint(1, 9)
        map_csv = rf"D:\Gas_detector\data\gas_mass_data\export{map_random}.csv"
        terminal.map_init(map_csv, update_=True)

        # Build the initial state window (same construction as run_episode).
        state_sequence = deque(maxlen=sequence_length)
        current_position = np.array(terminal.grid_map)
        quality_scores = np.array(terminal.grid_map * terminal.Gaussian_mass_map)
        quality_scores_map = quality_scores / (np.max(quality_scores) + 1e-8)
        initial_state = np.stack([current_position, quality_scores_map])
        for _ in range(sequence_length):
            state_sequence.append(initial_state)

        # Reset the recurrent hidden state for a batch of 1.
        hidden = agent.q_network.init_hidden(1)
        agent.reset_epsilon()
        for _ in range(max_steps):
            # Shape: [1, seq_len, 2, h, w]
            state_tensor = torch.FloatTensor(np.array(state_sequence)).unsqueeze(0).to(device)
            with torch.no_grad():
                q_values, hidden = agent.q_network(state_tensor, hidden)
                action = q_values.argmax().item()  # greedy: pick the max-Q action

            terminal.draw_elements_on_canvas(csv_path=map_csv)
            next_state, reward, done = terminal.step(action, "Bot1")
            state_sequence.append(next_state)
            if done:
                break
        total_rewards.append(terminal.element_collection["Bot1"].ep_return)
    return np.mean(total_rewards)


if __name__ == "__main__":
    # Training entry point: trains the DRQN agent for ``epoch`` iterations of
    # ``num_episodes`` episodes each, logging and checkpointing along the way.
    logger = record_log("DRQN")
    reward_list = []
    times = []
    # Placeholder bot; run_episode respawns a fresh random bot every episode.
    Bot_elements = {"Bot1": Map_init2.BOT("Bot1", (0, 0), 100)}
    terminal = Map_init2.Terminal(bot_elements=Bot_elements, canvas_size=(750, 450), split_x_nums=10, split_y_nums=15)
    terminal.render()

    agent = DRQN.DQNAgent(shape=terminal.grid_map.shape, action_size=len(terminal.action_dict.keys()),
                          sequence_length=10, batch_size=128)
    epoch = 150
    num_episodes = 100
    # NOTE(review): despite the name, this tracks the BEST average reward seen
    # so far — it gates best_model_save below.
    least_reward = -10000
    total_reward_list = []
    total_loss_list = []

    print("start to train DRQN-GRU model")
    for i in range(epoch):
        with tqdm(total=num_episodes, desc="Iteration %d" % i) as pbar:
            for episode in range(num_episodes):
                # Periodically sync the target network with the online network.
                if episode % 20 == 0 and episode > 2:
                    agent.update_target_network()
                reward_episode = run_episode(terminal, agent)
                reward_list.append(reward_episode)
                pbar.update(1)
            logger.info(f"\n val test avg rewards={evaluate_policy(terminal, agent)}")

        if i > 1:
            # NOTE(review): reward_list is only cleared from epoch 2 onward, so
            # rewards from epochs 0-1 fold into epoch 2's statistics — confirm
            # this is intentional.
            times.append(int(i))
            logger.info(f"avg rewards={np.average(reward_list)}")
            # NOTE(review): logs the variance while total_loss_list stores the
            # std, both labelled "loss" — verify which one is intended.
            logger.info(f"loss={np.var(reward_list)}")
            total_reward_list.append(np.average(reward_list))
            total_loss_list.append(np.std(reward_list))
            agent.model_save()
            # Keep a separate checkpoint whenever the average reward improves.
            if least_reward < np.average(reward_list):
                least_reward = np.average(reward_list)
                agent.best_model_save()
            reward_list = []

    plt.figure()
    plt.plot(times, total_reward_list, label="Average Reward")
    plt.plot(times, total_loss_list, label="Loss (Std)")
    plt.legend()
    plt.savefig('training_results.png')
