import random
from utils import DRQN2, Map_init3
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import torch
import os
import logging
import time
from collections import deque

# Root logging configuration (console via the default stream handler).
# NOTE(review): the original passed filemode="a" here, but basicConfig only
# honours filemode together with a filename argument, so it had no effect;
# per-run log files are attached by record_log() instead.
logging.basicConfig(level=logging.INFO,
                    format='%(levelname)s %(asctime)s [%(filename)s:%(lineno)d] %(message)s',
                    datefmt='%Y.%m.%d. %H:%M:%S')

# Timestamps used to name the per-run log files, e.g. "25-01-31" / "14-05".
date = time.strftime('%y-%m-%d', time.localtime(time.time()))
daytime = time.strftime('%H-%M', time.localtime(time.time()))

# Directory that collects all run logs; created on first use.
logfile_path = "log data"
os.makedirs(logfile_path, exist_ok=True)


def record_log(name_process):
    """Return a logger for *name_process* that also appends to a timestamped file.

    The file lives under ``logfile_path`` and is named
    ``<name_process>-<date>-<daytime>.log``.  A guard prevents attaching a
    second FileHandler when this is called more than once with the same
    name, which would otherwise write every record multiple times.
    """
    logger = logging.getLogger(name_process)
    if not any(isinstance(h, logging.FileHandler) for h in logger.handlers):
        logger.addHandler(
            logging.FileHandler(f"{logfile_path}/{name_process}-{date}-{daytime}.log"))
    return logger


device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


def run_episode(terminal, agent, map_random, start_point, sequence_length=20, random_flip=0):
    """Run one training episode and return the bot's episode return.

    Resets the recurrent hidden state, re-initializes the map from the
    ``export<map_random>.csv`` file, then steps the agent until the terminal
    reports done, storing fixed-length state windows as experience and
    training after every step.
    """
    # Fresh recurrent hidden state for this episode (batch size 1).
    agent.hidden = agent.q_network.init_hidden(1)
    bots = {"Bot1": Map_init3.BOT("Bot1", start_point, 250)}
    # NOTE(review): map_init is called with random_flip=0 while the canvas
    # draw below uses the random_flip argument — confirm this asymmetry is
    # intentional.
    terminal.map_init(rf"D:\Gas_detector\data\gas_mass_data\export{map_random}.csv", update_=True,
                      random_flip=0)

    state = terminal.reset(bot_elements=bots)[0]
    # Sliding window of the most recent states, pre-filled with the reset state.
    window = deque([state] * sequence_length, maxlen=sequence_length)
    transitions = []  # per-episode (state, action, reward, next_state, done) tuples
    done = False
    while not done:
        terminal.draw_elements_on_canvas(csv_path=rf"D:\Gas_detector\data\gas_mass_data\export{map_random}.csv",
                                         random_flip=random_flip)
        action = agent.take_action(np.array(window))

        next_state, reward, done = terminal.step(action, "Bot1")
        window.append(next_state)
        transitions.append((state, action, reward, next_state, done))
        # Once enough transitions exist, hand the newest window to the replay buffer.
        if len(transitions) >= sequence_length:
            agent.store_experience(transitions[-sequence_length:])

        agent.train()
        state = next_state
    return terminal.element_collection["Bot1"].ep_return


def evaluate_policy(agent, terminal, num_episodes=5, sequence_length=10):
    """Evaluate the greedy policy and return the mean episode return.

    Runs ``num_episodes`` episodes on randomly chosen maps and start points,
    always taking the argmax action (no exploration, no training).

    Bug fix: this previously returned ``np.std(total_rewards)`` even though
    the caller logs the value as "avg rewards"; it now returns the mean.
    """
    total_rewards = []
    for _ in range(num_episodes):
        # Random evaluation map and start position.
        map_random = random.randint(10, 15)
        start_point = (
            random.randint(0, terminal.grid_map.shape[1] - 1), random.randint(0, terminal.grid_map.shape[0] - 1))
        Bot_elements = {"Bot1": Map_init3.BOT("Bot1", start_point,
                                              random.randint(200, 300))}
        terminal.map_init(rf"D:\Gas_detector\data\gas_mass_data\export{map_random}.csv", update_=True)

        initial_state = terminal.reset(bot_elements=Bot_elements)[0]

        # Sequence window pre-filled with the reset state.
        state_sequence = deque(maxlen=sequence_length)
        for _ in range(sequence_length):
            state_sequence.append(initial_state)

        # Fresh recurrent hidden state (batch_size=1).
        hidden = agent.q_network.init_hidden(1)
        for t in range(Bot_elements['Bot1'].total_stride):
            # assumes states stack to [1, seq_len, 2, h, w] — TODO confirm
            state_tensor = torch.FloatTensor(np.array(state_sequence)).unsqueeze(0).to(device)
            with torch.no_grad():
                q_values, hidden = agent.q_network(state_tensor, hidden)
                action = q_values.argmax().item()  # greedy action
            terminal.draw_elements_on_canvas(
                csv_path=rf"D:\Gas_detector\data\gas_mass_data\export{map_random}.csv")
            next_state, reward, done = terminal.step(action, "Bot1", view=True)
            state_sequence.append(next_state)
            time.sleep(0.05)  # slow down so the rendered view is watchable
            if done:
                break
        total_rewards.append(terminal.element_collection["Bot1"].ep_return)
    return np.mean(total_rewards)


if __name__ == "__main__":
    # Where model weights are written; one sub-directory per day, one per run.
    save_path = "D:/Gas_detector/weights"

    logger = record_log("DRQN")
    reward_list = []
    times = []
    Bot_elements = {"Bot1": Map_init3.BOT("Bot1", (0, 0), 100)}
    terminal = Map_init3.Terminal(canvas_size=(900, 600), split_x_nums=30, split_y_nums=45)

    terminal.render(Bot_elements)

    agent = DRQN2.DQNAgent(state_size=10, action_size=len(terminal.action_dict),
                           sequence_length=20, batch_size=128)
    epoch = 3
    num_episodes = 200
    least_reward = -10000  # best (highest) average epoch reward seen so far
    total_reward_list = []
    total_loss_list = []

    print("start to train DRQN-GRU model")
    map_random = random.randint(10, 14)

    for i in range(epoch):
        agent.reset_epsilon()
        with tqdm(total=num_episodes, desc="Iteration %d" % i) as pbar:
            for episode in range(num_episodes):
                # Periodically sync the target network and restart exploration.
                if episode % 100 == 0 and episode > 2:
                    agent.update_target_network()
                if episode % 50 == 0 and episode > 2:
                    agent.reset_epsilon()
                # Randomize map, flip augmentation and start position per episode.
                map_random = random.randint(10, 14)
                random_flip = random.randint(0, 3)
                start_point = (
                    random.randint(0, terminal.grid_map.shape[1] - 1),
                    random.randint(0, terminal.grid_map.shape[0] - 1))
                reward_episode = run_episode(terminal, agent, start_point=start_point, map_random=map_random,
                                             sequence_length=agent.sequence_length, random_flip=random_flip)
                reward_list.append(reward_episode)
                pbar.update(1)
            logger.info(
                f"\n val test avg rewards={evaluate_policy(agent, terminal, sequence_length=agent.sequence_length)}")

        # Build <save_path>/<date>/exp-<n>, where n counts existing "exp" runs.
        save_dir = os.path.join(save_path, date)
        os.makedirs(save_dir, exist_ok=True)
        # `entry` instead of `dir` — avoid shadowing the builtin.
        counter = sum(1 for entry in os.listdir(save_dir) if "exp" in entry)
        save_dir = os.path.join(save_dir, f"exp-{counter}")
        os.makedirs(save_dir, exist_ok=True)

        times.append(i)
        logger.info(f"avg rewards={np.average(reward_list)}")
        # Log the spread of the rewards as std (matching total_loss_list)
        # rather than mislabelling the variance as "loss".
        logger.info(f"reward std={np.std(reward_list)}")
        total_reward_list.append(np.average(reward_list))
        total_loss_list.append(np.std(reward_list))
        agent.model_save(save_dir)
        # Keep a separate "best" checkpoint for the highest-average epoch.
        if least_reward < np.average(reward_list):
            least_reward = np.average(reward_list)
            agent.best_model_save(save_dir)
        reward_list = []

    # Plot per-epoch average reward and reward std.
    plt.figure()
    plt.plot(times, total_reward_list, label="Average Reward")
    plt.plot(times, total_loss_list, label="Loss (Std)")
    plt.legend()
    plt.savefig('training_results.png')
