import random
from utils import DRQN2, Map_init3
import numpy as np

import torch
import os
import logging
import time
from collections import deque

# Root logging configuration for console output.
# NOTE: filemode is only honoured by basicConfig together with a `filename`
# argument; it has no effect here and is kept only for compatibility.
logging.basicConfig(level=logging.INFO, format='%(levelname)s %(asctime)s [%(filename)s:%(lineno)d] %(message)s',
                    datefmt='%Y.%m.%d. %H:%M:%S', filemode="a")
# Take a single timestamp so `date` and `daytime` cannot disagree when the
# script starts right around midnight (the original sampled the clock twice).
_now = time.localtime()
date = time.strftime('%y-%m-%d', _now)
daytime = time.strftime('%H-%M', _now)

# Directory that per-run log files are written into (see record_log below).
logfile_path = "log data"
os.makedirs(logfile_path, exist_ok=True)  # idempotent; avoids the check-then-create race


def record_log(name_process):
    """Return the logger named *name_process* with a per-run file handler.

    The handler writes to ``{logfile_path}/{name}-{date}-{daytime}.log``.
    Fixes over the original:
      * a handler is only attached when the logger has none yet, so calling
        this repeatedly no longer duplicates every record in the file;
      * the file handler gets the same format/datefmt as ``basicConfig``,
        so file records match the console output instead of bare messages.
    """
    logger = logging.getLogger(name_process)
    if not logger.handlers:  # guard against duplicate handlers on repeated calls
        handler = logging.FileHandler(f"{logfile_path}/{name_process}-{date}-{daytime}.log")
        handler.setFormatter(logging.Formatter(
            '%(levelname)s %(asctime)s [%(filename)s:%(lineno)d] %(message)s',
            datefmt='%Y.%m.%d. %H:%M:%S'))
        logger.addHandler(handler)
    return logger


# Run on GPU 0 when CUDA is available, otherwise fall back to the CPU.
if torch.cuda.is_available():
    device = torch.device("cuda:0")
else:
    device = torch.device("cpu")
# Index of the first gas-mass CSV used for evaluation (export{map_choice + i}.csv).
map_choice = 14
# Forwarded to map_init / draw_elements_on_canvas; 0 disables flipping.
random_flip = 0


def evaluate_policy_offline(terminal, num_episodes: int = 5, sequence_length: int = 5):
    """Evaluate a pre-trained DRQN agent on the gas-map terminal.

    For each episode: draw a random start cell, re-initialise the terminal
    from a gas-mass CSV, then repeatedly combine the recurrent Q-network's
    Q-values with a per-branch "information entropy" score from the RRT
    local planner to choose the next path branch. Returns the mean episode
    return (``ep_return`` of Bot1) over ``num_episodes`` episodes.

    NOTE(review): blocks on ``input("go")`` every planning step — looks like
    a manual debugging pause; confirm before running unattended.
    """
    total_rewards = []
    agent = DRQN2.DQNAgent(state_size=26, action_size=len(terminal.action_dict.keys()),
                           sequence_length=sequence_length, batch_size=128)
    # Load pre-trained model weights for offline testing.
    agent.model_read(r"D:\Gas_detector\weights\25-06-20\exp-4")
    for i in range(num_episodes):
        # Random start cell; grid presumably 30 x 45 as (x, y) — TODO confirm
        start_point = (
            random.randint(0, 30 - 1), random.randint(0, 45 - 1))
        Bot_elements = {"Bot1": Map_init3.BOT("Bot1", start_point, 500)}
        print("起点", start_point)  # "start point"
        # map_random = random.randint(2, 3)
        # Each episode uses a different CSV: export{map_choice + i}.csv.
        terminal.map_init(rf"D:\Gas_detector\data\gas_mass_data\export{map_choice + i}.csv", update_=True,
                          random_flip=random_flip)
        initial_state = terminal.reset(bot_elements=Bot_elements)[0]
        state_sequence = deque(maxlen=sequence_length)
        # "end point": location of the maximum mass fraction, printed (x, y).
        print('终点',
              np.unravel_index(np.argmax(terminal.mass_fraction_map), terminal.mass_fraction_map.shape)[::-1])  # y,x)
        # Warm up the sequence buffer by repeating the initial observation.
        for _ in range(sequence_length):
            state_sequence.append(initial_state)
        # Initialise the recurrent hidden state.
        done = False
        hidden = agent.q_network.init_hidden(1)  # batch_size=1
        for t in range(Bot_elements['Bot1'].total_stride):
            state_tensor = torch.FloatTensor(np.array(state_sequence)).unsqueeze(0).to(device)  # [1, seq_len, 2, h, w]
            with torch.no_grad():
                q_values, hidden = agent.q_network(state_tensor, hidden)
                # action = q_values.argmax().item()  # pick the action with the highest Q value
            terminal.RRT_local_planner(terminal.element_collection["Bot1"].current_grid_pos)
            # Score each planner branch by its accumulated "information entropy".
            information_entropy = []
            for key in terminal.path_dict.keys():
                entropy = 0
                for step in terminal.path_dict[key]:
                    # step is indexed reversed ((x, y) -> (y, x)) into the score map.
                    # Falsy score (unvisited cell): use half the map maximum as an optimistic estimate.
                    if not terminal.quality_scores_map[tuple(step)[::-1]]:
                        entropy += np.max(terminal.mass_fraction_map) / 2
                    else:
                        entropy += terminal.quality_scores_map[tuple(step)[::-1]]
                information_entropy.append(entropy)
            print(torch.tensor(information_entropy).to(device), "分支信息和")  # branch information sums
            print(np.squeeze(q_values), "q_values")
            # Element-wise product: weight each branch's Q-value by its information score.
            choices = torch.tensor(information_entropy).to(device) * np.squeeze(q_values)
            print(choices, '信息求和')  # weighted scores
            action = choices.argmax()  # pick the branch with the highest weighted score
            input("go")  # NOTE(review): manual per-step pause — debugging aid?
            # Expand the chosen branch index into primitive actions and execute them
            # (the loop deliberately rebinds `action` to each primitive action).
            for action in terminal.path_2_action(action):
                next_state, _, done = terminal.step(action, "Bot1", view=True)
                state_sequence.append(next_state)
                terminal.draw_elements_on_canvas(
                    csv_path=rf"D:\Gas_detector\data\gas_mass_data\export{map_choice + i}.csv",
                    random_flip=random_flip)
                if done:
                    break
            if done:
                break
        total_rewards.append(terminal.element_collection["Bot1"].ep_return)
    return np.mean(total_rewards)


if __name__ == "__main__":
    logger = record_log("DRQN")

    # Single bot starting at the origin with a 500-step budget; the terminal
    # is a 30 x 45 grid on a 900 x 600 canvas with three rectangular obstacles.
    Bot_elements = {"Bot1": Map_init3.BOT("Bot1", (0, 0), 500)}
    terminal = Map_init3.Terminal(canvas_size=(900, 600), split_x_nums=30, split_y_nums=45,
                                  obstacle_list=[(0, 26, 15, 24), (14, 17, 26, 15), (14, 7, 15, 3)])
    terminal.render(Bot_elements)
    # Fixed message: this script evaluates a saved model, it does not train.
    print("start to evaluate DRQN-GRU model")
    # Record the result instead of discarding it (the logger was previously unused;
    # the unused reward_list/times accumulators were removed).
    mean_reward = evaluate_policy_offline(terminal=terminal)
    logger.info("mean episode reward: %s", mean_reward)
