import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
import parallel_upgrades_env as pue
import rl_utils
import Q_learning as ql
import deep_Q_network as dqn
import torch
import graph_neural_network as GNN




# --- 1) Initialize the environment & the GNN-DQN agent ---
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device = torch.device("cpu")
env = pue.env  # env is already constructed inside the pue module
vdg = GNN.VersionedDependencyGraph(env.dependency_matrix, env.jump_matrix, env.upgrade_time, device)


# GNN + DQN agent
agent = dqn.SimpleAttentionGNNDQNAgent(
    graph_dim       = 2 * max(env.max_versions) + 1,
    graph_hid_dim   = 32,
    q_net_hid_dim   = 64,
    agent_feat_dim  = env.feats_dim,        # feature dim D returned by env.get_state_action_features
    lr              = 2e-3,
    gamma           = 0.1,                  # NOTE(review): gamma=0.1 is a very short horizon for DQN — confirm intentional
    eps             = 0.1,
    target_update   = 200,
    device          = device
)

num_episodes = 100
return_list = []           # cumulative reward of each episode
makespan_list = []         # completion time of each episode
skip_count_list = []       # number of skip actions in each episode
avg_parallel_list = []     # average parallel-slot utilization of each episode

# --- 2) Training loop: 10 outer iterations x (num_episodes // 10) episodes ---
for outer in range(10):
    with tqdm(total=num_episodes // 10, desc=f"Iter {outer}") as pbar:
        for inner in range(num_episodes // 10):
            episode_return = 0
            _ = env.reset()
            done = False

            # Extract the initial graph & the per-action feature matrix.
            cur_versions = torch.tensor(env.state, dtype=torch.long, device=device)
            graph_data = vdg.build_graph(cur_versions)
            valid_idxs, feats = env.get_state_action_features()  # feats: np.ndarray (K, D)
            feats = torch.tensor(feats, device=device)

            while not done:
                # 1) Pick an action with the GNN+DQN agent.
                pick = agent.select_action(graph_data, feats, is_training=True)
                action = valid_idxs[pick]

                # 2) Interact with the environment.
                _, reward, done, info = env.step(action)
                episode_return += reward

                # 3) Extract the next graph & features.
                # BUGFIX: rebuild the graph from the *updated* env.state.
                # Previously the stale pre-step `cur_versions` tensor was
                # reused, so the "next" graph stored in the replay buffer
                # was identical to the current graph on every transition.
                cur_versions = torch.tensor(env.state, dtype=torch.long, device=device)
                next_graph = vdg.build_graph(cur_versions)
                valid_next, feats_next = env.get_state_action_features()
                feats_next = torch.tensor(feats_next, device=device)

                # 4) Store the transition in the replay buffer.
                agent.store(
                    graph_data, feats, pick,
                    reward,
                    next_graph, feats_next,
                    done
                )

                # 5) DQN+GNN update step.
                agent.update()

                # 6) Roll everything forward for the next step.
                graph_data, valid_idxs, feats = next_graph, valid_next, feats_next

            return_list.append(episode_return)
            # Fall back to the env clock when no makespan is reported.
            makespan = info.get('makespan', env.current_time)
            skip_cnt = env.skip_count
            # Mean occupied slots per step, normalized by the parallel capacity.
            avg_parallel_usage = np.mean(env.parallel_usage_history) / env.max_parallel

            makespan_list.append(makespan)
            skip_count_list.append(skip_cnt)
            avg_parallel_list.append(avg_parallel_usage)

            # Report running averages over the last 10 episodes.
            if (inner + 1) % 10 == 0:
                avg_ret = np.mean(return_list[-10:])
                avg_makespan = np.mean(makespan_list[-10:])
                avg_skip = np.mean(skip_count_list[-10:])
                avg_par = np.mean(avg_parallel_list[-10:])
                pbar.set_postfix({
                    'episode': f'{(num_episodes//10)*outer + inner + 1}',
                    'return':  f'{avg_ret:.3f}',
                    'makespan': f'{avg_makespan:.1f}',
                    'skip_cnt': f'{avg_skip:.1f}',
                    'par_util': f'{avg_par:.2f}'
                })
            pbar.update(1)



# --- 3) Greedy (eps-free) rollout with the trained agent, visualized ---
# dqn.save_agent(agent, './gnn_dqn_agent.pth')
# BUGFIX: reset the environment *before* extracting the initial graph and
# action features.  Previously they were read from the stale post-training
# state and reset only afterwards, so the first action was selected from
# features inconsistent with the actual (reset) environment.
obs = env.reset()
done = False
cur_versions = torch.tensor(env.state, dtype=torch.long, device=device)
graph_data = vdg.build_graph(cur_versions)
valid_idxs, feats = env.get_state_action_features()
feats = torch.tensor(feats, device=device)

while not done:
    # Greedy action selection (is_training=False disables exploration).
    pick = agent.select_action(graph_data, feats, is_training=False)
    action = valid_idxs[pick]
    _, _, done, _ = env.step(action, is_need_visualization=True)
    env.render(done)

    # Refresh graph & features for the next step.
    cur_versions = torch.tensor(env.state, dtype=torch.long, device=device)
    graph_data = vdg.build_graph(cur_versions)
    valid_idxs, feats = env.get_state_action_features()
    feats = torch.tensor(feats, device=device)


# --- Plot learning curves ---
def moving_average(x, window=20):
    """Return the simple moving average of *x* over a sliding *window*.

    Uses 'valid' mode, so the result has len(x) - window + 1 points.
    """
    kernel = np.ones(window) / window
    return np.convolve(x, kernel, mode='valid')

episodes = np.arange(len(return_list))
ma = moving_average(return_list, window=20)

# 1) Convergence curve: Return vs Episodes.
# FIX: `ma` was computed but never plotted — overlay the 20-episode moving
# average on top of the raw per-episode returns.
plt.figure(figsize=(6,4))
plt.plot(episodes, np.array(return_list), alpha=0.4, label='return')
plt.plot(episodes[len(episodes) - len(ma):], ma, label='moving avg (w=20)')
plt.xlabel('Episode')
plt.ylabel('Return')
plt.title('Return over Episodes')
plt.legend()
plt.grid(True)
plt.tight_layout()
plt.show()

# 2) Completion time (makespan) vs episodes.
fig, ax = plt.subplots(figsize=(6, 4))
ax.plot(episodes, np.asarray(makespan_list))
ax.set_xlabel('Episode')
ax.set_ylabel('Makespan (time steps)')
ax.set_title('Makespan over Episodes')
ax.grid(True)
fig.tight_layout()
plt.show()

# 3) Skip-action count vs episodes.
fig, ax = plt.subplots(figsize=(6, 4))
ax.plot(episodes, np.asarray(skip_count_list))
ax.set_xlabel('Episode')
ax.set_ylabel('Skip Count')
ax.set_title('Skip Count over Episodes')
ax.grid(True)
fig.tight_layout()
plt.show()

# 4) Average parallel-slot utilization vs episodes.
fig, ax = plt.subplots(figsize=(6, 4))
ax.plot(episodes, np.asarray(avg_parallel_list))
ax.set_xlabel('Episode')
ax.set_ylabel('Average Parallel Utilization')
ax.set_title('Parallel Utilization over Episodes')
ax.grid(True)
fig.tight_layout()
plt.show()