import torch
import deep_Q_network as dqn
import parallel_upgrades_env as pue
import graph_neural_network as GNN
from collections import defaultdict
import numpy as np
import matplotlib.pyplot as plt


# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = torch.device("cpu")
USE_GNN = True  # True: evaluate the GNN-based agent; False: plain DQN agent

num_components = 5
max_versions = 10
time_windows = [10, 15, 12, 10, 12, 10]  # length of each time window
# NOTE(review): 6 time windows for 5 components — confirm this is intended
# (the env receives time_windows independently of num_components).
max_parallel = 3
p_one = 0.2   # probability an entry of the jump matrix is 1 (20% ones, 80% zeros)
dep_matrix = pue.generate_feasible_dependency_matrix(num_components, max_versions, dep_prob=0.1)
jump_matrix = np.random.choice([0, 1],
    size=(num_components, max_versions, max_versions),
    p=[1-p_one, p_one])
env = pue.ParallelUpgradeEnv(num_components, time_windows, dep_matrix, jump_matrix, max_parallel, device)

if USE_GNN:
    # GNN-based agent: graph node feature size is 2 * max version count + 1
    # (see VersionedDependencyGraph for the exact layout — TODO confirm).
    agent = dqn.GNNDQNAgent(graph_dim = 2 * max(env.max_versions) + 1,
        graph_hid_dim   = 32,
        q_net_hid_dim   = 64,
        agent_feat_dim  = env.feats_dim,        # D returned by env.get_state_action_features()
        device          = device)
    dqn.load_agent(agent, './gnn_dqn_agent.pth', map_location=device)
else:
    # FIX: was `pue.env.feats_dim` — `env` is a script-local variable, not an
    # attribute of the `parallel_upgrades_env` module, so the original line
    # raised AttributeError whenever USE_GNN was False.
    agent = dqn.DQNAgent(feature_dim=env.feats_dim, hidden_dim=64, device=device)
    dqn.dqn_load_agent(agent, './dqn_agent.pth', map_location=device)
vdg = GNN.VersionedDependencyGraph(env.dependency_matrix, env.jump_matrix, env.upgrade_time, device)

NUM_TESTS = 500
# Scalar per-episode metrics; one entry is appended per evaluation run.
makespan_list, jump_ratio_list, parallel_util_list = [], [], []
# Upper bound on the number of time steps in one episode.
T_max = sum(env.time_windows)

# Per-episode time series; each inner list is later aligned to T_max.
parallel_actions_all = []      # 0/1 flags: did this step perform a real action?
cumulative_completed_all = []  # running count of completed components per step
delta_completed_all = []       # number of components newly completed at each step
comp_completion_times = defaultdict(list)  # comp_id -> completion times across runs

for i in range(NUM_TESTS):
    # Sample a fresh dependency matrix D and jump matrix J for this episode.
    dep_matrix = pue.generate_feasible_dependency_matrix(num_components, max_versions, dep_prob=0.1)
    jump_matrix = np.random.choice([0, 1],
        size=(num_components, max_versions, max_versions),
        p=[1-p_one, p_one])

    env = pue.ParallelUpgradeEnv(num_components, time_windows, dep_matrix, jump_matrix, max_parallel, device)
    # FIX: rebuild the versioned dependency graph for the freshly sampled
    # matrices. Previously the graph built once before this loop was reused
    # for every episode, so the GNN scored actions against stale
    # dependency/jump structure that no longer matched the environment.
    vdg = GNN.VersionedDependencyGraph(env.dependency_matrix, env.jump_matrix, env.upgrade_time, device)

    # Per-episode traces.
    parallel_actions = []        # 1 if a real upgrade action was taken, 0 if skipped
    cumulative_completed = []    # total components completed up to each step
    delta_completed = []         # components newly completed at each step

    # Track first completion per component so each is counted only once:
    # once a component reaches its highest version it is flagged True.
    comp_done_flag = [False] * env.num_components
    completed_so_far = 0  # components completed up to the current time step

    noop_idx = env.noop_index  # by convention the last action index means "skip"

    # Reset the environment and build the first decision inputs.
    state = env.reset()
    cur_versions = torch.tensor(env.state, dtype=torch.long, device=device)
    graph_data = vdg.build_graph(cur_versions)
    valid_idxs, feats = env.get_state_action_features()
    feats = torch.tensor(feats, device=device)
    done = False

    while not done:
        if USE_GNN:
            pick = agent.select_action(graph_data, feats, is_training=False)
            action = valid_idxs[pick]
        else:
            feats_idx = agent.select_action(feats, is_training=False)
            action = valid_idxs[feats_idx]

        # 1) Record whether this step performed a real action (1) or a skip (0).
        parallel_actions.append(0 if action == noop_idx else 1)

        # 2) Snapshot the completed count before stepping so the per-step
        #    delta can be derived afterwards.
        completed_prev = completed_so_far

        # 3) Advance the environment one step.
        _, _, done, _ = env.step(action, is_need_visualization=False)
        env.render(done)

        # 4) A component counts as "just completed" the first time its state
        #    reaches its highest version index (max_versions[c] - 1).
        for c in range(env.num_components):
            if (env.state[c] == env.max_versions[c] - 1) and (not comp_done_flag[c]):
                comp_done_flag[c] = True
                completed_so_far += 1

        # 5) Cumulative completions up to and including this step.
        cumulative_completed.append(completed_so_far)

        # 6) Newly completed this step.
        delta_completed.append(completed_so_far - completed_prev)

        # Refresh inputs for the next decision.
        cur_versions = torch.tensor(env.state, dtype=torch.long, device=device)
        graph_data = vdg.build_graph(cur_versions)
        valid_idxs, feats = env.get_state_action_features()
        feats = torch.tensor(feats, device=device)

    # Episode-level metrics.
    makespan_list.append(env.total_time)
    # NOTE(review): assumes env.action_count > 0 after any episode — confirm,
    # otherwise this divides by zero.
    jump_ratio_list.append(env.skip_count / env.action_count)
    parallel_util_list.append(env.parallel_util_timeline.copy())  # per-step utilization
    for cid, t in env.completion_times.items():
        comp_completion_times[cid].append(t)

    # Align the per-step traces to exactly T_max entries so all episodes line
    # up for visualization: actions/deltas pad with 0 (treated as skips), the
    # cumulative curve holds its final value.
    L = len(parallel_actions)  # number of time steps actually executed
    if L < T_max:
        parallel_actions.extend([0] * (T_max - L))
        # Guard the empty-episode edge case (L == 0) that would otherwise
        # raise IndexError on cumulative_completed[-1].
        last_cum = cumulative_completed[-1] if cumulative_completed else 0
        cumulative_completed.extend([last_cum] * (T_max - L))
        delta_completed.extend([0] * (T_max - L))
    elif L > T_max:
        # Episodes longer than T_max are simply truncated.
        parallel_actions = parallel_actions[:T_max]
        cumulative_completed = cumulative_completed[:T_max]
        delta_completed = delta_completed[:T_max]

    # Append this episode's aligned traces to the global collections.
    parallel_actions_all.append(parallel_actions)
    cumulative_completed_all.append(cumulative_completed)
    delta_completed_all.append(delta_completed)

# Stack the per-episode traces into (num_episodes, T_max) arrays for aggregation.
parallel_actions_np = np.asarray(parallel_actions_all)
cumulative_completed_np = np.asarray(cumulative_completed_all)
delta_completed_np = np.asarray(delta_completed_all)

# 1. Makespan statistics across all evaluation episodes.
print("Avg Makespan:", np.mean(makespan_list), "Std:", np.std(makespan_list))

# 2. Jump-ratio statistics, plus per-step skip-vs-upgrade ratios.
print("Avg Jump Ratio:", np.mean(jump_ratio_list), "Std:", np.std(jump_ratio_list))
skip_ratio = (parallel_actions_np == 0).mean(axis=0)     # fraction of episodes skipping at t
upgrade_ratio = (parallel_actions_np == 1).mean(axis=0)  # fraction upgrading at t
time_steps = np.arange(T_max)

fig, ax = plt.subplots(figsize=(8, 4))
ax.plot(time_steps, skip_ratio, label='Skip Ratio (0)')
ax.plot(time_steps, upgrade_ratio, label='Upgrade Ratio (1)')
ax.set_xlabel('Time Step')
ax.set_ylabel('Ratio')
ax.set_title('Skip vs Upgrade Operation Ratio Over Time')
ax.legend()
ax.grid(linestyle='--', alpha=0.5)
fig.tight_layout()
plt.show()


# 3. Parallel-utilization heatmap over time.
# Episodes can have different lengths, so align all of them to the LONGEST
# one and pad the tail of shorter episodes with NaN (rendered as blank cells).
# FIX: use a dedicated name instead of reassigning T_max — the original
# shadowed the episode-length bound computed earlier, which is confusing and
# fragile for any later use of T_max.
T_heat = max(len(seq) for seq in parallel_util_list)
K = len(parallel_util_list)  # number of episodes (rows)

# Build the (episodes x time) matrix; NaN marks steps past an episode's end.
U = np.full((K, T_heat), np.nan)
for k, seq in enumerate(parallel_util_list):
    U[k, :len(seq)] = seq  # valid values on the left, NaN padding on the right

plt.figure(figsize=(10, 6))
plt.imshow(U, aspect='auto', cmap='YlGnBu', origin='lower')
plt.colorbar(label='Utilization')
# FIX: the original labels said "truncated"/"first N timesteps", but the data
# is padded with NaN up to the maximum episode length, not truncated.
plt.xlabel("Time Step (padded to {})".format(T_heat))
plt.ylabel("Episode Index")
plt.title("Parallel Slots Utilization Heatmap ({} timesteps)".format(T_heat))
plt.show()

# 4. Per-component completion time (mean ± std across episodes).
means, stds = [], []
for cid in range(num_components):
    times = comp_completion_times.get(cid, [])
    if times:
        means.append(np.mean(times))
        stds.append(np.std(times))
    else:
        # Placeholder for a component that never completed in any episode
        # (unlikely, but kept for safety).
        means.append(np.nan)
        stds.append(0.0)

x = np.arange(num_components)  # component indices 0 .. N-1

fig, ax = plt.subplots(figsize=(10, 5))
ax.bar(x, means, yerr=stds, capsize=5, color='skyblue', edgecolor='black')
ax.set_xticks(x)
ax.set_xticklabels([f"C{cid}" for cid in range(num_components)], fontsize=10)
ax.set_xlabel("Component ID", fontsize=12)
ax.set_ylabel("Average Completion Time", fontsize=12)
ax.set_title("Component-wise Makespan (Mean ± Std)", fontsize=14)
ax.grid(axis="y", linestyle="--", alpha=0.5)
fig.tight_layout()
plt.show()

# 5. Cumulative completion curve: mean with a ±1 std band across episodes.
avg_cum = cumulative_completed_np.mean(axis=0)  # (T_max,)
std_cum = cumulative_completed_np.std(axis=0)   # (T_max,)

fig, ax = plt.subplots(figsize=(8, 4))
ax.plot(time_steps, avg_cum, label='Avg Completed')
ax.fill_between(time_steps,
                avg_cum - std_cum,
                avg_cum + std_cum,
                color='lightgreen', alpha=0.4,
                label='±1 Std Dev')
ax.set_xlabel('Time Step')
ax.set_ylabel('Number of Components Completed')
ax.set_title('Cumulative Completed Components Over Time')
ax.legend()
ax.grid(linestyle='--', alpha=0.5)
fig.tight_layout()
plt.show()

# 6. Per-step upgrade rate: components finishing at each time step,
#    averaged over episodes with std error bars.
#    delta_completed_np[e][t] = components completed in episode e at step t.
avg_delta = delta_completed_np.mean(axis=0)  # (T_max,)
std_delta = delta_completed_np.std(axis=0)   # (T_max,)

fig, ax = plt.subplots(figsize=(8, 4))
ax.bar(time_steps, avg_delta, yerr=std_delta, capsize=3, color='orange', edgecolor='black')
ax.set_xlabel('Time Step')
ax.set_ylabel('Components Completed at This Step')
ax.set_title('Average Completed-Per-Step Counts Across Episodes')
ax.grid(axis='y', linestyle='--', alpha=0.5)
fig.tight_layout()
plt.show()

# 7. Dependency-graph embedding visualization (disabled)

# NOTE: disabled — the Python version available here seems insufficient for this (t-SNE)
# 假设 gnn_embeddings 是一个形状为 (N, G) 的矩阵，
# N 为组件总数，G 为 GNN 隐藏维度（本例中为 32）
# embeddings = gnn_embeddings.cpu().detach().numpy()  # (N, 32)
# tsne = TSNE(n_components=2, perplexity=30, random_state=42)
# proj = tsne.fit_transform(embeddings)  # (N, 2)

# plt.figure(figsize=(6, 6))
# plt.scatter(proj[:, 0], proj[:, 1], c=current_versions, cmap='tab10', alpha=0.8)
# plt.colorbar(label='Component Current Version')
# plt.title('t-SNE Projection of GNN Embeddings')
# plt.xlabel('TSNE-1')
# plt.ylabel('TSNE-2')
# plt.tight_layout()
# plt.show()

