import numpy as np
import torch
from tqdm import tqdm   # pip install tqdm

from agent import DQNAgent
from config import *
from env import JammingEnv

# def main():
#     env = JammingEnv()
#
#     # 第一层：选子频段
#     agent1 = DQNAgent(env.n_sub, env.n_sub)
#     # 第二层：选带宽（11 档）
#     agent2 = DQNAgent(env.n_sub + 1 + len(env.targets), BAND_OPTS)
#
#     for episode in range(MAX_EPISODE):
#         state = env.reset()
#         s1 = state["s1"]
#         total_r = 0
#         done = False
#         while not done:
#             # —— 决策子频段 ——
#             a1 = agent1.act(s1)
#             # —— 决策带宽 ——
#             s2 = np.concatenate([s1, [a1], state["s2_base"]]).astype(np.float32)
#             a2 = agent2.act(s2)
#
#             s1_, r, done = env.step(a1, a2)
#             s1_next = s1_["s1"]
#             s2_next = np.concatenate([s1_next, [a1], s1_["s2_base"]])
#
#             # 存储
#             agent1.store(s1, a1, r, s1_next, done)
#             agent2.store(s2, a2, r, s2_next, done)
#             # 训练
#             agent1.update()
#             agent2.update()
#
#             s1 = s1_next
#             total_r += r
#
#         # 简易专家轨迹规则：total_r 高就加入 expert
#         if total_r > 100:
#             for t in agent1.memory.buf[-20:]:
#                 agent1.store(*t, expert=True)
#             for t in agent2.memory.buf[-20:]:
#                 agent2.store(*t, expert=True)
#
#         if episode % 100 == 0:
#             print(f"Ep{episode:5d}  R={total_r:6.1f}  Band={env.total_band:5.1f}  Jammed={sum(env.jammed)}")
#
#     torch.save(agent1.q_net.state_dict(), "agent1.pth")
#     torch.save(agent2.q_net.state_dict(), "agent2.pth")
#     print("Training finished.")

def main():
    """Train the hierarchical two-agent DQN jammer.

    agent1 (level 1) selects a sub-band; agent2 (level 2) selects a
    bandwidth option, conditioned on agent1's choice plus target features.
    Checkpoints are written every 1000 episodes and once more after the
    final episode so no training progress is lost.
    """
    env = JammingEnv()

    # Level 1: sub-band selection (state dim = action dim = n_sub).
    agent1 = DQNAgent(env.n_sub, env.n_sub)
    # Level 2: bandwidth selection; state = level-1 state + chosen
    # sub-band index + per-target features.
    agent2 = DQNAgent(env.n_sub + 1 + len(env.targets), BAND_OPTS)

    # Progress bar over training episodes.
    pbar = tqdm(range(MAX_EPISODE), desc="Training", ncols=100)

    for episode in pbar:
        state = env.reset()
        s1 = state["s1"]
        s2_base = state["s2_base"]
        total_r = 0.0
        steps = 0
        done = False

        while not done:
            # Level-1 decision: pick a sub-band.
            a1 = agent1.act(s1)
            # Level-2 decision: pick a bandwidth given the chosen sub-band.
            s2 = np.concatenate([s1, [a1], s2_base]).astype(np.float32)
            a2 = agent2.act(s2)

            next_state, r, done = env.step(a1, a2)
            s1_next = next_state["s1"]
            s2_next = np.concatenate(
                [s1_next, [a1], next_state["s2_base"]]
            ).astype(np.float32)

            # Store transitions and train both agents every step.
            agent1.store(s1, a1, r, s1_next, done)
            agent2.store(s2, a2, r, s2_next, done)
            agent1.update()
            agent2.update()

            # Advance BOTH state components. The original code only
            # advanced s1 and kept reusing the reset-time s2_base, so
            # agent2 acted on a stale state after the first step.
            s1 = s1_next
            s2_base = next_state["s2_base"]
            total_r += r
            steps += 1

        # Refresh the progress-bar postfix with the latest episode stats.
        pbar.set_postfix({
            "ep": episode,
            "reward": f"{total_r:.2f}",
            "band": f"{env.total_band:.2f}",
            "jammed": f"{sum(env.jammed)}/{len(env.targets)}"
        })

        # Periodic checkpoint every 1000 episodes (including episode 0).
        if episode % 1000 == 0:
            torch.save(agent1.q_net.state_dict(), "agent1.pth")
            torch.save(agent2.q_net.state_dict(), "agent2.pth")

    # Final checkpoint: without this, up to 999 episodes of training
    # after the last periodic save would be discarded.
    torch.save(agent1.q_net.state_dict(), "agent1.pth")
    torch.save(agent2.q_net.state_dict(), "agent2.pth")


if __name__ == "__main__":
    main()