import time
import numpy as np
from environment import DriverEnv
from agent_dqn import DqnAgent

# Experiment setup: checkpoint path, environment, and DQN agent.
path = "models/DQN_model.pt"

env = DriverEnv("maps/0")

# Agent hyper-parameters (kept identical to the trained checkpoint's config).
agent = DqnAgent(
    input_size=41,       # observation vector length
    output_size=25,      # number of discrete actions
    memory_capacity=8192,  # replay buffer size (1 * 1024 * 8)
    batch_size=1024,       # samples per learning step
    gamma=0.95,            # reward discount factor
    learning_rate=1e-3,
)
agent.load(path)  # resume from the saved checkpoint

# Training loop.
N = int(201)  # total number of training episodes
count = 50  # steps survived in the PREVIOUS episode; 50 seeds the first epoch's stp
rewards = 0  # cumulative reward of the PREVIOUS episode; used in the next epoch's stp
kmin = 0.4  # exploration probability at the start of each episode
kmax = 1  # exploration probability reached after ~stp steps
start_time = time.time()
for epoch in range(N):
    # k grows from kmin to kmax over stp steps, i.e. kmin * (rate ** stp) = kmax
    k = kmin  # probability of taking a random exploratory action
    # Expected number of steps the agent can survive this episode, scaled by how
    # well the previous episode went (tanh squashes last episode's total reward
    # into (-1, 1); tanh(rewards) + 1.1 stays positive so stp > 0).
    stp = count * (np.tanh(rewards) + 1.1) * 0.8
    rate = (kmax / kmin) ** (1 / stp)  # per-step multiplicative growth factor for k
    state = env.reset()
    count = 0
    rewards = 0
    while True:
        action = agent.choose_action(state)
        if np.random.uniform() < k:  # with probability k, override with a random action
            action = env.action_space.sample()
        # NOTE(review): env.step takes training progress (epoch / N) as a second
        # argument — presumably for curriculum/difficulty scaling; confirm in DriverEnv.
        next_state, reward, done, _ = env.step(action, epoch / N)
        agent.remember(state, action, reward, next_state, done)
        agent.learn()  # one gradient/learning step per environment step
        state = next_state
        count += 1
        k *= rate  # NOTE(review): k is never clamped, so it exceeds kmax if the
        # episode outlasts stp steps (exploration then becomes near-certain)
        rewards += reward
        if done:
            break
    # Progress report: elapsed time and a linear projection of the total runtime.
    print(
        "epoch:",
        epoch + 1,
        "/",
        N,
        " , time:",
        time.time() - start_time,
        "/",
        (time.time() - start_time) * N / (epoch + 1),
        " , count:",
        count,
    )
    if (epoch+1) % 100 == 0:
        agent.save(path)  # checkpoint every 100 episodes (epochs 100 and 200 with N=201)
        print("save model")
    if (epoch+1) % 25 == 0:
        env.mapGenerator()  # regenerate the training map every 25 episodes
# Save
# agent.save("DQN_model.pt")