import torch
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from pathlib import Path
from agent import DQNAgent
from environment import GridWorld
from datetime import datetime

# Hyperparameters
state_size = 2  # state is the (row, col) coordinate of the agent
action_size = 5
hidden_size = 16
lr = 0.05
gamma = 0.90
buffer_init_size = 1000  # minimum replay-buffer fill before training starts
buffer_capacity = 1000
batch_size = 64
update_intervals = 5  # target-network sync period, in environment steps
epsilon = 0.5
episodes = 500
# Decay the learning rate at every 10% of training. Use integer division so the
# milestones are epoch indices (ints), not floats like 50.0.
lr_milestones = [episodes // 10 * i for i in range(1, 10)]

# Create the 5x5 GridWorld environment and the DQN agent.
env = GridWorld(size=5)
agent = DQNAgent(
    state_size,
    action_size,
    hidden_size,
    lr,
    lr_milestones,
    gamma,
    buffer_init_size,
    buffer_capacity,
    batch_size,
)


# Evaluate the trained network on every grid state to build the full Q-table.
def get_Q_table(size=5):
    """Return the agent's Q-values as a ``(size * size, action_size)`` array.

    Args:
        size: side length of the square grid; defaults to 5 to match the
            hard-coded behavior this function originally had.

    The Q-row for state ``(i, j)`` is stored at index ``i * size + j``.
    """
    Q = np.zeros((size * size, action_size), dtype=float)
    with torch.no_grad():  # pure evaluation: no gradients needed
        for i in range(size):
            for j in range(size):
                # Named `state_t` instead of `input` to avoid shadowing the builtin.
                state_t = torch.tensor([i, j], device=agent.device).float()
                Q[i * size + j] = agent.model(state_t).cpu().numpy()
    return Q


# Load the optimal Q-values (computed by value iteration) for comparison.
optimal_Q = np.load("./optimal_qv_VI.npy")

# Set up a timestamped results directory and the CSV training log.
Path("./result").mkdir(exist_ok=True)
save_dir = Path("./result") / datetime.now().strftime(r"%Y-%m-%d-%H-%M-%S")
save_dir.mkdir()  # timestamped name, so it should not already exist
log_path = save_dir / "log.csv"
# "w" creates the file, so no separate touch() is needed; the directory is
# fresh, so truncating is safe.
with open(log_path, "w", encoding="utf8") as f:
    f.write("Episode,TD误差,Q误差,学习率,epsilon\n")
logging_data = {"episode": [], "TD-loss": [], "Q-loss": [], "lr": [], "epsilon": []}


# Training loop
for episode in tqdm(range(episodes)):
    env.reset(random=False)
    state = env.state
    done = False
    batch_idx = 0  # number of environment steps taken this episode
    loss = 0  # accumulated TD loss over the episode

    # Roll out one episode, training the agent after every environment step.
    while True:
        # Off-policy: the behavior policy picks actions uniformly at random.
        """off policy
        behavior policy: 随机选取动作"""
        action = np.random.choice(5)
        done = env.step(action)[-1]
        l = agent.train()
        # agent.train() presumably returns None while the replay buffer is
        # still warming up — TODO confirm against DQNAgent.train.
        loss = loss if l is None else loss + l
        batch_idx += 1

        # Periodically sync the target network with the online network.
        if batch_idx % update_intervals == 0:
            agent.update_target_model()

        # Stop at episode end, or cap the episode length at ~100 steps.
        if done or batch_idx > 100:
            break

    # Log per-episode metrics, both in memory and to the CSV file.
    with open(log_path, "a+", encoding="utf8") as f:
        logging_data["episode"].append(episode)
        # NOTE(review): divides by batch_idx + 1 although batch_idx already
        # equals the number of steps taken — confirm the extra +1 is intended.
        logging_data["TD-loss"].append(loss / (batch_idx + 1))
        Q = get_Q_table()
        # L1 distance between the learned Q-table and the optimal one.
        q_loss = np.abs(Q - optimal_Q).sum()
        logging_data["Q-loss"].append(q_loss)
        lr = agent.get_lr()
        logging_data["lr"].append(lr)
        logging_data["epsilon"].append(epsilon)
        f.write(
            f"{episode+1},{round(loss/(batch_idx+1),4)},{round(q_loss,4)},{round(lr,4)},{round(epsilon,4)}\n"
        )

    # Save checkpoints every 10% of training, but only in the second half.
    if (episode + 1) % (episodes // 10) == 0 and episode >= 0.5 * episodes:
        torch.save(agent.model.state_dict(), save_dir / f"model_e{episode+1}.pth")
        torch.save(agent.target_model.state_dict(), save_dir / f"target_e{episode+1}.pth")

    # Step the LR scheduler once per episode (decays at lr_milestones).
    agent.scheduler.step()


# Save the final weights: a timestamped copy plus the canonical optimal file.
torch.save(agent.model.state_dict(), save_dir / "latest.pth")
torch.save(agent.model.state_dict(), "optimal_DQN.pth")

# Draw the four training curves in a 2x2 grid, one metric per subplot.
episode_axis = logging_data["episode"]
for position, metric in enumerate(("TD-loss", "Q-loss", "lr", "epsilon"), start=1):
    plt.subplot(2, 2, position)
    plt.plot(episode_axis, logging_data[metric])
    plt.title(metric)
    plt.grid(True, axis="both")
plt.savefig(save_dir / "curves.svg")
plt.show()
