#!/usr/bin/env python3
import argparse
import gymnasium as gym

from lib import model, kfac

import numpy as np
import torch
import ptan
import torch.optim as optim
import os

LEARNING_RATE_CRITIC = 1e-3


ENV_ID = "Walker2d-v4"

if __name__ == "__main__":
    # Play one episode of a trained ACKTR actor in a (rendered) Gymnasium env.
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--model", required=True, help="Model file to load")
    parser.add_argument("-e", "--env", default=ENV_ID, help="Environment name to use, default=" + ENV_ID)
    parser.add_argument("-r", "--record", help="If specified, sets the recording dir, default=Disabled")
    parser.add_argument("-s", "--save", type=int, help="If specified, save every N-th step as an image")
    parser.add_argument("--acktr", default=False, action='store_true', help="Enable Acktr-specific tweaks")
    args = parser.parse_args()
    device = torch.device("cpu")

    env = gym.make(args.env, render_mode="human")

    # Actor (action prediction) and critic (state-value) networks, sized from
    # the environment's observation/action spaces.
    net_act = model.ModelActor(env.observation_space.shape[0], env.action_space.shape[0]).to(device)
    net_crt = model.ModelCritic(env.observation_space.shape[0]).to(device)
    print(net_act)
    print(net_crt)

    # The actor uses the project's own KFAC optimizer (ACKTR-specific);
    # the critic uses a plain Adam optimizer.
    opt_act = kfac.KFACOptimizer(net_act)
    opt_crt = optim.Adam(net_crt.parameters(), lr=LEARNING_RATE_CRITIC)

    # NOTE(review): the checkpoint dir is hard-coded to "acktr-HalfCheetah"
    # while the default env is Walker2d — confirm it matches the training run.
    save_path = os.path.join("saves", "acktr-HalfCheetah")
    start_idx = 0
    # Resume critic/optimizer state from the most recent checkpoint, if any.
    if os.path.exists(save_path) and len(os.listdir(save_path)) > 0:
        # Checkpoint filenames contain "epoch"; the epoch number sits in the
        # third underscore-separated field, so sort numerically on it and take
        # the last (newest) entry.
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path)),
                             key=lambda x: int(x.split('_')[2].split('.')[0]))
        checkpoint = torch.load(os.path.join(save_path, checkpoints[-1]), map_location=device, weights_only=False)
        # Actor weights come from the explicitly supplied --model file; the
        # critic and its optimizer state are restored from the checkpoint.
        net_act.load_state_dict(torch.load(args.model))
        net_crt.load_state_dict(checkpoint['net_crt'])
        opt_crt.load_state_dict(checkpoint['opt_crt'])
        start_idx = checkpoint['start_idx']
        print("加载模型成功")

    # Roll out one episode with the deterministic policy mean, clipping the
    # action into the [-1, 1] range the environment expects.
    obs, _ = env.reset()
    total_reward = 0.0
    total_steps = 0
    while True:
        obs_v = ptan.agent.float32_preprocessor([obs])
        mu_v = net_act(obs_v)[0]
        action = mu_v.squeeze(dim=0).data.numpy()
        action = np.clip(action, -1, 1)
        obs, reward, done, trunc, _ = env.step(action)
        total_reward += reward
        total_steps += 1
        # BUG FIX: original condition was "if True or done or trunc", a debug
        # leftover that always broke the loop after a single step; the episode
        # now runs until termination or truncation.
        if done or trunc:
            break
    print("In %d steps we got %.3f reward" % (total_steps, total_reward))
    env.close()