from stable_baselines3 import PPO
from environment import SysEnv
import torch
import numpy as np
from gym.utils.env_checker import check_env

# Build the environment with its maximum data / transmit-energy capacities
# and a coefficient of 1.0 (meaning defined by SysEnv's constructor).
env = SysEnv(SysEnv.MAX_DATA, SysEnv.MAX_ENERGY_trans, 1.0)

# print(env.reset(seed=123))
# print(check_env(env))         # verify that the base env follows the gym API


episodes = 50

# NOTE: loop variable renamed to `episode` — the original reused `episodes`,
# shadowing the episode count defined above.
for episode in range(episodes):
    done = False
    obs = env.reset()
    while not done:
        # Sample a random action, then clip it into [0, upper_bound] where
        # the upper bound depends on the current observation's data state.
        random_action = env.action_space.sample()
        upper_bound = SysEnv.data_upper_bound(obs)
        action = torch.clamp(torch.tensor(random_action), 0, upper_bound)
        print(f'random_action:{random_action}')
        print(f'upper_bound:{upper_bound}')
        print(f'action:{action}')
        obs, reward, done, _ = env.step(action)
        print(f'reward:{reward}')
        print(f'obs:{obs}')

# Release any resources held by the environment (renderers, sockets, etc.).
env.close()