from env.FutureEnv import FutureTradingEnv
from stable_baselines3 import PPO
from stable_baselines3.common.env_util import make_vec_env
from PPO_model.CustomActorCriticPolicy import CustomTradingExtractor, policy_kwargs, CustomActorCriticPolicy


def PPO_(data):
    """Build a PPO agent for the futures-trading environment.

    Wraps ``data`` in four parallel ``FutureTradingEnv`` instances and
    constructs a stable-baselines3 PPO model using the project's custom
    actor-critic policy and feature-extractor settings.

    Args:
        data: Market data passed straight through to ``FutureTradingEnv``
            (schema defined by the env; not inspected here).

    Returns:
        An untrained ``stable_baselines3.PPO`` model; callers are expected
        to invoke ``model.learn(...)`` themselves.
    """
    # Vectorize the env (4 copies) so PPO collects rollouts in parallel.
    env = make_vec_env(lambda: FutureTradingEnv(data), n_envs=4)

    # Custom policy kwargs (feature extractor / net arch) come from the
    # project's CustomActorCriticPolicy module.
    policy_kwarg = policy_kwargs()

    # Hyperparameters tuned for this task:
    #   learning_rate=5e-4  — higher LR for faster adaptation
    #   n_steps=2048        — longer rollout per update
    #   clip_range=0.1      — tighter clipping for more conservative updates
    #   ent_coef=0.01       — small entropy bonus to encourage exploration
    model = PPO(
        CustomActorCriticPolicy,
        env,
        verbose=1,
        policy_kwargs=policy_kwarg,
        learning_rate=5e-4,
        n_steps=2048,
        gae_lambda=0.95,
        gamma=0.99,
        clip_range=0.1,
        ent_coef=0.01,
        batch_size=128,
    )

    return model

