import ray
import numpy as np
import random
from ray.rllib import agents

# Train a PPO agent on the custom job-shop scheduling environment (JssEnv)
# with RLlib, print the mean episode reward for one training iteration,
# then release Ray's resources.

# Make sure no stale Ray session from a previous run is still alive.
# (PPOTrainer will initialize Ray itself if it is not already running.)
ray.shutdown()

# Alternative trainers for quick experiments on CartPole:
#   trainer = agents.dqn.DQNTrainer(env='CartPole-v0')  # Deep Q-Network
#   trainer = agents.a3c.A2CTrainer(env='CartPole-v0')  # A2C
# A richer PPO config could look like:
#   config = {'gamma': 0.9, 'lr': 1e-2, 'num_workers': 4,
#             'train_batch_size': 1000,
#             'model': {'fcnet_hiddens': [128, 128]}}

from envs.JssEnv import JssEnv as env  # custom job-shop scheduling Gym env

config = {'num_workers': 4}
trainer = agents.ppo.PPOTrainer(env=env, config=config)

max_training_episodes = 10  # stop threshold if you switch to the loop below

try:
    # Single training iteration.  For a longer run, loop until a stop
    # condition of your choice, e.g.:
    #     while True:
    #         results = trainer.train()
    #         if results['episodes_total'] >= max_training_episodes:
    #             break
    results = trainer.train()
    print(f"Mean Rewards:\t{results['episode_reward_mean']:.1f}")
    # trainer.export_policy_model("./model")  # optionally save the policy
finally:
    # Always release Ray workers/resources, even if training raises.
    ray.shutdown()