zjowowen commited on
Commit
1146e45
1 Parent(s): 492da09

Upload policy_config.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. policy_config.py +106 -0
policy_config.py ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Experiment configuration for training AlphaZero on TicTacToe in
# "play with bot" mode (exported via huggingface_hub).
#
# Two top-level sections:
#   - 'main_config':   environment settings, policy/model hyperparameters,
#                      and wandb logging switches.
#   - 'create_config': component registry types and import paths used by the
#                      framework to instantiate env/policy/collector/evaluator.
exp_config = {
    'main_config': {
        'exp_name': 'TicTacToe-play-with-bot-AlphaZero',
        'seed': 0,
        'env': {
            'env_id': 'TicTacToe-play-with-bot',
            'board_size': 3,
            'battle_mode': 'play_with_bot_mode',
            'bot_action_type': 'v0',
            'channel_last': False,
            'collector_env_num': 8,
            'evaluator_env_num': 5,
            'n_evaluator_episode': 5,
            'manager': {
                'shared_memory': False
            },
            'agent_vs_human': False,
            'prob_random_agent': 0,
            'prob_expert_agent': 0,
            'scale': True,
            'alphazero_mcts_ctree': False,
            'save_replay_gif': False,
            'replay_path_gif': './replay_gif'
        },
        'policy': {
            'on_policy': False,
            'cuda': True,
            'multi_gpu': False,
            'bp_update_sync': True,
            'traj_len_inf': False,
            # Network architecture: small residual net sized for a 3x3 board
            # with a 9-action space (one action per cell).
            'model': {
                'observation_shape': [3, 3, 3],
                'action_space_size': 9,
                'num_res_blocks': 1,
                'num_channels': 16,
                'fc_value_layers': [8],
                'fc_policy_layers': [8]
            },
            'torch_compile': False,
            'tensor_float_32': False,
            'sampled_algo': False,
            'gumbel_algo': False,
            # Optimization hyperparameters.
            'update_per_collect': 50,
            'model_update_ratio': 0.1,
            'batch_size': 256,
            'optim_type': 'Adam',
            'learning_rate': 0.003,
            'weight_decay': 0.0001,
            'momentum': 0.9,
            'grad_clip_value': 0.5,
            'value_weight': 1.0,
            'collector_env_num': 8,
            'evaluator_env_num': 5,
            # Learning-rate / temperature schedules are disabled; the
            # threshold values below only apply if the flags are flipped on.
            'lr_piecewise_constant_decay': False,
            'threshold_training_steps_for_final_lr': 500000,
            'manual_temperature_decay': False,
            'threshold_training_steps_for_final_temperature': 100000,
            'fixed_temperature_value': 0.25,
            'mcts': {
                'num_simulations': 25
            },
            'other': {
                'replay_buffer': {
                    'replay_buffer_size': 1000000,
                    'save_episode': False
                }
            },
            'cfg_type': 'AlphaZeroPolicyDict',
            'mcts_ctree': False,
            'simulation_env_name': 'tictactoe',
            'simulation_env_config_type': 'play_with_bot',
            'board_size': 3,
            'entropy_weight': 0.0,
            'n_episode': 8,
            'eval_freq': 2000
        },
        # All wandb loggers are disabled for this export.
        'wandb_logger': {
            'gradient_logger': False,
            'video_logger': False,
            'plot_logger': False,
            'action_logger': False,
            'return_logger': False
        }
    },
    'create_config': {
        'env': {
            'type': 'tictactoe',
            'import_names': ['zoo.board_games.tictactoe.envs.tictactoe_env']
        },
        'env_manager': {
            'type': 'subprocess'
        },
        'policy': {
            'type': 'alphazero',
            'import_names': ['lzero.policy.alphazero']
        },
        'collector': {
            'type': 'episode_alphazero',
            'import_names': ['lzero.worker.alphazero_collector']
        },
        'evaluator': {
            'type': 'alphazero',
            'import_names': ['lzero.worker.alphazero_evaluator']
        }
    }
}