exp_config = {
    'main_config': {
        'exp_name': 'PongNoFrameskip-v4-EfficientZero',
        'seed': 0,
        'env': {
            'env_id': 'PongNoFrameskip-v4',
            'env_name': 'PongNoFrameskip-v4',
            'obs_shape': [4, 96, 96],
            'collector_env_num': 8,
            'evaluator_env_num': 3,
            'n_evaluator_episode': 3,
            'manager': {
                'shared_memory': False,
            },
        },
        'policy': {
            'on_policy': False,
            'cuda': True,
            'multi_gpu': False,
            'bp_update_sync': True,
            'traj_len_inf': False,
            # Representation/dynamics/prediction network settings.
            'model': {
                'observation_shape': [4, 96, 96],
                'frame_stack_num': 4,
                'action_space_size': 6,
                'downsample': True,
                'discrete_action_encoding_type': 'one_hot',
                'norm_type': 'BN',
            },
            'use_rnd_model': False,
            'sampled_algo': False,
            'gumbel_algo': False,
            # Use the C++ MCTS tree implementation.
            'mcts_ctree': True,
            'collector_env_num': 8,
            'evaluator_env_num': 3,
            'env_type': 'not_board_games',
            'action_type': 'fixed_action_space',
            'battle_mode': 'play_with_bot_mode',
            'monitor_extra_statistics': True,
            'game_segment_length': 400,
            'transform2string': False,
            'gray_scale': False,
            # Image augmentation, as in EfficientZero: random shift + intensity.
            'use_augmentation': True,
            'augmentation': ['shift', 'intensity'],
            'ignore_done': False,
            # Optimization.
            'update_per_collect': 1000,
            'model_update_ratio': 0.1,
            'batch_size': 256,
            'optim_type': 'SGD',
            'learning_rate': 0.2,
            'target_update_freq': 100,
            'target_update_freq_for_intrinsic_reward': 1000,
            'weight_decay': 0.0001,
            'momentum': 0.9,
            'grad_clip_value': 10,
            'n_episode': 8,
            # MCTS search budget and multi-step learning targets.
            'num_simulations': 50,
            'discount_factor': 0.997,
            'td_steps': 5,
            'num_unroll_steps': 5,
            # Loss weights; ssl_loss_weight is the self-supervised consistency loss.
            'reward_loss_weight': 1,
            'value_loss_weight': 0.25,
            'policy_loss_weight': 1,
            'policy_entropy_loss_weight': 0,
            'ssl_loss_weight': 2,
            'lr_piecewise_constant_decay': True,
            'threshold_training_steps_for_final_lr': 50000,
            'manual_temperature_decay': False,
            'threshold_training_steps_for_final_temperature': 100000,
            'fixed_temperature_value': 0.25,
            # NOTE: the 'ture' spelling matches the upstream LightZero key name.
            'use_ture_chance_label_in_chance_encoder': False,
            # Prioritized experience replay exponents.
            'use_priority': True,
            'priority_prob_alpha': 0.6,
            'priority_prob_beta': 0.4,
            # Dirichlet exploration noise at the search root.
            'root_dirichlet_alpha': 0.3,
            'root_noise_weight': 0.25,
            'random_collect_episode_num': 0,
            'eps': {
                'eps_greedy_exploration_in_collect': False,
                'type': 'linear',
                'start': 1.0,
                'end': 0.05,
                'decay': 100000,
            },
            'cfg_type': 'EfficientZeroPolicyDict',
            # Horizon of the value-prefix LSTM (EfficientZero-specific).
            'lstm_horizon_len': 5,
            'reanalyze_ratio': 0.0,
            'eval_freq': 2000,
            'replay_buffer_size': 1000000,
        },
        'wandb_logger': {
            'gradient_logger': False,
            'video_logger': False,
            'plot_logger': False,
            'action_logger': False,
            'return_logger': False,
        },
    },
    'create_config': {
        'env': {
            'type': 'atari_lightzero',
            'import_names': ['zoo.atari.envs.atari_lightzero_env'],
        },
        'env_manager': {
            'type': 'subprocess',
        },
        'policy': {
            'type': 'efficientzero',
            'import_names': ['lzero.policy.efficientzero'],
        },
    },
}
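
# --- Optional launch sketch (not part of the dumped config) ---------------
# A minimal sketch, assuming this dumped dict round-trips through LightZero's
# standard entry point lzero.entry.train_muzero (which also handles the
# EfficientZero policy) when the two sub-dicts are wrapped as EasyDicts,
# mirroring the zoo config convention. The max_env_step budget below is
# illustrative, not taken from the original config.
if __name__ == '__main__':
    from easydict import EasyDict
    from lzero.entry import train_muzero

    main_config = EasyDict(exp_config['main_config'])
    create_config = EasyDict(exp_config['create_config'])
    train_muzero(
        [main_config, create_config],
        seed=main_config.seed,
        max_env_step=int(1e6),  # illustrative environment-step budget
    )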