# Exported LightZero experiment config: Sampled EfficientZero on Atari MsPacman.
exp_config = {
    'main_config': {
        'exp_name': 'MsPacmanNoFrameskip-v4-SampledEfficientZero',
        'seed': 0,
        # Environment: 8 parallel collector envs, 3 evaluator envs.
        'env': {
            'env_id': 'MsPacmanNoFrameskip-v4',
            'env_name': 'MsPacmanNoFrameskip-v4',
            'obs_shape': [4, 96, 96],
            'collector_env_num': 8,
            'evaluator_env_num': 3,
            'n_evaluator_episode': 3,
            'manager': {
                'shared_memory': False,
            },
        },
        'policy': {
            'on_policy': False,
            'cuda': True,
            'multi_gpu': False,
            'bp_update_sync': True,
            'traj_len_inf': False,
            # Network settings: 4 stacked 96x96 frames, 9 discrete actions,
            # with 5 actions sampled per node (Sampled EfficientZero).
            'model': {
                'observation_shape': [4, 96, 96],
                'frame_stack_num': 4,
                'action_space_size': 9,
                'downsample': True,
                'continuous_action_space': False,
                'num_of_sampled_actions': 5,
                'discrete_action_encoding_type': 'one_hot',
                'norm_type': 'BN',
            },
            'use_rnd_model': False,
            'sampled_algo': True,
            'gumbel_algo': False,
            'mcts_ctree': True,
            'collector_env_num': 8,
            'evaluator_env_num': 3,
            'env_type': 'not_board_games',
            'action_type': 'fixed_action_space',
            'battle_mode': 'play_with_bot_mode',
            'monitor_extra_statistics': True,
            'game_segment_length': 400,
            'transform2string': False,
            'gray_scale': False,
            'use_augmentation': True,
            'augmentation': ['shift', 'intensity'],
            'ignore_done': False,
            # Optimization: SGD with piecewise-constant LR decay starting at 0.2.
            'update_per_collect': 1000,
            'model_update_ratio': 0.1,
            'batch_size': 256,
            'optim_type': 'SGD',
            'learning_rate': 0.2,
            'target_update_freq': 100,
            'target_update_freq_for_intrinsic_reward': 1000,
            'weight_decay': 0.0001,
            'momentum': 0.9,
            'grad_clip_value': 10,
            'n_episode': 8,
            # MCTS: 50 simulations per move; 5-step TD targets and unrolling.
            'num_simulations': 50,
            'discount_factor': 0.997,
            'td_steps': 5,
            'num_unroll_steps': 5,
            'reward_loss_weight': 1,
            'value_loss_weight': 0.25,
            'policy_loss_weight': 1,
            'policy_entropy_loss_weight': 0,
            'ssl_loss_weight': 2,
            'lr_piecewise_constant_decay': True,
            'threshold_training_steps_for_final_lr': 50000,
            'manual_temperature_decay': False,
            'threshold_training_steps_for_final_temperature': 100000,
            'fixed_temperature_value': 0.25,
            # Note: the 'ture' spelling below matches the upstream LightZero key name.
            'use_ture_chance_label_in_chance_encoder': False,
            # Prioritized experience replay parameters.
            'use_priority': True,
            'priority_prob_alpha': 0.6,
            'priority_prob_beta': 0.4,
            'root_dirichlet_alpha': 0.3,
            'root_noise_weight': 0.25,
            'random_collect_episode_num': 0,
            # Epsilon-greedy exploration during collection (disabled here).
            'eps': {
                'eps_greedy_exploration_in_collect': False,
                'type': 'linear',
                'start': 1.0,
                'end': 0.05,
                'decay': 100000,
            },
            'cfg_type': 'SampledEfficientZeroPolicyDict',
            'init_w': 0.003,
            'normalize_prob_of_sampled_actions': False,
            'policy_loss_type': 'cross_entropy',
            'lstm_horizon_len': 5,
            'cos_lr_scheduler': False,
            'reanalyze_ratio': 0.0,
            'eval_freq': 2000,
            'replay_buffer_size': 1000000,
        },
        # Weights & Biases logging toggles (all disabled).
        'wandb_logger': {
            'gradient_logger': False,
            'video_logger': False,
            'plot_logger': False,
            'action_logger': False,
            'return_logger': False,
        },
    },
    # Component registry: which env, env manager, and policy classes to import.
    'create_config': {
        'env': {
            'type': 'atari_lightzero',
            'import_names': ['zoo.atari.envs.atari_lightzero_env'],
        },
        'env_manager': {
            'type': 'subprocess',
        },
        'policy': {
            'type': 'sampled_efficientzero',
            'import_names': ['lzero.policy.sampled_efficientzero'],
        },
    },
}
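# -----------------------------------------------------------------------------
# Minimal usage sketch (an assumption, mirroring how LightZero's zoo configs
# launch training): wrap the two sub-dicts in EasyDict and hand them to the
# `train_muzero` entry point from the LightZero package. The `max_env_step`
# value below is an illustrative budget, not part of the exported config.
# -----------------------------------------------------------------------------
from easydict import EasyDict

from lzero.entry import train_muzero

if __name__ == "__main__":
    # EasyDict makes nested keys attribute-accessible, the format the entry
    # point expects for (main_config, create_config) pairs.
    main_config = EasyDict(exp_config['main_config'])
    create_config = EasyDict(exp_config['create_config'])
    train_muzero(
        [main_config, create_config],
        seed=main_config.seed,
        max_env_step=int(1e6),  # hypothetical training budget
    )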