crossroderick committed
Commit c433002
1 Parent(s): 39bacd5

Initial commit

qrdqn-SpaceInvadersNoFrameskip-v4.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:06641b7f1c631d9de09fbb681df8b82509910e41228b3c08fa868332b8ce327a
+oid sha256:63f2ad3748a3fcfa3f95c78b26f4f5aa9e30afebe61118ba304c4e9df5c3aac8
 size 37018978
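
Only the LFS object hash changes here; the checkpoint was re-saved at the same size. For readers who want to use the checkpoint behind this pointer, a minimal loading sketch follows. It assumes the usual rl-baselines3-zoo-style Atari preprocessing (frame-stacked vectorized env), which is not recorded in this diff:

```python
# A minimal sketch, assuming rl-baselines3-zoo-style Atari preprocessing;
# none of the env setup below is recorded in the diff itself.
from sb3_contrib import QRDQN
from stable_baselines3.common.env_util import make_atari_env
from stable_baselines3.common.vec_env import VecFrameStack

env = make_atari_env("SpaceInvadersNoFrameskip-v4", n_envs=1)
env = VecFrameStack(env, n_stack=4)  # Atari QR-DQN models expect stacked frames

model = QRDQN.load("qrdqn-SpaceInvadersNoFrameskip-v4.zip")

obs = env.reset()
for _ in range(1_000):
    action, _states = model.predict(obs, deterministic=False)
    obs, rewards, dones, infos = env.step(action)
```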
qrdqn-SpaceInvadersNoFrameskip-v4/data CHANGED
@@ -4,9 +4,9 @@
 ":serialized:": "gAWVLAAAAAAAAACMGnNiM19jb250cmliLnFyZHFuLnBvbGljaWVzlIwJQ25uUG9saWN5lJOULg==",
 "__module__": "sb3_contrib.qrdqn.policies",
 "__doc__": "\n Policy class for QR-DQN when using images as input.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param n_quantiles: Number of quantiles\n :param net_arch: The specification of the network architecture.\n :param activation_fn: Activation function\n :param features_extractor_class: Features extractor to use.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ",
-"__init__": "<function CnnPolicy.__init__ at 0x7f1ec46ec040>",
+"__init__": "<function CnnPolicy.__init__ at 0x7f6b8a040040>",
 "__abstractmethods__": "frozenset()",
-"_abc_impl": "<_abc._abc_data object at 0x7f1ec46e8f40>"
+"_abc_impl": "<_abc._abc_data object at 0x7f6b8a03cfc0>"
 },
 "verbose": 1,
 "policy_kwargs": {
@@ -88,13 +88,13 @@
 "__module__": "stable_baselines3.common.buffers",
 "__annotations__": "{'observations': <class 'numpy.ndarray'>, 'next_observations': <class 'numpy.ndarray'>, 'actions': <class 'numpy.ndarray'>, 'rewards': <class 'numpy.ndarray'>, 'dones': <class 'numpy.ndarray'>, 'timeouts': <class 'numpy.ndarray'>}",
 "__doc__": "\n Replay buffer used in off-policy algorithms like SAC/TD3.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param n_envs: Number of parallel environments\n :param optimize_memory_usage: Enable a memory efficient variant\n of the replay buffer which reduces by almost a factor two the memory used,\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n Cannot be used in combination with handle_timeout_termination.\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n ",
-"__init__": "<function ReplayBuffer.__init__ at 0x7f1ec4f4c280>",
-"add": "<function ReplayBuffer.add at 0x7f1ec4f4c310>",
-"sample": "<function ReplayBuffer.sample at 0x7f1ec4f4c3a0>",
-"_get_samples": "<function ReplayBuffer._get_samples at 0x7f1ec4f4c430>",
-"_maybe_cast_dtype": "<staticmethod(<function ReplayBuffer._maybe_cast_dtype at 0x7f1ec4f4c4c0>)>",
+"__init__": "<function ReplayBuffer.__init__ at 0x7f6b8a8a0280>",
+"add": "<function ReplayBuffer.add at 0x7f6b8a8a0310>",
+"sample": "<function ReplayBuffer.sample at 0x7f6b8a8a03a0>",
+"_get_samples": "<function ReplayBuffer._get_samples at 0x7f6b8a8a0430>",
+"_maybe_cast_dtype": "<staticmethod(<function ReplayBuffer._maybe_cast_dtype at 0x7f6b8a8a04c0>)>",
 "__abstractmethods__": "frozenset()",
-"_abc_impl": "<_abc._abc_data object at 0x7f1ec50c6bc0>"
+"_abc_impl": "<_abc._abc_data object at 0x7f6b8aa24180>"
 },
 "replay_buffer_kwargs": {},
 "train_freq": {
replay.mp4 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d9ddeb39beab38d2ebef699c1bf897b4c475a6e4a198dddb3121c0a763c908e
+size 211717
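
This pointer file records only a hash and size; the 211717-byte video itself lives in LFS storage. A sketch of fetching it through huggingface_hub follows; the repo_id is an assumption inferred from the committer name, not something stated in this diff:

```python
# A minimal sketch; hf_hub_download resolves LFS pointers to the real file.
# The repo_id below is hypothetical -- substitute the actual Hub repository.
from huggingface_hub import hf_hub_download

video_path = hf_hub_download(
    repo_id="crossroderick/qrdqn-SpaceInvadersNoFrameskip-v4",  # assumed, not in the diff
    filename="replay.mp4",
)
print(video_path)  # local cache path to the downloaded video
```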
results.json CHANGED
@@ -1 +1 @@
-{"mean_reward": 768.0, "std_reward": 234.94893062110327, "is_deterministic": false, "n_eval_episodes": 10, "eval_datetime": "2024-02-26T13:17:38.271632"}
+{"mean_reward": 768.0, "std_reward": 234.94893062110327, "is_deterministic": false, "n_eval_episodes": 10, "eval_datetime": "2024-02-26T13:39:04.070893"}