[project]
name = "rl_algo_impls"
version = "0.0.9"
description = "Implementations of reinforcement learning algorithms"
authors = [
    {name = "Scott Goodfriend", email = "goodfriend.scott@gmail.com"},
]
license = {file = "LICENSE"}
readme = "README.md"
requires-python = ">= 3.8"
classifiers = [
    "License :: OSI Approved :: MIT License",
    "Development Status :: 3 - Alpha",
    "Programming Language :: Python :: 3.8",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
]
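# Core runtime dependencies. cmake and swig are listed up front, likely as
# build prerequisites for native extensions such as gym's box2d extra (this
# reading of the pins is an assumption, not stated in the file).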
dependencies = [
    "cmake",
    "swig",
    "scipy",
    "torch",
    "torchvision",
    "tensorboard >= 2.11.2, < 2.12",
    "AutoROM.accept-rom-license >= 0.4.2, < 0.5",
    "stable-baselines3[extra] >= 1.7.0, < 1.8",
    "gym[box2d] >= 0.21.0, < 0.22",
    "pyglet == 1.5.27",
    "wandb",
    "pyvirtualdisplay",
    "pybullet",
    "tabulate",
    "huggingface-hub",
    "optuna",
    "dash",
    "kaleido",
    "PyYAML",
    "scikit-learn",
]
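
# Explicitly list the package to include rather than relying on setuptools
# auto-discovery.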
[tool.setuptools]
packages = ["rl_algo_impls"]
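
# Optional dependency groups ("extras"). With a local checkout, a group can be
# installed with, for example: pip install -e ".[test]"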
[project.optional-dependencies]
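# Developer tooling: test runner, formatter, type checker, and linters.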
test = [
    "pytest",
    "black",
    "mypy",
    "flake8",
    "flake8-bugbear",
    "isort",
]
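# Procgen environments; the environment marker limits the procgen package to
# x86_64, where prebuilt wheels are available.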
procgen = [
    "numexpr >= 2.8.4",
    "gym3",
    "glfw >= 1.12.0, < 1.13",
    "procgen; platform_machine=='x86_64'",
]
microrts-ppo = [
    "numpy < 1.24.0",  # Support for gym-microrts < 0.6.0
    "gym-microrts == 0.2.0",  # Match ppo-implementation-details
]
microrts-paper = [
    "numpy < 1.24.0",  # Support for gym-microrts < 0.6.0
    "gym-microrts == 0.3.2",
]
microrts = [
    "gym-microrts",
]
jupyter = [
    "jupyter",
    "notebook",
]
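# Convenience extra that pulls in the test, procgen, and microrts groups
# together, e.g. pip install -e ".[all]"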
all = [
    "rl-algo-impls[test]",
    "rl-algo-impls[procgen]",
    "rl-algo-impls[microrts]",
]

[project.urls]
"Homepage" = "https://github.com/sgoodfriend/rl-algo-impls"
[build-system]
requires = ["setuptools==65.5.0", "setuptools-scm"]
build-backend = "setuptools.build_meta"
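
# Keep isort's import ordering compatible with Black's formatting.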
[tool.isort]
profile = "black"