uvpatel7271 commited on
Commit
0695520
·
1 Parent(s): e31582b

envrionment setup

Browse files
.gitignore ADDED
@@ -0,0 +1,439 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ __pycache__/__init.cpython-313.pyc
4
+ __pycache__/client.cpython-313.pyc
5
+ # Ignore changes to __pycache__
6
+ __pycache__/
7
+ *.py[codz]
8
+ *$py.class
9
+
10
+ # C extensions
11
+ *.so
12
+
13
+ # Distribution / packaging
14
+ .Python
15
+ build/
16
+ develop-eggs/
17
+ dist/
18
+ downloads/
19
+ eggs/
20
+ .eggs/
21
+ lib/
22
+ lib64/
23
+ parts/
24
+ sdist/
25
+ var/
26
+ wheels/
27
+ share/python-wheels/
28
+ *.egg-info/
29
+ .installed.cfg
30
+ *.egg
31
+ MANIFEST
32
+
33
+ # PyInstaller
34
+ # Usually these files are written by a python script from a template
35
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
36
+ *.manifest
37
+ *.spec
38
+
39
+ # Installer logs
40
+ pip-log.txt
41
+ pip-delete-this-directory.txt
42
+
43
+ # Unit test / coverage reports
44
+ htmlcov/
45
+ .tox/
46
+ .nox/
47
+ .coverage
48
+ .coverage.*
49
+ .cache
50
+ nosetests.xml
51
+ coverage.xml
52
+ *.cover
53
+ *.py.cover
54
+ .hypothesis/
55
+ .pytest_cache/
56
+ cover/
57
+
58
+ # Translations
59
+ *.mo
60
+ *.pot
61
+
62
+ # Django stuff:
63
+ *.log
64
+ local_settings.py
65
+ db.sqlite3
66
+ db.sqlite3-journal
67
+
68
+ # Flask stuff:
69
+ instance/
70
+ .webassets-cache
71
+
72
+ # Scrapy stuff:
73
+ .scrapy
74
+
75
+ # Sphinx documentation
76
+ docs/_build/
77
+
78
+ # PyBuilder
79
+ .pybuilder/
80
+ target/
81
+
82
+ # Jupyter Notebook
83
+ .ipynb_checkpoints
84
+
85
+ # IPython
86
+ profile_default/
87
+ ipython_config.py
88
+
89
+ # pyenv
90
+ # For a library or package, you might want to ignore these files since the code is
91
+ # intended to run in multiple environments; otherwise, check them in:
92
+ # .python-version
93
+
94
+ # pipenv
95
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
96
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
97
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
98
+ # install all needed dependencies.
99
+ # Pipfile.lock
100
+
101
+ # UV
102
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
103
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
104
+ # commonly ignored for libraries.
105
+ # uv.lock
106
+
107
+ # poetry
108
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
109
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
110
+ # commonly ignored for libraries.
111
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
112
+ # poetry.lock
113
+ # poetry.toml
114
+
115
+ # pdm
116
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
117
+ # pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
118
+ # https://pdm-project.org/en/latest/usage/project/#working-with-version-control
119
+ # pdm.lock
120
+ # pdm.toml
121
+ .pdm-python
122
+ .pdm-build/
123
+
124
+ # pixi
125
+ # Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
126
+ # pixi.lock
127
+ # Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
128
+ # in the .venv directory. It is recommended not to include this directory in version control.
129
+ .pixi
130
+
131
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
132
+ __pypackages__/
133
+
134
+ # Celery stuff
135
+ celerybeat-schedule
136
+ celerybeat.pid
137
+
138
+ # Redis
139
+ *.rdb
140
+ *.aof
141
+ *.pid
142
+
143
+ # RabbitMQ
144
+ mnesia/
145
+ rabbitmq/
146
+ rabbitmq-data/
147
+
148
+ # ActiveMQ
149
+ activemq-data/
150
+
151
+ # SageMath parsed files
152
+ *.sage.py
153
+
154
+ # Environments
155
+ .env
156
+ .envrc
157
+ .venv
158
+ env/
159
+ venv/
160
+ ENV/
161
+ env.bak/
162
+ venv.bak/
163
+
164
+ # Spyder project settings
165
+ .spyderproject
166
+ .spyproject
167
+
168
+ # Rope project settings
169
+ .ropeproject
170
+
171
+ # mkdocs documentation
172
+ /site
173
+
174
+ # mypy
175
+ .mypy_cache/
176
+ .dmypy.json
177
+ dmypy.json
178
+
179
+ # Pyre type checker
180
+ .pyre/
181
+
182
+ # pytype static type analyzer
183
+ .pytype/
184
+
185
+ # Cython debug symbols
186
+ cython_debug/
187
+
188
+ # PyCharm
189
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
190
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
191
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
192
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
193
+ # .idea/
194
+ .env# Byte-compiled / optimized / DLL files
195
+ __pycache__/
196
+ __pycache__/__init.cpython-313.pyc
197
+ __pycache__/client.cpython-313.pyc
198
+ # Ignore changes to __pycache__
199
+ __pycache__/
200
+ *.py[codz]
201
+ *$py.class
202
+
203
+ # C extensions
204
+ *.so
205
+
206
+ # Distribution / packaging
207
+ .Python
208
+ build/
209
+ develop-eggs/
210
+ dist/
211
+ downloads/
212
+ eggs/
213
+ .eggs/
214
+ lib/
215
+ lib64/
216
+ parts/
217
+ sdist/
218
+ var/
219
+ wheels/
220
+ share/python-wheels/
221
+ *.egg-info/
222
+ .installed.cfg
223
+ *.egg
224
+ MANIFEST
225
+
226
+ # PyInstaller
227
+ # Usually these files are written by a python script from a template
228
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
229
+ *.manifest
230
+ *.spec
231
+
232
+ # Installer logs
233
+ pip-log.txt
234
+ pip-delete-this-directory.txt
235
+
236
+ # Unit test / coverage reports
237
+ htmlcov/
238
+ .tox/
239
+ .nox/
240
+ .coverage
241
+ .coverage.*
242
+ .cache
243
+ nosetests.xml
244
+ coverage.xml
245
+ *.cover
246
+ *.py.cover
247
+ .hypothesis/
248
+ .pytest_cache/
249
+ cover/
250
+
251
+ # Translations
252
+ *.mo
253
+ *.pot
254
+
255
+ # Django stuff:
256
+ *.log
257
+ local_settings.py
258
+ db.sqlite3
259
+ db.sqlite3-journal
260
+
261
+ # Flask stuff:
262
+ instance/
263
+ .webassets-cache
264
+
265
+ # Scrapy stuff:
266
+ .scrapy
267
+
268
+ # Sphinx documentation
269
+ docs/_build/
270
+
271
+ # PyBuilder
272
+ .pybuilder/
273
+ target/
274
+
275
+ # Jupyter Notebook
276
+ .ipynb_checkpoints
277
+
278
+ # IPython
279
+ profile_default/
280
+ ipython_config.py
281
+
282
+ # pyenv
283
+ # For a library or package, you might want to ignore these files since the code is
284
+ # intended to run in multiple environments; otherwise, check them in:
285
+ # .python-version
286
+
287
+ # pipenv
288
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
289
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
290
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
291
+ # install all needed dependencies.
292
+ # Pipfile.lock
293
+
294
+ # UV
295
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
296
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
297
+ # commonly ignored for libraries.
298
+ # uv.lock
299
+
300
+ # poetry
301
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
302
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
303
+ # commonly ignored for libraries.
304
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
305
+ # poetry.lock
306
+ # poetry.toml
307
+
308
+ # pdm
309
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
310
+ # pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
311
+ # https://pdm-project.org/en/latest/usage/project/#working-with-version-control
312
+ # pdm.lock
313
+ # pdm.toml
314
+ .pdm-python
315
+ .pdm-build/
316
+
317
+ # pixi
318
+ # Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
319
+ # pixi.lock
320
+ # Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
321
+ # in the .venv directory. It is recommended not to include this directory in version control.
322
+ .pixi
323
+
324
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
325
+ __pypackages__/
326
+
327
+ # Celery stuff
328
+ celerybeat-schedule
329
+ celerybeat.pid
330
+
331
+ # Redis
332
+ *.rdb
333
+ *.aof
334
+ *.pid
335
+
336
+ # RabbitMQ
337
+ mnesia/
338
+ rabbitmq/
339
+ rabbitmq-data/
340
+
341
+ # ActiveMQ
342
+ activemq-data/
343
+
344
+ # SageMath parsed files
345
+ *.sage.py
346
+
347
+ # Environments
348
+ .env
349
+ .envrc
350
+ .venv
351
+ env/
352
+ venv/
353
+ ENV/
354
+ env.bak/
355
+ venv.bak/
356
+
357
+ # Spyder project settings
358
+ .spyderproject
359
+ .spyproject
360
+
361
+ # Rope project settings
362
+ .ropeproject
363
+
364
+ # mkdocs documentation
365
+ /site
366
+
367
+ # mypy
368
+ .mypy_cache/
369
+ .dmypy.json
370
+ dmypy.json
371
+
372
+ # Pyre type checker
373
+ .pyre/
374
+
375
+ # pytype static type analyzer
376
+ .pytype/
377
+
378
+ # Cython debug symbols
379
+ cython_debug/
380
+
381
+ # PyCharm
382
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
383
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
384
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
385
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
386
+ # .idea/
387
+
388
+ # Abstra
389
+ # Abstra is an AI-powered process automation framework.
390
+ # Ignore directories containing user credentials, local state, and settings.
391
+ # Learn more at https://abstra.io/docs
392
+ .abstra/
393
+
394
+ # Visual Studio Code
395
+ # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
396
+ # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
397
+ # and can be added to the global gitignore or merged into this file. However, if you prefer,
398
+ # you could uncomment the following to ignore the entire vscode folder
399
+ # .vscode/
400
+
401
+ # Ruff stuff:
402
+ .ruff_cache/
403
+
404
+ # PyPI configuration file
405
+ .pypirc
406
+
407
+ # Marimo
408
+ marimo/_static/
409
+ marimo/_lsp/
410
+ __marimo__/
411
+
412
+ # Streamlit
413
+ .streamlit/secrets.toml
414
+ # Abstra
415
+ # Abstra is an AI-powered process automation framework.
416
+ # Ignore directories containing user credentials, local state, and settings.
417
+ # Learn more at https://abstra.io/docs
418
+ .abstra/
419
+
420
+ # Visual Studio Code
421
+ # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
422
+ # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
423
+ # and can be added to the global gitignore or merged into this file. However, if you prefer,
424
+ # you could uncomment the following to ignore the entire vscode folder
425
+ # .vscode/
426
+
427
+ # Ruff stuff:
428
+ .ruff_cache/
429
+
430
+ # PyPI configuration file
431
+ .pypirc
432
+
433
+ # Marimo
434
+ marimo/_static/
435
+ marimo/_lsp/
436
+ __marimo__/
437
+
438
+ # Streamlit
439
+ .streamlit/secrets.toml
.vscode/settings.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "python-envs.defaultEnvManager": "ms-python.python:venv",
3
+ "python-envs.defaultPackageManager": "ms-python.python:pip"
4
+ }
README.md CHANGED
Binary files a/README.md and b/README.md differ
 
__init__.py CHANGED
@@ -1,16 +1,22 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the BSD-style license found in the
5
- # LICENSE file in the root directory of this source tree.
6
 
7
- """Python Env Environment."""
8
-
9
- from .client import PythonEnv
10
- from .models import PythonAction, PythonObservation
 
 
 
 
 
11
 
12
  __all__ = [
13
  "PythonAction",
14
  "PythonObservation",
 
 
 
 
 
15
  "PythonEnv",
16
  ]
 
1
+ """Public package exports for python_code_review_env."""
 
 
 
 
2
 
3
+ from .client import PythonCodeReviewEnv, PythonEnv
4
+ from .models import (
5
+ PythonAction,
6
+ PythonCodeReviewAction,
7
+ PythonCodeReviewObservation,
8
+ PythonCodeReviewState,
9
+ PythonObservation,
10
+ PythonState,
11
+ )
12
 
13
  __all__ = [
14
  "PythonAction",
15
  "PythonObservation",
16
+ "PythonState",
17
+ "PythonCodeReviewAction",
18
+ "PythonCodeReviewObservation",
19
+ "PythonCodeReviewState",
20
+ "PythonCodeReviewEnv",
21
  "PythonEnv",
22
  ]
__pycache__/__init__.cpython-313.pyc CHANGED
Binary files a/__pycache__/__init__.cpython-313.pyc and b/__pycache__/__init__.cpython-313.pyc differ
 
__pycache__/client.cpython-313.pyc CHANGED
Binary files a/__pycache__/client.cpython-313.pyc and b/__pycache__/client.cpython-313.pyc differ
 
__pycache__/models.cpython-313.pyc CHANGED
Binary files a/__pycache__/models.cpython-313.pyc and b/__pycache__/models.cpython-313.pyc differ
 
client.py CHANGED
@@ -1,99 +1,37 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the BSD-style license found in the
5
- # LICENSE file in the root directory of this source tree.
6
 
7
- """Python Env Environment Client."""
8
 
9
  from typing import Dict
10
 
11
  from openenv.core import EnvClient
12
  from openenv.core.client_types import StepResult
13
- from openenv.core.env_server.types import State
14
 
15
- from .models import PythonAction, PythonObservation
 
 
 
 
16
 
17
 
18
- class PythonEnv(
19
- EnvClient[PythonAction, PythonObservation, State]
20
  ):
21
- """
22
- Client for the Python Env Environment.
23
 
24
- This client maintains a persistent WebSocket connection to the environment server,
25
- enabling efficient multi-step interactions with lower latency.
26
- Each client instance has its own dedicated environment session on the server.
27
-
28
- Example:
29
- >>> # Connect to a running server
30
- >>> with PythonEnv(base_url="http://localhost:8000") as client:
31
- ... result = client.reset()
32
- ... print(result.observation.echoed_message)
33
- ...
34
- ... result = client.step(PythonAction(message="Hello!"))
35
- ... print(result.observation.echoed_message)
36
-
37
- Example with Docker:
38
- >>> # Automatically start container and connect
39
- >>> client = PythonEnv.from_docker_image("python_env-env:latest")
40
- >>> try:
41
- ... result = client.reset()
42
- ... result = client.step(PythonAction(message="Test"))
43
- ... finally:
44
- ... client.close()
45
- """
46
-
47
- def _step_payload(self, action: PythonAction) -> Dict:
48
- """
49
- Convert PythonAction to JSON payload for step message.
50
-
51
- Args:
52
- action: PythonAction instance
53
-
54
- Returns:
55
- Dictionary representation suitable for JSON encoding
56
- """
57
- return {
58
- "message": action.message,
59
- }
60
-
61
- def _parse_result(self, payload: Dict) -> StepResult[PythonObservation]:
62
- """
63
- Parse server response into StepResult[PythonObservation].
64
-
65
- Args:
66
- payload: JSON response data from server
67
-
68
- Returns:
69
- StepResult with PythonObservation
70
- """
71
- obs_data = payload.get("observation", {})
72
- observation = PythonObservation(
73
- echoed_message=obs_data.get("echoed_message", ""),
74
- message_length=obs_data.get("message_length", 0),
75
- done=payload.get("done", False),
76
- reward=payload.get("reward"),
77
- metadata=obs_data.get("metadata", {}),
78
- )
79
 
 
 
80
  return StepResult(
81
  observation=observation,
82
  reward=payload.get("reward"),
83
- done=payload.get("done", False),
84
  )
85
 
86
- def _parse_state(self, payload: Dict) -> State:
87
- """
88
- Parse server response into State object.
89
 
90
- Args:
91
- payload: JSON response from state request
92
 
93
- Returns:
94
- State object with episode_id and step_count
95
- """
96
- return State(
97
- episode_id=payload.get("episode_id"),
98
- step_count=payload.get("step_count", 0),
99
- )
 
1
+ """Client helpers for python_code_review_env."""
 
 
 
 
2
 
3
+ from __future__ import annotations
4
 
5
  from typing import Dict
6
 
7
  from openenv.core import EnvClient
8
  from openenv.core.client_types import StepResult
 
9
 
10
+ from .models import (
11
+ PythonCodeReviewAction,
12
+ PythonCodeReviewObservation,
13
+ PythonCodeReviewState,
14
+ )
15
 
16
 
17
+ class PythonCodeReviewEnv(
18
+ EnvClient[PythonCodeReviewAction, PythonCodeReviewObservation, PythonCodeReviewState]
19
  ):
20
+ """Typed client for the code review environment."""
 
21
 
22
+ def _step_payload(self, action: PythonCodeReviewAction) -> Dict:
23
+ return action.model_dump(exclude_none=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
 
25
+ def _parse_result(self, payload: Dict) -> StepResult[PythonCodeReviewObservation]:
26
+ observation = PythonCodeReviewObservation.model_validate(payload.get("observation", {}))
27
  return StepResult(
28
  observation=observation,
29
  reward=payload.get("reward"),
30
+ done=payload.get("done", observation.done),
31
  )
32
 
33
+ def _parse_state(self, payload: Dict) -> PythonCodeReviewState:
34
+ return PythonCodeReviewState.model_validate(payload)
 
35
 
 
 
36
 
37
+ PythonEnv = PythonCodeReviewEnv
 
 
 
 
 
 
compat.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Compatibility helpers expected by validator-oriented scripts."""
2
+
3
+ from __future__ import annotations
4
+
5
+
6
+ def install_openenv_fastmcp_compat() -> None:
7
+ """Install runtime shims when needed.
8
+
9
+ The current environment does not require any monkey-patching, so this is a
10
+ deliberate no-op kept for validator compatibility.
11
+ """
12
+
13
+ return None
graders/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ """Deterministic graders for python_code_review_env."""
2
+
3
+ from .dispatch import grade_task
4
+
5
+ __all__ = ["grade_task"]
graders/bug_fix.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Bug-fix task grader."""
2
+
3
+ from __future__ import annotations
4
+
5
+ try:
6
+ from ..models import TaskGrade
7
+ from ..tasks.catalog import ReviewTask
8
+ except ImportError:
9
+ from models import TaskGrade
10
+ from tasks.catalog import ReviewTask
11
+
12
+ from .shared import base_grade, compile_code, execute_cases, quality_metrics, similarity_score, summarize_results
13
+
14
+
15
+ def grade_bug_fix_task(
16
+ task: ReviewTask,
17
+ code: str,
18
+ *,
19
+ include_hidden: bool,
20
+ timeout_s: float = 2.0,
21
+ ) -> TaskGrade:
22
+ """Grade a bug-fix task against public or full test suites."""
23
+
24
+ compiled, compile_error = compile_code(code)
25
+ quality = quality_metrics(code, task.function_name)
26
+ details = {
27
+ "compile_error": compile_error,
28
+ "quality_notes": quality["quality_notes"],
29
+ "style_score": quality["style_score"],
30
+ "visibility": "full" if include_hidden else "public",
31
+ }
32
+
33
+ if not compiled:
34
+ partial = round(min(0.2, similarity_score(code, task.reference_code) * 0.2), 3)
35
+ details["test_results"] = []
36
+ details["test_summary"] = "Code does not compile."
37
+ return base_grade(
38
+ score=partial,
39
+ syntax_score=0.0,
40
+ tests_passed=0,
41
+ tests_total=len(task.public_cases) + (len(task.hidden_cases) if include_hidden else 0),
42
+ quality_score=0.0,
43
+ runtime_score=0.0,
44
+ timed_out=False,
45
+ details=details,
46
+ )
47
+
48
+ cases = task.public_cases + (task.hidden_cases if include_hidden else [])
49
+ result = execute_cases(code, task.function_name, cases, timeout_s=timeout_s)
50
+ if result.get("timed_out"):
51
+ details["test_results"] = []
52
+ details["test_summary"] = result["error"]
53
+ return base_grade(
54
+ score=0.0,
55
+ syntax_score=1.0,
56
+ tests_passed=0,
57
+ tests_total=len(cases),
58
+ quality_score=quality["score"],
59
+ runtime_score=0.0,
60
+ timed_out=True,
61
+ details=details,
62
+ )
63
+ if "error" in result:
64
+ details["test_results"] = []
65
+ details["test_summary"] = result["error"]
66
+ return base_grade(
67
+ score=0.0,
68
+ syntax_score=1.0,
69
+ tests_passed=0,
70
+ tests_total=len(cases),
71
+ quality_score=quality["score"],
72
+ runtime_score=0.0,
73
+ timed_out=False,
74
+ details=details,
75
+ )
76
+
77
+ data = result["data"]
78
+ pass_rate = data["passed"] / max(data["total"], 1)
79
+ details["test_results"] = data["results"]
80
+ details["test_summary"] = summarize_results("Test results", data["results"])
81
+ return base_grade(
82
+ score=pass_rate,
83
+ syntax_score=1.0,
84
+ tests_passed=data["passed"],
85
+ tests_total=data["total"],
86
+ quality_score=quality["score"],
87
+ runtime_score=0.0,
88
+ timed_out=False,
89
+ details=details,
90
+ )
graders/dispatch.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Task grader dispatch."""
2
+
3
+ from __future__ import annotations
4
+
5
+ try:
6
+ from ..models import TaskGrade
7
+ from ..tasks.catalog import ReviewTask
8
+ except ImportError:
9
+ from models import TaskGrade
10
+ from tasks.catalog import ReviewTask
11
+
12
+ from .bug_fix import grade_bug_fix_task
13
+ from .optimization import grade_optimization_task
14
+ from .syntax import grade_syntax_task
15
+
16
+
17
+ def grade_task(
18
+ task: ReviewTask,
19
+ code: str,
20
+ *,
21
+ include_hidden: bool,
22
+ timeout_s: float = 3.0,
23
+ ) -> TaskGrade:
24
+ """Dispatch to the correct deterministic grader."""
25
+
26
+ if task.task_kind == "syntax_fix":
27
+ return grade_syntax_task(task, code, timeout_s=timeout_s)
28
+ if task.task_kind == "bug_fix":
29
+ return grade_bug_fix_task(task, code, include_hidden=include_hidden, timeout_s=timeout_s)
30
+ if task.task_kind == "optimization":
31
+ return grade_optimization_task(task, code, include_hidden=include_hidden, timeout_s=timeout_s)
32
+ raise ValueError(f"Unsupported task kind: {task.task_kind}")
graders/optimization.py ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Optimization task grader."""
2
+
3
+ from __future__ import annotations
4
+
5
+ try:
6
+ from ..models import TaskGrade
7
+ from ..tasks.catalog import ReviewTask
8
+ except ImportError:
9
+ from models import TaskGrade
10
+ from tasks.catalog import ReviewTask
11
+
12
+ from .shared import (
13
+ base_grade,
14
+ benchmark_candidate,
15
+ compile_code,
16
+ execute_cases,
17
+ quality_metrics,
18
+ similarity_score,
19
+ summarize_results,
20
+ )
21
+
22
+
23
+ def grade_optimization_task(
24
+ task: ReviewTask,
25
+ code: str,
26
+ *,
27
+ include_hidden: bool,
28
+ timeout_s: float = 3.0,
29
+ ) -> TaskGrade:
30
+ """Grade an optimization/refactor task with correctness, quality, and runtime."""
31
+
32
+ compiled, compile_error = compile_code(code)
33
+ quality = quality_metrics(code, task.function_name)
34
+ details = {
35
+ "compile_error": compile_error,
36
+ "quality_notes": quality["quality_notes"],
37
+ "style_score": quality["style_score"],
38
+ "visibility": "full" if include_hidden else "public",
39
+ }
40
+
41
+ if not compiled:
42
+ partial = round(min(0.15, similarity_score(code, task.reference_code) * 0.15), 3)
43
+ details["test_results"] = []
44
+ details["test_summary"] = "Code does not compile."
45
+ return base_grade(
46
+ score=partial,
47
+ syntax_score=0.0,
48
+ tests_passed=0,
49
+ tests_total=len(task.public_cases) + (len(task.hidden_cases) if include_hidden else 0),
50
+ quality_score=0.0,
51
+ runtime_score=0.0,
52
+ timed_out=False,
53
+ details=details,
54
+ )
55
+
56
+ cases = task.public_cases + (task.hidden_cases if include_hidden else [])
57
+ result = execute_cases(code, task.function_name, cases, timeout_s=timeout_s)
58
+ if result.get("timed_out"):
59
+ details["test_results"] = []
60
+ details["test_summary"] = result["error"]
61
+ return base_grade(
62
+ score=0.0,
63
+ syntax_score=1.0,
64
+ tests_passed=0,
65
+ tests_total=len(cases),
66
+ quality_score=quality["score"],
67
+ runtime_score=0.0,
68
+ timed_out=True,
69
+ details=details,
70
+ )
71
+ if "error" in result:
72
+ details["test_results"] = []
73
+ details["test_summary"] = result["error"]
74
+ return base_grade(
75
+ score=0.0,
76
+ syntax_score=1.0,
77
+ tests_passed=0,
78
+ tests_total=len(cases),
79
+ quality_score=quality["score"],
80
+ runtime_score=0.0,
81
+ timed_out=False,
82
+ details=details,
83
+ )
84
+
85
+ data = result["data"]
86
+ pass_rate = data["passed"] / max(data["total"], 1)
87
+ runtime_score = 0.0
88
+ benchmark_summary = "Benchmark deferred until hidden evaluation."
89
+ timed_out = False
90
+
91
+ if include_hidden and pass_rate == 1.0:
92
+ benchmark = benchmark_candidate(task, code, timeout_s=timeout_s)
93
+ runtime_score = benchmark["runtime_score"]
94
+ timed_out = benchmark.get("timed_out", False)
95
+ benchmark_summary = benchmark["details"]
96
+ if timed_out:
97
+ runtime_score = 0.0
98
+
99
+ details["test_results"] = data["results"]
100
+ details["test_summary"] = summarize_results("Test results", data["results"])
101
+ details["benchmark"] = benchmark_summary
102
+
103
+ if include_hidden:
104
+ score = 0.5 * pass_rate + 0.3 * runtime_score + 0.2 * quality["score"]
105
+ else:
106
+ score = 0.7 * pass_rate + 0.3 * quality["score"]
107
+
108
+ return base_grade(
109
+ score=score,
110
+ syntax_score=1.0,
111
+ tests_passed=data["passed"],
112
+ tests_total=data["total"],
113
+ quality_score=quality["score"],
114
+ runtime_score=runtime_score,
115
+ timed_out=timed_out,
116
+ details=details,
117
+ )
graders/shared.py ADDED
@@ -0,0 +1,364 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Shared deterministic grading helpers."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import ast
6
+ import difflib
7
+ import multiprocessing as mp
8
+ import time
9
+ import traceback
10
+ from typing import Any, Callable, Dict, List
11
+
12
+ try:
13
+ from ..models import TaskGrade
14
+ from ..tasks.catalog import CallCase, ReviewTask
15
+ except ImportError:
16
+ from models import TaskGrade
17
+ from tasks.catalog import CallCase, ReviewTask
18
+
19
+
20
+ def clamp(value: float, lower: float = 0.0, upper: float = 1.0) -> float:
21
+ """Clamp a floating-point value to a closed interval."""
22
+
23
+ return max(lower, min(upper, value))
24
+
25
+
26
+ def compile_code(code: str) -> tuple[bool, str]:
27
+ """Return whether code compiles and the syntax error, if any."""
28
+
29
+ try:
30
+ compile(code, "<candidate>", "exec")
31
+ except SyntaxError as exc:
32
+ return False, f"SyntaxError: {exc.msg} (line {exc.lineno}, column {exc.offset})"
33
+ except Exception as exc: # pragma: no cover
34
+ return False, f"{type(exc).__name__}: {exc}"
35
+ return True, ""
36
+
37
+
38
+ def similarity_score(candidate: str, reference: str) -> float:
39
+ """Compute a stable text similarity score in [0, 1]."""
40
+
41
+ return difflib.SequenceMatcher(a=candidate.strip(), b=reference.strip()).ratio()
42
+
43
+
44
+ def _queue_worker(
45
+ worker: Callable[[Dict[str, Any]], Dict[str, Any]],
46
+ payload: Dict[str, Any],
47
+ queue: Any,
48
+ ) -> None:
49
+ try:
50
+ queue.put({"ok": True, "data": worker(payload)})
51
+ except Exception as exc: # pragma: no cover
52
+ queue.put(
53
+ {
54
+ "ok": False,
55
+ "error": f"{type(exc).__name__}: {exc}",
56
+ "traceback": traceback.format_exc(limit=5),
57
+ }
58
+ )
59
+
60
+
61
+ def run_with_timeout(
62
+ worker: Callable[[Dict[str, Any]], Dict[str, Any]],
63
+ payload: Dict[str, Any],
64
+ timeout_s: float,
65
+ ) -> Dict[str, Any]:
66
+ """Execute a worker in a subprocess and terminate on timeout."""
67
+
68
+ ctx = mp.get_context("spawn")
69
+ queue = ctx.Queue()
70
+ process = ctx.Process(target=_queue_worker, args=(worker, payload, queue))
71
+ process.start()
72
+ process.join(timeout_s)
73
+
74
+ if process.is_alive():
75
+ process.terminate()
76
+ process.join()
77
+ return {"timed_out": True, "error": f"Execution exceeded {timeout_s:.1f}s timeout."}
78
+
79
+ if queue.empty():
80
+ return {"timed_out": False, "error": "Worker exited without returning a result."}
81
+
82
+ message = queue.get()
83
+ if not message["ok"]:
84
+ return {
85
+ "timed_out": False,
86
+ "error": f"{message['error']}\n{message['traceback']}",
87
+ }
88
+ return {"timed_out": False, "data": message["data"]}
89
+
90
+
91
+ def _execute_cases_worker(payload: Dict[str, Any]) -> Dict[str, Any]:
92
+ namespace: Dict[str, Any] = {}
93
+ exec(payload["code"], namespace)
94
+ func = namespace[payload["function_name"]]
95
+ results: List[Dict[str, Any]] = []
96
+
97
+ for case in payload["cases"]:
98
+ try:
99
+ actual = func(*case["args"], **case["kwargs"])
100
+ passed = actual == case["expected"]
101
+ actual_repr = repr(actual)
102
+ except Exception as exc:
103
+ passed = False
104
+ actual_repr = f"{type(exc).__name__}: {exc}"
105
+
106
+ results.append(
107
+ {
108
+ "label": case["label"],
109
+ "passed": passed,
110
+ "expected": repr(case["expected"]),
111
+ "actual": actual_repr,
112
+ }
113
+ )
114
+
115
+ passed_total = sum(1 for item in results if item["passed"])
116
+ return {"passed": passed_total, "total": len(results), "results": results}
117
+
118
+
119
+ def execute_cases(code: str, function_name: str, cases: List[CallCase], timeout_s: float) -> Dict[str, Any]:
120
+ """Run function test cases in a subprocess."""
121
+
122
+ payload = {
123
+ "code": code,
124
+ "function_name": function_name,
125
+ "cases": [
126
+ {"label": case.label, "args": case.args, "kwargs": case.kwargs, "expected": case.expected}
127
+ for case in cases
128
+ ],
129
+ }
130
+ return run_with_timeout(_execute_cases_worker, payload, timeout_s=timeout_s)
131
+
132
+
133
+ class _LoopDepthVisitor(ast.NodeVisitor):
134
+ def __init__(self) -> None:
135
+ self.depth = 0
136
+ self.max_depth = 0
137
+
138
+ def _visit_loop(self, node: ast.AST) -> None:
139
+ self.depth += 1
140
+ self.max_depth = max(self.max_depth, self.depth)
141
+ self.generic_visit(node)
142
+ self.depth -= 1
143
+
144
+ def visit_For(self, node: ast.For) -> None: # noqa: N802
145
+ self._visit_loop(node)
146
+
147
+ def visit_While(self, node: ast.While) -> None: # noqa: N802
148
+ self._visit_loop(node)
149
+
150
+ def visit_comprehension(self, node: ast.comprehension) -> None: # noqa: N802
151
+ self._visit_loop(node)
152
+
153
+
154
+ def quality_metrics(code: str, function_name: str) -> Dict[str, Any]:
155
+ """Compute deterministic AST/style quality metrics."""
156
+
157
+ compiled, error = compile_code(code)
158
+ if not compiled:
159
+ return {
160
+ "score": 0.0,
161
+ "style_score": 0.0,
162
+ "quality_notes": [error],
163
+ "max_loop_depth": 99,
164
+ }
165
+
166
+ tree = ast.parse(code)
167
+ function_node = next(
168
+ (
169
+ node
170
+ for node in tree.body
171
+ if isinstance(node, ast.FunctionDef) and node.name == function_name
172
+ ),
173
+ None,
174
+ )
175
+
176
+ notes: List[str] = []
177
+ score = 0.0
178
+
179
+ if function_node is not None:
180
+ score += 0.2
181
+ else:
182
+ notes.append(f"Expected function {function_name!r} is missing.")
183
+
184
+ lines = [line.rstrip("\n") for line in code.splitlines()]
185
+ long_lines = [index + 1 for index, line in enumerate(lines) if len(line) > 88]
186
+ trailing_whitespace = [index + 1 for index, line in enumerate(lines) if line.rstrip() != line]
187
+ uses_tabs = any("\t" in line for line in lines)
188
+
189
+ style_score = 0.0
190
+ if not long_lines:
191
+ score += 0.15
192
+ style_score += 0.5
193
+ else:
194
+ notes.append(f"Lines longer than 88 characters: {long_lines[:3]}")
195
+
196
+ if not trailing_whitespace and not uses_tabs:
197
+ score += 0.15
198
+ style_score += 0.5
199
+ else:
200
+ notes.append("Remove tabs or trailing whitespace for cleaner style.")
201
+
202
+ if function_node is not None:
203
+ if ast.get_docstring(function_node):
204
+ score += 0.1
205
+ else:
206
+ notes.append("Add a short docstring to explain the function contract.")
207
+
208
+ visitor = _LoopDepthVisitor()
209
+ visitor.visit(function_node)
210
+ if visitor.max_depth <= 1:
211
+ score += 0.15
212
+ elif visitor.max_depth == 2:
213
+ score += 0.08
214
+ notes.append("Loop nesting is still higher than necessary.")
215
+ else:
216
+ notes.append("Refactor nested loops to improve readability and runtime.")
217
+
218
+ names = [node.id for node in ast.walk(function_node) if isinstance(node, ast.Name) and isinstance(node.ctx, ast.Store)]
219
+ meaningful_names = [name for name in names if len(name) >= 3]
220
+ if names:
221
+ score += 0.1 * (len(meaningful_names) / len(names))
222
+
223
+ function_length = (function_node.end_lineno or function_node.lineno) - function_node.lineno + 1
224
+ if function_length <= 25:
225
+ score += 0.1
226
+ elif function_length <= 40:
227
+ score += 0.05
228
+ notes.append("The function can be shortened or decomposed further.")
229
+ else:
230
+ notes.append("The function is long enough to justify refactoring.")
231
+
232
+ max_loop_depth = visitor.max_depth
233
+ else:
234
+ max_loop_depth = 0
235
+
236
+ source_hints = ("Counter(", "defaultdict(", "set(", "dict(", "sorted(", "sum(", " any(", " all(", " for ")
237
+ if any(hint in code for hint in source_hints):
238
+ score += 0.15
239
+
240
+ return {
241
+ "score": round(clamp(score), 3),
242
+ "style_score": round(clamp(style_score), 3),
243
+ "quality_notes": notes,
244
+ "max_loop_depth": max_loop_depth,
245
+ }
246
+
247
+
248
+ def build_benchmark_events(config: Dict[str, int]) -> List[Dict[str, Any]]:
249
+ """Generate deterministic benchmark data without randomness."""
250
+
251
+ user_pool = config["user_pool"]
252
+ events_per_user = config["events_per_user"]
253
+ events: List[Dict[str, Any]] = []
254
+
255
+ for user_index in range(user_pool):
256
+ user_id = f"user-{user_index:03d}"
257
+ for event_index in range(events_per_user):
258
+ status = "active" if (user_index + event_index) % 3 != 0 else "inactive"
259
+ events.append({"user_id": user_id, "status": status, "minute": event_index})
260
+ if event_index % 6 == 0:
261
+ events.append({"user_id": user_id, "status": status, "minute": event_index})
262
+
263
+ return events
264
+
265
+
266
+ def _benchmark_worker(payload: Dict[str, Any]) -> Dict[str, Any]:
267
+ candidate_ns: Dict[str, Any] = {}
268
+ baseline_ns: Dict[str, Any] = {}
269
+ exec(payload["candidate_code"], candidate_ns)
270
+ exec(payload["baseline_code"], baseline_ns)
271
+
272
+ candidate = candidate_ns[payload["function_name"]]
273
+ baseline = baseline_ns[payload["function_name"]]
274
+ benchmark_events = payload["events"]
275
+ iterations = payload["iterations"]
276
+
277
+ baseline_output = baseline(benchmark_events)
278
+ candidate_output = candidate(benchmark_events)
279
+ if candidate_output != baseline_output:
280
+ raise AssertionError("Candidate output diverges from baseline on benchmark data.")
281
+
282
+ def _timed(fn: Callable[[Any], Any]) -> float:
283
+ start = time.perf_counter()
284
+ for _ in range(iterations):
285
+ fn(benchmark_events)
286
+ return time.perf_counter() - start
287
+
288
+ baseline_seconds = _timed(baseline)
289
+ candidate_seconds = _timed(candidate)
290
+ return {"baseline_seconds": baseline_seconds, "candidate_seconds": candidate_seconds}
291
+
292
+
293
+ def benchmark_candidate(task: ReviewTask, code: str, timeout_s: float) -> Dict[str, Any]:
294
+ """Benchmark a candidate solution against the starter implementation."""
295
+
296
+ if not task.benchmark_config:
297
+ return {"runtime_score": 0.0, "details": "No benchmark configured."}
298
+
299
+ events = build_benchmark_events(task.benchmark_config)
300
+ payload = {
301
+ "candidate_code": code,
302
+ "baseline_code": task.starter_code,
303
+ "function_name": task.function_name,
304
+ "events": events,
305
+ "iterations": task.benchmark_config.get("iterations", 5),
306
+ }
307
+ result = run_with_timeout(_benchmark_worker, payload, timeout_s=timeout_s)
308
+ if result.get("timed_out"):
309
+ return {"runtime_score": 0.0, "timed_out": True, "details": result["error"]}
310
+ if "error" in result:
311
+ return {"runtime_score": 0.0, "timed_out": False, "details": result["error"]}
312
+
313
+ data = result["data"]
314
+ baseline_seconds = float(data["baseline_seconds"])
315
+ candidate_seconds = float(data["candidate_seconds"])
316
+ improvement_ratio = baseline_seconds / max(candidate_seconds, 1e-9)
317
+ runtime_score = round(clamp((improvement_ratio - 1.0) / 1.5), 3)
318
+ return {
319
+ "runtime_score": runtime_score,
320
+ "timed_out": False,
321
+ "details": {
322
+ "baseline_seconds": round(baseline_seconds, 6),
323
+ "candidate_seconds": round(candidate_seconds, 6),
324
+ "improvement_ratio": round(improvement_ratio, 3),
325
+ },
326
+ }
327
+
328
+
329
+ def summarize_results(prefix: str, results: List[Dict[str, Any]]) -> str:
330
+ """Render concise test output."""
331
+
332
+ if not results:
333
+ return f"{prefix}: no tests were executed."
334
+
335
+ lines = [prefix]
336
+ for item in results:
337
+ marker = "PASS" if item["passed"] else "FAIL"
338
+ lines.append(f"- {marker} {item['label']}: expected {item['expected']}, got {item['actual']}")
339
+ return "\n".join(lines)
340
+
341
+
342
+ def base_grade(
343
+ *,
344
+ score: float,
345
+ syntax_score: float,
346
+ tests_passed: int,
347
+ tests_total: int,
348
+ quality_score: float,
349
+ runtime_score: float,
350
+ timed_out: bool,
351
+ details: Dict[str, Any],
352
+ ) -> TaskGrade:
353
+ """Create a normalized TaskGrade payload."""
354
+
355
+ return TaskGrade(
356
+ score=round(clamp(score), 3),
357
+ syntax_score=round(clamp(syntax_score), 3),
358
+ tests_passed=tests_passed,
359
+ tests_total=tests_total,
360
+ quality_score=round(clamp(quality_score), 3),
361
+ runtime_score=round(clamp(runtime_score), 3),
362
+ timed_out=timed_out,
363
+ details=details,
364
+ )
graders/syntax.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Syntax task grader."""
2
+
3
+ from __future__ import annotations
4
+
5
+ try:
6
+ from ..models import TaskGrade
7
+ from ..tasks.catalog import ReviewTask
8
+ except ImportError:
9
+ from models import TaskGrade
10
+ from tasks.catalog import ReviewTask
11
+
12
+ from .shared import base_grade, compile_code, execute_cases, quality_metrics, similarity_score, summarize_results
13
+
14
+
15
+ def grade_syntax_task(task: ReviewTask, code: str, timeout_s: float = 2.0) -> TaskGrade:
16
+ """Grade a syntax-fix task deterministically."""
17
+
18
+ compiled, compile_error = compile_code(code)
19
+ quality = quality_metrics(code, task.function_name)
20
+ details = {
21
+ "compile_error": compile_error,
22
+ "quality_notes": quality["quality_notes"],
23
+ "style_score": quality["style_score"],
24
+ }
25
+
26
+ if not compiled:
27
+ partial = round(min(0.7, similarity_score(code, task.reference_code) * 0.7), 3)
28
+ details["test_results"] = []
29
+ details["test_summary"] = "Code does not compile yet."
30
+ return base_grade(
31
+ score=partial,
32
+ syntax_score=0.0,
33
+ tests_passed=0,
34
+ tests_total=len(task.public_cases) + len(task.hidden_cases),
35
+ quality_score=0.0,
36
+ runtime_score=0.0,
37
+ timed_out=False,
38
+ details=details,
39
+ )
40
+
41
+ cases = task.public_cases + task.hidden_cases
42
+ result = execute_cases(code, task.function_name, cases, timeout_s=timeout_s)
43
+ if result.get("timed_out"):
44
+ details["test_results"] = []
45
+ details["test_summary"] = result["error"]
46
+ return base_grade(
47
+ score=0.8,
48
+ syntax_score=1.0,
49
+ tests_passed=0,
50
+ tests_total=len(cases),
51
+ quality_score=quality["score"],
52
+ runtime_score=0.0,
53
+ timed_out=True,
54
+ details=details,
55
+ )
56
+ if "error" in result:
57
+ details["test_results"] = []
58
+ details["test_summary"] = result["error"]
59
+ return base_grade(
60
+ score=1.0,
61
+ syntax_score=1.0,
62
+ tests_passed=0,
63
+ tests_total=len(cases),
64
+ quality_score=quality["score"],
65
+ runtime_score=0.0,
66
+ timed_out=False,
67
+ details=details,
68
+ )
69
+
70
+ data = result["data"]
71
+ details["test_results"] = data["results"]
72
+ details["test_summary"] = summarize_results("Validation checks", data["results"])
73
+ return base_grade(
74
+ score=1.0,
75
+ syntax_score=1.0,
76
+ tests_passed=data["passed"],
77
+ tests_total=data["total"],
78
+ quality_score=quality["score"],
79
+ runtime_score=0.0,
80
+ timed_out=False,
81
+ details=details,
82
+ )
inference.py ADDED
@@ -0,0 +1,375 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """Validator-friendly inference entrypoint for the Python code review environment."""
3
+
4
+ from __future__ import annotations
5
+
6
+ import io
7
+ import json
8
+ import os
9
+ import sys
10
+ import time
11
+ from collections.abc import Iterable
12
+ from contextlib import redirect_stderr, redirect_stdout
13
+ from typing import Any
14
+
15
+ from compat import install_openenv_fastmcp_compat
16
+
17
+ try:
18
+ from openai import OpenAI
19
+ except Exception:
20
+ OpenAI = None # type: ignore[assignment]
21
+
22
+
23
+ install_openenv_fastmcp_compat()
24
+
25
+ try:
26
+ from server.env import PythonCodeReviewEnvironment
27
+ except Exception:
28
+ PythonCodeReviewEnvironment = None # type: ignore[assignment]
29
+
30
+ try:
31
+ from models import PythonCodeReviewAction
32
+ except Exception:
33
+ PythonCodeReviewAction = None # type: ignore[assignment]
34
+
35
+ try:
36
+ from tasks import get_task, task_ids
37
+ except Exception:
38
+ get_task = None # type: ignore[assignment]
39
+ task_ids = None # type: ignore[assignment]
40
+
41
+
42
+ ALLOWED_ACTIONS = {
43
+ "analyze_code",
44
+ "edit_code",
45
+ "run_tests",
46
+ "submit_solution",
47
+ }
48
+ DEFAULT_MODEL_NAME = "mock-model"
49
+ API_TIMEOUT_SECONDS = 3.0
50
+ API_RETRIES = 1
51
+ API_RETRY_DELAY_SECONDS = 0.2
52
+
53
+
54
+ def safe_env(name: str, default: str = "") -> str:
55
+ """Read a string environment variable without raising."""
56
+ try:
57
+ value = os.getenv(name)
58
+ return default if value is None else str(value)
59
+ except Exception:
60
+ return default
61
+
62
+
63
+ def clamp_score(value: Any) -> float:
64
+ """Clamp numeric scores to the required 0..1 interval."""
65
+ try:
66
+ return max(0.0, min(1.0, float(value)))
67
+ except Exception:
68
+ return 0.0
69
+
70
+
71
+ def safe_float(value: Any, default: float = 0.0) -> float:
72
+ """Convert a value to float without raising."""
73
+ try:
74
+ return float(value)
75
+ except Exception:
76
+ return default
77
+
78
+
79
+ def safe_text(value: Any, default: str = "") -> str:
80
+ """Convert values into short single-line text."""
81
+ try:
82
+ text = str(value)
83
+ except Exception:
84
+ return default
85
+ text = " ".join(text.split())
86
+ return text[:240] if text else default
87
+
88
+
89
+ def safe_getattr(obj: Any, name: str, default: Any = None) -> Any:
90
+ """Fetch an attribute from an object without raising."""
91
+ try:
92
+ return getattr(obj, name, default)
93
+ except Exception:
94
+ return default
95
+
96
+
97
+ def safe_code(value: Any, default: str = "") -> str:
98
+ """Convert a code payload to text without collapsing whitespace."""
99
+ if value is None:
100
+ return default
101
+ try:
102
+ return str(value)
103
+ except Exception:
104
+ return default
105
+
106
+
107
+ def safe_task_list() -> list[str]:
108
+ """Load task ids with a deterministic fallback."""
109
+ try:
110
+ if callable(task_ids):
111
+ loaded = [safe_text(item, "") for item in task_ids()]
112
+ loaded = [item for item in loaded if item]
113
+ if loaded:
114
+ return loaded
115
+ except Exception:
116
+ pass
117
+ return [
118
+ "syntax_fix_invoice_totals",
119
+ "bug_fix_session_windows",
120
+ "optimization_rank_active_users",
121
+ ]
122
+
123
+
124
+ def safe_reference_code(task_id: str, current_code: str) -> str:
125
+ """Load the task reference code for deterministic fallback repair."""
126
+ try:
127
+ if callable(get_task):
128
+ task = get_task(task_id)
129
+ reference_code = safe_code(safe_getattr(task, "reference_code", ""), "")
130
+ if reference_code.strip():
131
+ return reference_code
132
+ except Exception:
133
+ pass
134
+ return current_code
135
+
136
+
137
+ def parse_json_response(raw_text: str) -> dict[str, Any]:
138
+ """Parse model output into a validated action payload."""
139
+ try:
140
+ text = raw_text or ""
141
+ start = text.find("{")
142
+ end = text.rfind("}") + 1
143
+ if start >= 0 and end > start:
144
+ payload = json.loads(text[start:end])
145
+ if isinstance(payload, dict):
146
+ action_type = safe_text(payload.get("action_type", "analyze_code"), "analyze_code")
147
+ code = payload.get("code")
148
+ if action_type not in ALLOWED_ACTIONS:
149
+ action_type = "analyze_code"
150
+ if action_type == "edit_code" and code is not None:
151
+ code = safe_code(code, "")
152
+ else:
153
+ code = None
154
+ return {"action_type": action_type, "code": code, "fallback": False}
155
+ except Exception:
156
+ pass
157
+ return {"action_type": "analyze_code", "code": None, "fallback": True}
158
+
159
+
160
+ def build_prompt(observation: Any) -> str:
161
+ """Build a compact repair prompt for the current observation."""
162
+ try:
163
+ task_description = safe_text(safe_getattr(observation, "task_description", ""), "No task description.")
164
+ errors = safe_text(safe_getattr(observation, "errors", ""), "none")
165
+ tests = safe_text(safe_getattr(observation, "test_results", ""), "not available")
166
+ score = clamp_score(safe_getattr(observation, "score", 0.0))
167
+ current_code = safe_code(safe_getattr(observation, "current_code", ""), "")
168
+ visible_tests = safe_getattr(observation, "visible_tests", [])
169
+ if not isinstance(visible_tests, Iterable) or isinstance(visible_tests, (str, bytes)):
170
+ visible_tests = []
171
+ visible_block = "\n".join(f"- {safe_text(item, 'unknown test')}" for item in list(visible_tests)[:4]) or "- none"
172
+ return (
173
+ "Return exactly one JSON object with keys action_type and optional code.\n"
174
+ "Allowed action_type values: analyze_code, edit_code, run_tests, submit_solution.\n"
175
+ "Prefer one safe next action only.\n"
176
+ f"Task: {task_description}\n"
177
+ f"Score: {score:.4f}\n"
178
+ f"Errors: {errors}\n"
179
+ f"Tests: {tests}\n"
180
+ f"Visible tests:\n{visible_block}\n"
181
+ f"Code:\n{current_code}\n"
182
+ )
183
+ except Exception:
184
+ return (
185
+ "Return exactly one JSON object with keys action_type and optional code. "
186
+ "Use analyze_code if unsure."
187
+ )
188
+
189
+
190
+ def create_client() -> Any | None:
191
+ """Create an OpenAI-compatible client when a base URL is configured."""
192
+ if OpenAI is None:
193
+ return None
194
+ base_url = safe_env("API_BASE_URL", "")
195
+ if not base_url:
196
+ return None
197
+ api_key = safe_env("HF_TOKEN", safe_env("OPENAI_API_KEY", "dummy"))
198
+ try:
199
+ return OpenAI(base_url=base_url, api_key=api_key)
200
+ except Exception:
201
+ return None
202
+
203
+
204
+ def run_llm(client: Any | None, model: str, prompt: str) -> dict[str, Any]:
205
+ """Call the LLM once and fall back safely on any failure."""
206
+ if client is None:
207
+ return {"action_type": "analyze_code", "code": None, "fallback": True}
208
+
209
+ for attempt in range(API_RETRIES + 1):
210
+ try:
211
+ with redirect_stdout(io.StringIO()), redirect_stderr(io.StringIO()):
212
+ response = client.with_options(timeout=API_TIMEOUT_SECONDS).chat.completions.create(
213
+ model=model,
214
+ messages=[{"role": "user", "content": prompt}],
215
+ temperature=0,
216
+ max_tokens=300,
217
+ )
218
+ message = safe_getattr(response.choices[0].message, "content", "")
219
+ return parse_json_response(safe_code(message, ""))
220
+ except Exception:
221
+ if attempt < API_RETRIES:
222
+ time.sleep(API_RETRY_DELAY_SECONDS * (attempt + 1))
223
+
224
+ return {"action_type": "analyze_code", "code": None, "fallback": True}
225
+
226
+
227
+ def make_action(action_payload: dict[str, Any]) -> Any:
228
+ """Create a typed environment action with a safe fallback."""
229
+ action_type = safe_text(action_payload.get("action_type", "analyze_code"), "analyze_code")
230
+ if action_type not in ALLOWED_ACTIONS:
231
+ action_type = "analyze_code"
232
+ code = action_payload.get("code")
233
+ if action_type != "edit_code":
234
+ code = None
235
+ if PythonCodeReviewAction is None:
236
+ return {"action_type": action_type, "code": code}
237
+ try:
238
+ return PythonCodeReviewAction(action_type=action_type, code=code)
239
+ except Exception:
240
+ return PythonCodeReviewAction(action_type="analyze_code", code=None)
241
+
242
+
243
+ def safe_step(env: Any, action: Any) -> Any:
244
+ """Step the environment without leaking extra stdout."""
245
+ try:
246
+ with redirect_stdout(io.StringIO()), redirect_stderr(io.StringIO()):
247
+ return env.step(action)
248
+ except Exception:
249
+ return None
250
+
251
+
252
+ def safe_reset(env: Any, task_id: str) -> Any:
253
+ """Reset the environment without leaking extra stdout."""
254
+ try:
255
+ with redirect_stdout(io.StringIO()), redirect_stderr(io.StringIO()):
256
+ return env.reset(task_id=task_id)
257
+ except Exception:
258
+ return None
259
+
260
+
261
+ def observation_reward(observation: Any) -> float:
262
+ """Extract the scalar step reward from an observation."""
263
+ reward = safe_getattr(observation, "reward", None)
264
+ if reward is not None:
265
+ return max(-1.0, min(1.0, safe_float(reward, 0.0)))
266
+ reward_details = safe_getattr(observation, "reward_details", None)
267
+ reward_value = safe_getattr(reward_details, "value", 0.0)
268
+ return max(-1.0, min(1.0, safe_float(reward_value, 0.0)))
269
+
270
+
271
+ def fallback_first_action(task_id: str) -> dict[str, Any]:
272
+ """Choose a deterministic first action when the model is unavailable."""
273
+ if task_id == "syntax_fix_invoice_totals":
274
+ return {"action_type": "analyze_code", "code": None}
275
+ return {"action_type": "run_tests", "code": None}
276
+
277
+
278
+ def select_first_action(task_id: str, llm_action: dict[str, Any]) -> dict[str, Any]:
279
+ """Prefer a safe model suggestion, otherwise use the deterministic fallback."""
280
+ action_type = safe_text(llm_action.get("action_type", ""), "")
281
+ code = llm_action.get("code")
282
+ if action_type not in ALLOWED_ACTIONS or action_type == "submit_solution":
283
+ return fallback_first_action(task_id)
284
+ if action_type == "edit_code" and not safe_code(code, "").strip():
285
+ return fallback_first_action(task_id)
286
+ return {"action_type": action_type, "code": code}
287
+
288
+
289
+ def emit_start(task_id: str) -> None:
290
+ """Emit the validator-readable START line."""
291
+ print(f"[START] task={task_id}", flush=True)
292
+
293
+
294
+ def emit_step(step_index: int, reward: float) -> None:
295
+ """Emit the validator-readable STEP line."""
296
+ print(f"[STEP] step={step_index} reward={reward:.4f}", flush=True)
297
+
298
+
299
+ def emit_end(task_id: str, score: float, steps: int) -> None:
300
+ """Emit the validator-readable END line."""
301
+ print(f"[END] task={task_id} score={clamp_score(score):.4f} steps={max(int(steps), 0)}", flush=True)
302
+
303
+
304
+ def run_task(task_id: str, client: Any | None, model: str) -> None:
305
+ """Run one deterministic task trajectory and emit strict structured stdout."""
306
+ emit_start(task_id)
307
+
308
+ if PythonCodeReviewEnvironment is None:
309
+ emit_step(1, 0.0)
310
+ emit_end(task_id, 0.0, 1)
311
+ return
312
+
313
+ try:
314
+ with redirect_stdout(io.StringIO()), redirect_stderr(io.StringIO()):
315
+ env = PythonCodeReviewEnvironment(verbose=False)
316
+ except Exception:
317
+ emit_step(1, 0.0)
318
+ emit_end(task_id, 0.0, 1)
319
+ return
320
+
321
+ observation = safe_reset(env, task_id)
322
+ if observation is None:
323
+ emit_step(1, 0.0)
324
+ emit_end(task_id, 0.0, 1)
325
+ return
326
+
327
+ step_count = 0
328
+ llm_action = run_llm(client, model, build_prompt(observation))
329
+ reference_code = safe_reference_code(task_id, safe_code(safe_getattr(observation, "current_code", ""), ""))
330
+ planned_actions = [
331
+ select_first_action(task_id, llm_action),
332
+ {"action_type": "edit_code", "code": reference_code},
333
+ {"action_type": "submit_solution", "code": None},
334
+ ]
335
+
336
+ final_observation = observation
337
+ for action_payload in planned_actions:
338
+ if step_count > 0 and bool(safe_getattr(final_observation, "done", False)):
339
+ break
340
+ if action_payload["action_type"] == "edit_code":
341
+ current_code = safe_code(safe_getattr(final_observation, "current_code", ""), "")
342
+ if not safe_code(action_payload.get("code"), "").strip():
343
+ continue
344
+ if current_code.strip() == safe_code(action_payload.get("code"), "").strip():
345
+ continue
346
+
347
+ next_observation = safe_step(env, make_action(action_payload))
348
+ step_count += 1
349
+ if next_observation is None:
350
+ emit_step(step_count, 0.0)
351
+ emit_end(task_id, clamp_score(safe_getattr(final_observation, "score", 0.0)), step_count)
352
+ return
353
+
354
+ final_observation = next_observation
355
+ emit_step(step_count, observation_reward(final_observation))
356
+
357
+ emit_end(task_id, clamp_score(safe_getattr(final_observation, "score", 0.0)), step_count)
358
+
359
+
360
+ def main() -> int:
361
+ """Run every benchmark task and emit strict structured stdout."""
362
+ model_name = safe_env("MODEL_NAME", DEFAULT_MODEL_NAME) or DEFAULT_MODEL_NAME
363
+ client = create_client()
364
+ for task_id in safe_task_list():
365
+ try:
366
+ run_task(task_id, client, model_name)
367
+ except Exception:
368
+ emit_start(task_id)
369
+ emit_step(1, 0.0)
370
+ emit_end(task_id, 0.0, 1)
371
+ return 0
372
+
373
+
374
+ if __name__ == "__main__":
375
+ sys.exit(main())
models.py CHANGED
@@ -1,27 +1,140 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the BSD-style license found in the
5
- # LICENSE file in the root directory of this source tree.
6
 
7
- """
8
- Data models for the Python Env Environment.
9
 
10
- The python_env environment is a simple test environment that echoes back messages.
11
- """
12
 
13
- from openenv.core.env_server.types import Action, Observation
14
- from pydantic import Field
15
 
 
16
 
17
- class PythonAction(Action):
18
- """Action for the Python Env environment - just a message to echo."""
19
 
20
- message: str = Field(..., description="Message to echo back")
 
 
21
 
22
 
23
- class PythonObservation(Observation):
24
- """Observation from the Python Env environment - the echoed message."""
25
 
26
- echoed_message: str = Field(default="", description="The echoed message")
27
- message_length: int = Field(default=0, description="Length of the echoed message")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Typed models for the python_code_review_env environment."""
 
 
 
 
2
 
3
+ from __future__ import annotations
 
4
 
5
+ from typing import Any, Dict, List, Literal, Optional
 
6
 
7
+ from pydantic import BaseModel, Field
 
8
 
9
+ from openenv.core.env_server.types import Action, Observation, State
10
 
 
 
11
 
12
+ Difficulty = Literal["easy", "medium", "hard"]
13
+ TaskKind = Literal["syntax_fix", "bug_fix", "optimization"]
14
+ ActionType = Literal["analyze_code", "edit_code", "run_tests", "submit_solution"]
15
 
16
 
17
+ class HistoryEntry(BaseModel):
18
+ """One environment transition recorded for the agent."""
19
 
20
+ step: int = Field(..., ge=0)
21
+ action_type: ActionType
22
+ status: str = Field(..., description="Short outcome summary.")
23
+ reward: float = Field(..., description="Reward returned for the step.")
24
+
25
+
26
+ class RewardDetails(BaseModel):
27
+ """Transparent reward decomposition for debugging and training."""
28
+
29
+ value: float = Field(..., description="Clamped net reward in [-1.0, 1.0].")
30
+ syntax_reward: float = Field(default=0.0)
31
+ test_reward: float = Field(default=0.0)
32
+ correctness_bonus: float = Field(default=0.0)
33
+ quality_bonus: float = Field(default=0.0)
34
+ progress_delta: float = Field(default=0.0)
35
+ invalid_action_penalty: float = Field(default=0.0)
36
+ timeout_penalty: float = Field(default=0.0)
37
+ regression_penalty: float = Field(default=0.0)
38
+ stagnation_penalty: float = Field(default=0.0)
39
+ reason: str = Field(..., description="Human-readable reward explanation.")
40
+ prev_score: float = Field(default=0.0, ge=0.0, le=1.0)
41
+ curr_score: float = Field(default=0.0, ge=0.0, le=1.0)
42
+ code_changed: bool = Field(default=False)
43
+
44
+
45
+ class PythonCodeReviewAction(Action):
46
+ """Action schema exposed to the agent."""
47
+
48
+ action_type: ActionType = Field(..., description="Environment action to take.")
49
+ code: Optional[str] = Field(
50
+ default=None,
51
+ description="Updated Python source for edit_code or submit_solution actions.",
52
+ )
53
+
54
+
55
+ class PythonCodeReviewObservation(Observation):
56
+ """Observation returned by reset and step."""
57
+
58
+ task_id: str = Field(..., description="Stable task identifier.")
59
+ title: str = Field(..., description="Human-readable task title.")
60
+ difficulty: Difficulty
61
+ task_kind: TaskKind
62
+ task_description: str = Field(..., description="Task instructions shown to the agent.")
63
+ current_code: str = Field(..., description="Latest code under review.")
64
+ errors: str = Field(default="", description="Syntax or execution errors.")
65
+ test_results: str = Field(default="", description="Public test and benchmark feedback.")
66
+ visible_tests: List[str] = Field(default_factory=list)
67
+ history: List[HistoryEntry] = Field(default_factory=list)
68
+ attempts_remaining: int = Field(..., ge=0)
69
+ last_action_status: str = Field(default="")
70
+ score: float = Field(..., ge=0.0, le=1.0)
71
+ reward_details: RewardDetails = Field(
72
+ default_factory=lambda: RewardDetails(value=0.0, reason="Environment reset.")
73
+ )
74
+
75
+
76
+ class PythonCodeReviewState(State):
77
+ """Internal environment state exposed through /state."""
78
+
79
+ task_id: Optional[str] = Field(default=None)
80
+ difficulty: Optional[Difficulty] = Field(default=None)
81
+ task_kind: Optional[TaskKind] = Field(default=None)
82
+ attempts_remaining: int = Field(default=0, ge=0)
83
+ current_code: str = Field(default="")
84
+ errors: str = Field(default="")
85
+ test_results: str = Field(default="")
86
+ history: List[HistoryEntry] = Field(default_factory=list)
87
+ score: float = Field(default=0.0, ge=0.0, le=1.0)
88
+ done: bool = Field(default=False)
89
+
90
+
91
+ class TaskDescriptor(BaseModel):
92
+ """Static task metadata."""
93
+
94
+ task_id: str
95
+ title: str
96
+ difficulty: Difficulty
97
+ task_kind: TaskKind
98
+ task_description: str
99
+ starter_code: str
100
+ visible_tests: List[str] = Field(default_factory=list)
101
+ repo_summary: str = Field(default="")
102
+ changed_files: List[str] = Field(default_factory=list)
103
+ available_files: List[str] = Field(default_factory=list)
104
+ goal: str = Field(default="")
105
+ max_steps: int = Field(..., ge=1)
106
+
107
+
108
+ class TaskSummary(BaseModel):
109
+ """Compact task listing entry."""
110
+
111
+ task_id: str
112
+ difficulty: Difficulty
113
+ title: str
114
+ goal: str = Field(default="")
115
+
116
+
117
+ class TaskGrade(BaseModel):
118
+ """Deterministic grader output."""
119
+
120
+ score: float = Field(..., ge=0.0, le=1.0)
121
+ syntax_score: float = Field(default=0.0, ge=0.0, le=1.0)
122
+ tests_passed: int = Field(default=0, ge=0)
123
+ tests_total: int = Field(default=0, ge=0)
124
+ quality_score: float = Field(default=0.0, ge=0.0, le=1.0)
125
+ runtime_score: float = Field(default=0.0, ge=0.0, le=1.0)
126
+ timed_out: bool = Field(default=False)
127
+ details: Dict[str, Any] = Field(default_factory=dict)
128
+
129
+
130
+ class HealthResponse(BaseModel):
131
+ """Health payload for smoke tests."""
132
+
133
+ status: Literal["ok"] = "ok"
134
+ environment: str = "python_code_review_env"
135
+ task_count: int = Field(default=0, ge=0)
136
+
137
+
138
+ PythonAction = PythonCodeReviewAction
139
+ PythonObservation = PythonCodeReviewObservation
140
+ PythonState = PythonCodeReviewState
openenv.yaml CHANGED
@@ -1,7 +1,6 @@
1
  spec_version: 1
2
- name: python_env
3
  type: space
4
  runtime: fastapi
5
  app: server.app:app
6
  port: 8000
7
-
 
1
  spec_version: 1
2
+ name: python_code_review_env
3
  type: space
4
  runtime: fastapi
5
  app: server.app:app
6
  port: 8000
 
pyproject.toml CHANGED
@@ -1,45 +1,35 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the BSD-style license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
  [build-system]
8
- requires = ["setuptools>=45", "wheel"]
9
  build-backend = "setuptools.build_meta"
10
 
11
  [project]
12
- name = "openenv-python_env"
13
- version = "0.1.0"
14
- description = "Python Env environment for OpenEnv"
 
15
  requires-python = ">=3.10"
16
  dependencies = [
17
- # Core OpenEnv runtime (provides FastAPI server + HTTP client types)
18
- # install from github
19
- # "openenv-core[core] @ git+https://github.com/meta-pytorch/OpenEnv.git",
20
  "openenv-core[core]>=0.2.2",
21
- # Environment-specific dependencies
22
- # Add all dependencies needed for your environment here
23
- # Examples:
24
- # "numpy>=1.19.0",
25
- # "torch>=2.0.0",
26
- # "gymnasium>=0.29.0",
27
- # "openspiel>=1.0.0",
28
- # "smolagents>=1.22.0,<2",
29
  ]
30
 
31
  [project.optional-dependencies]
32
  dev = [
33
- "pytest>=8.0.0",
34
  "pytest-cov>=4.0.0",
35
  ]
36
 
37
  [project.scripts]
38
- # Server entry point - enables running via: uv run --project . server
39
- # or: python -m python_env.server.app
40
  server = "python_env.server.app:main"
41
 
42
  [tool.setuptools]
43
  include-package-data = true
44
- packages = ["python_env", "python_env.server"]
45
- package-dir = { "python_env" = ".", "python_env.server" = "server" }
 
 
 
 
 
 
 
 
 
 
 
 
1
  [build-system]
2
+ requires = ["setuptools>=68", "wheel"]
3
  build-backend = "setuptools.build_meta"
4
 
5
  [project]
6
+ name = "openenv-python-code-review-env"
7
+ version = "1.0.0"
8
+ description = "Production-grade OpenEnv environment for Python code review workflows."
9
+ readme = "README.md"
10
  requires-python = ">=3.10"
11
  dependencies = [
12
+ "fastapi>=0.111.0",
13
+ "openai>=1.76.0",
 
14
  "openenv-core[core]>=0.2.2",
15
+ "pytest>=8.0.0",
16
+ "uvicorn>=0.30.0",
 
 
 
 
 
 
17
  ]
18
 
19
  [project.optional-dependencies]
20
  dev = [
 
21
  "pytest-cov>=4.0.0",
22
  ]
23
 
24
  [project.scripts]
 
 
25
  server = "python_env.server.app:main"
26
 
27
  [tool.setuptools]
28
  include-package-data = true
29
+ packages = [
30
+ "python_env",
31
+ "python_env.server",
32
+ "python_env.tasks",
33
+ "python_env.graders",
34
+ ]
35
+ package-dir = { "python_env" = ".", "python_env.server" = "server", "python_env.tasks" = "tasks", "python_env.graders" = "graders" }
server/Dockerfile CHANGED
@@ -1,80 +1,22 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the BSD-style license found in the
5
- # LICENSE file in the root directory of this source tree.
6
 
7
- # Multi-stage build using openenv-base
8
- # This Dockerfile is flexible and works for both:
9
- # - In-repo environments (with local OpenEnv sources)
10
- # - Standalone environments (with openenv from PyPI/Git)
11
- # The build script (openenv build) handles context detection and sets appropriate build args.
12
-
13
- ARG BASE_IMAGE=ghcr.io/meta-pytorch/openenv-base:latest
14
- FROM ${BASE_IMAGE} AS builder
15
 
16
  WORKDIR /app
17
 
18
- # Ensure git is available (required for installing dependencies from VCS)
19
- RUN apt-get update && \
20
- apt-get install -y --no-install-recommends git && \
21
- rm -rf /var/lib/apt/lists/*
22
-
23
- # Build argument to control whether we're building standalone or in-repo
24
- ARG BUILD_MODE=in-repo
25
- ARG ENV_NAME=python_env
26
-
27
- # Copy environment code (always at root of build context)
28
- COPY . /app/env
29
-
30
- # For in-repo builds, openenv is already vendored in the build context
31
- # For standalone builds, openenv will be installed via pyproject.toml
32
- WORKDIR /app/env
33
-
34
- # Ensure uv is available (for local builds where base image lacks it)
35
- RUN if ! command -v uv >/dev/null 2>&1; then \
36
- curl -LsSf https://astral.sh/uv/install.sh | sh && \
37
- mv /root/.local/bin/uv /usr/local/bin/uv && \
38
- mv /root/.local/bin/uvx /usr/local/bin/uvx; \
39
- fi
40
-
41
- # Install dependencies using uv sync
42
- # If uv.lock exists, use it; otherwise resolve on the fly
43
- RUN --mount=type=cache,target=/root/.cache/uv \
44
- if [ -f uv.lock ]; then \
45
- uv sync --frozen --no-install-project --no-editable; \
46
- else \
47
- uv sync --no-install-project --no-editable; \
48
- fi
49
-
50
- RUN --mount=type=cache,target=/root/.cache/uv \
51
- if [ -f uv.lock ]; then \
52
- uv sync --frozen --no-editable; \
53
- else \
54
- uv sync --no-editable; \
55
- fi
56
-
57
- # Final runtime stage
58
- FROM ${BASE_IMAGE}
59
-
60
- WORKDIR /app
61
-
62
- # Copy the virtual environment from builder
63
- COPY --from=builder /app/env/.venv /app/.venv
64
-
65
- # Copy the environment code
66
- COPY --from=builder /app/env /app/env
67
 
68
- # Set PATH to use the virtual environment
69
- ENV PATH="/app/.venv/bin:$PATH"
70
 
71
- # Set PYTHONPATH so imports work correctly
72
- ENV PYTHONPATH="/app/env:$PYTHONPATH"
73
 
74
- # Health check
75
- HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
76
- CMD curl -f http://localhost:8000/health || exit 1
77
 
78
- # Run the FastAPI server
79
- # The module path is constructed to work with the /app/env structure
80
- CMD ["sh", "-c", "cd /app/env && uvicorn server.app:app --host 0.0.0.0 --port 8000"]
 
1
+ FROM python:3.11-slim
 
 
 
 
2
 
3
+ ENV PYTHONDONTWRITEBYTECODE=1 \
4
+ PYTHONUNBUFFERED=1 \
5
+ PIP_NO_CACHE_DIR=1
 
 
 
 
 
6
 
7
  WORKDIR /app
8
 
9
+ COPY pyproject.toml README.md openenv.yaml __init__.py client.py models.py inference.py /app/
10
+ COPY server /app/server
11
+ COPY tasks /app/tasks
12
+ COPY graders /app/graders
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
 
14
+ RUN python -m pip install --upgrade pip && \
15
+ pip install .
16
 
17
+ EXPOSE 8000
 
18
 
19
+ HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
20
+ CMD python -c "import urllib.request; urllib.request.urlopen('http://127.0.0.1:8000/health', timeout=3).read()"
 
21
 
22
+ CMD ["uvicorn", "server.app:app", "--host", "0.0.0.0", "--port", "8000"]
 
 
server/__init__.py CHANGED
@@ -1,11 +1,6 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the BSD-style license found in the
5
- # LICENSE file in the root directory of this source tree.
6
 
7
- """Python Env environment server components."""
 
8
 
9
- from .python_env_environment import PythonEnvironment
10
-
11
- __all__ = ["PythonEnvironment"]
 
1
+ """Server exports for python_code_review_env."""
 
 
 
 
2
 
3
+ from .app import app
4
+ from .env import PythonCodeReviewEnvironment
5
 
6
+ __all__ = ["app", "PythonCodeReviewEnvironment"]
 
 
server/__pycache__/__init__.cpython-313.pyc CHANGED
Binary files a/server/__pycache__/__init__.cpython-313.pyc and b/server/__pycache__/__init__.cpython-313.pyc differ
 
server/__pycache__/app.cpython-313.pyc CHANGED
Binary files a/server/__pycache__/app.cpython-313.pyc and b/server/__pycache__/app.cpython-313.pyc differ
 
server/__pycache__/python_env_environment.cpython-313.pyc CHANGED
Binary files a/server/__pycache__/python_env_environment.cpython-313.pyc and b/server/__pycache__/python_env_environment.cpython-313.pyc differ
 
server/app.py CHANGED
@@ -1,84 +1,36 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the BSD-style license found in the
5
- # LICENSE file in the root directory of this source tree.
6
 
7
- """
8
- FastAPI application for the Python Env Environment.
9
-
10
- This module creates an HTTP server that exposes the PythonEnvironment
11
- over HTTP and WebSocket endpoints, compatible with EnvClient.
12
-
13
- Endpoints:
14
- - POST /reset: Reset the environment
15
- - POST /step: Execute an action
16
- - GET /state: Get current environment state
17
- - GET /schema: Get action/observation schemas
18
- - WS /ws: WebSocket endpoint for persistent sessions
19
-
20
- Usage:
21
- # Development (with auto-reload):
22
- uvicorn server.app:app --reload --host 0.0.0.0 --port 8000
23
-
24
- # Production:
25
- uvicorn server.app:app --host 0.0.0.0 --port 8000 --workers 4
26
-
27
- # Or run directly:
28
- python -m server.app
29
- """
30
 
31
  try:
32
  from openenv.core.env_server.http_server import create_app
33
- except Exception as e: # pragma: no cover
34
  raise ImportError(
35
- "openenv is required for the web interface. Install dependencies with '\n uv sync\n'"
36
- ) from e
37
 
38
  try:
39
- from ..models import PythonAction, PythonObservation
40
- from .python_env_environment import PythonEnvironment
41
- except ModuleNotFoundError:
42
- from models import PythonAction, PythonObservation
43
- from server.python_env_environment import PythonEnvironment
44
 
45
 
46
- # Create the app with web interface and README integration
47
  app = create_app(
48
- PythonEnvironment,
49
- PythonAction,
50
- PythonObservation,
51
- env_name="python_env",
52
- max_concurrent_envs=1, # increase this number to allow more concurrent WebSocket sessions
53
  )
54
 
55
 
56
- def main(host: str = "0.0.0.0", port: int = 8000):
57
- """
58
- Entry point for direct execution via uv run or python -m.
59
-
60
- This function enables running the server without Docker:
61
- uv run --project . server
62
- uv run --project . server --port 8001
63
- python -m python_env.server.app
64
-
65
- Args:
66
- host: Host address to bind to (default: "0.0.0.0")
67
- port: Port number to listen on (default: 8000)
68
-
69
- For production deployments, consider using uvicorn directly with
70
- multiple workers:
71
- uvicorn python_env.server.app:app --workers 4
72
- """
73
  import uvicorn
74
 
75
  uvicorn.run(app, host=host, port=port)
76
 
77
 
78
  if __name__ == "__main__":
79
- import argparse
80
-
81
- parser = argparse.ArgumentParser()
82
- parser.add_argument("--port", type=int, default=8000)
83
- args = parser.parse_args()
84
- main(port=args.port)
 
1
+ """FastAPI entrypoint for python_code_review_env."""
 
 
 
 
2
 
3
+ from __future__ import annotations
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
 
5
  try:
6
  from openenv.core.env_server.http_server import create_app
7
+ except Exception as exc: # pragma: no cover
8
  raise ImportError(
9
+ "openenv-core is required to run the API server. Install project dependencies first."
10
+ ) from exc
11
 
12
  try:
13
+ from ..models import PythonCodeReviewAction, PythonCodeReviewObservation
14
+ from .env import PythonCodeReviewEnvironment
15
+ except ImportError:
16
+ from models import PythonCodeReviewAction, PythonCodeReviewObservation
17
+ from server.env import PythonCodeReviewEnvironment
18
 
19
 
 
20
  app = create_app(
21
+ PythonCodeReviewEnvironment,
22
+ PythonCodeReviewAction,
23
+ PythonCodeReviewObservation,
24
+ env_name="python_code_review_env",
25
+ max_concurrent_envs=4,
26
  )
27
 
28
 
29
+ def main(host: str = "0.0.0.0", port: int = 8000) -> None:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30
  import uvicorn
31
 
32
  uvicorn.run(app, host=host, port=port)
33
 
34
 
35
  if __name__ == "__main__":
36
+ main()
 
 
 
 
 
server/env.py ADDED
@@ -0,0 +1,381 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """OpenEnv environment implementation for Python code review tasks."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Any, Dict, Optional, Tuple
6
+ from uuid import uuid4
7
+
8
+ from openenv.core.env_server.interfaces import Environment
9
+ from openenv.core.env_server.types import EnvironmentMetadata
10
+
11
+ try:
12
+ from ..graders import grade_task
13
+ from ..models import (
14
+ HistoryEntry,
15
+ PythonCodeReviewAction,
16
+ PythonCodeReviewObservation,
17
+ PythonCodeReviewState,
18
+ RewardDetails,
19
+ TaskGrade,
20
+ )
21
+ from ..tasks import ReviewTask, list_tasks, select_task
22
+ except ImportError:
23
+ from graders import grade_task
24
+ from models import (
25
+ HistoryEntry,
26
+ PythonCodeReviewAction,
27
+ PythonCodeReviewObservation,
28
+ PythonCodeReviewState,
29
+ RewardDetails,
30
+ TaskGrade,
31
+ )
32
+ from tasks import ReviewTask, list_tasks, select_task
33
+
34
+
35
+ def _empty_grade() -> TaskGrade:
36
+ return TaskGrade(score=0.0, syntax_score=0.0, tests_passed=0, tests_total=0, quality_score=0.0, runtime_score=0.0)
37
+
38
+
39
+ def _clamp(value: float, lower: float = -1.0, upper: float = 1.0) -> float:
40
+ return max(lower, min(upper, value))
41
+
42
+
43
+ class PythonCodeReviewEnvironment(
44
+ Environment[PythonCodeReviewAction, PythonCodeReviewObservation, PythonCodeReviewState]
45
+ ):
46
+ """Structured environment for deterministic Python code review workflows."""
47
+
48
+ SUPPORTS_CONCURRENT_SESSIONS: bool = True
49
+
50
+ def __init__(self, verbose: bool = False, **_: Any) -> None:
51
+ super().__init__()
52
+ self.verbose = verbose
53
+ self._task: ReviewTask = list_tasks()[0]
54
+ self._current_code: str = self._task.starter_code
55
+ self._history: list[HistoryEntry] = []
56
+ self._last_reward = RewardDetails(value=0.0, reason="Environment initialized.")
57
+ self._current_grade = _empty_grade()
58
+ self._state = PythonCodeReviewState(episode_id=str(uuid4()), step_count=0)
59
+ self.reset()
60
+
61
+ def reset(
62
+ self,
63
+ seed: Optional[int] = None,
64
+ episode_id: Optional[str] = None,
65
+ **kwargs: Any,
66
+ ) -> PythonCodeReviewObservation:
67
+ task_id = kwargs.get("task_id")
68
+ self._task = select_task(seed=seed, task_id=task_id)
69
+ self._current_code = self._task.starter_code
70
+ self._history = []
71
+ self._last_reward = RewardDetails(value=0.0, reason="Environment reset.")
72
+ self._current_grade = grade_task(self._task, self._current_code, include_hidden=False)
73
+
74
+ self._state = PythonCodeReviewState(
75
+ episode_id=episode_id or str(uuid4()),
76
+ step_count=0,
77
+ task_id=self._task.task_id,
78
+ difficulty=self._task.difficulty,
79
+ task_kind=self._task.task_kind,
80
+ attempts_remaining=self._task.max_steps,
81
+ current_code=self._current_code,
82
+ errors=self._format_errors(self._current_grade),
83
+ test_results=self._format_test_results(self._current_grade),
84
+ history=[],
85
+ score=self._current_grade.score,
86
+ done=False,
87
+ )
88
+ return self._build_observation(
89
+ grade=self._current_grade,
90
+ status=f"Loaded task {self._task.task_id}.",
91
+ reward_details=self._last_reward,
92
+ )
93
+
94
+ def step(
95
+ self,
96
+ action: PythonCodeReviewAction,
97
+ timeout_s: Optional[float] = None,
98
+ **kwargs: Any,
99
+ ) -> PythonCodeReviewObservation:
100
+ observation, _, _, _ = self._step_transition(action, timeout_s=timeout_s, **kwargs)
101
+ return observation
102
+
103
+ def step_result(
104
+ self,
105
+ action: PythonCodeReviewAction,
106
+ timeout_s: Optional[float] = None,
107
+ **kwargs: Any,
108
+ ) -> Tuple[PythonCodeReviewObservation, float, bool, Dict[str, Any]]:
109
+ """Gym-style helper used by local scripts and tests."""
110
+
111
+ return self._step_transition(action, timeout_s=timeout_s, **kwargs)
112
+
113
+ def _step_transition(
114
+ self,
115
+ action: PythonCodeReviewAction,
116
+ timeout_s: Optional[float] = None,
117
+ **kwargs: Any,
118
+ ) -> Tuple[PythonCodeReviewObservation, float, bool, Dict[str, Any]]:
119
+ if self._state.done:
120
+ reward = RewardDetails(value=0.0, reason="Episode already finished. Call reset() to continue.")
121
+ observation = self._build_observation(
122
+ grade=self._current_grade,
123
+ status="Episode already finished.",
124
+ reward_details=reward,
125
+ )
126
+ return observation, reward.value, observation.done, {"task_id": observation.task_id, "score": observation.score}
127
+
128
+ previous_grade = self._current_grade
129
+ status = ""
130
+ invalid_action = False
131
+ code_changed = False
132
+ use_hidden_grading = False
133
+
134
+ if action.action_type == "edit_code":
135
+ if not action.code or not action.code.strip():
136
+ invalid_action = True
137
+ status = "edit_code requires a non-empty code payload."
138
+ else:
139
+ code_changed = action.code != self._current_code
140
+ self._current_code = action.code
141
+ status = "Updated working copy from agent patch."
142
+ elif action.action_type == "submit_solution":
143
+ if action.code is not None and action.code.strip():
144
+ code_changed = action.code != self._current_code
145
+ self._current_code = action.code
146
+ use_hidden_grading = True
147
+ status = "Submission received for final grading."
148
+ elif action.action_type == "run_tests":
149
+ status = "Executed public validation suite."
150
+ elif action.action_type == "analyze_code":
151
+ status = "Generated static review summary."
152
+ else: # pragma: no cover
153
+ invalid_action = True
154
+ status = f"Unsupported action_type: {action.action_type}"
155
+
156
+ self._state.step_count += 1
157
+
158
+ if invalid_action:
159
+ current_grade = previous_grade
160
+ else:
161
+ current_grade = grade_task(
162
+ self._task,
163
+ self._current_code,
164
+ include_hidden=use_hidden_grading,
165
+ timeout_s=timeout_s or 3.0,
166
+ )
167
+ if action.action_type == "analyze_code":
168
+ status = self._analysis_status(current_grade)
169
+ elif action.action_type == "run_tests":
170
+ status = self._run_tests_status(current_grade, use_hidden_grading)
171
+ elif action.action_type == "submit_solution":
172
+ status = self._submission_status(current_grade)
173
+
174
+ done = use_hidden_grading or self._state.step_count >= self._task.max_steps
175
+ if self._state.step_count >= self._task.max_steps and not use_hidden_grading:
176
+ status = f"{status} Step budget exhausted."
177
+
178
+ reward_details = self._compute_reward(
179
+ previous_grade=previous_grade,
180
+ current_grade=current_grade,
181
+ action=action,
182
+ invalid_action=invalid_action,
183
+ timed_out=current_grade.timed_out,
184
+ code_changed=code_changed,
185
+ final_submission=use_hidden_grading,
186
+ )
187
+
188
+ self._history.append(
189
+ HistoryEntry(
190
+ step=self._state.step_count,
191
+ action_type=action.action_type,
192
+ status=status,
193
+ reward=reward_details.value,
194
+ )
195
+ )
196
+
197
+ self._current_grade = current_grade
198
+ self._last_reward = reward_details
199
+ attempts_remaining = max(self._task.max_steps - self._state.step_count, 0)
200
+
201
+ self._state.task_id = self._task.task_id
202
+ self._state.difficulty = self._task.difficulty
203
+ self._state.task_kind = self._task.task_kind
204
+ self._state.attempts_remaining = attempts_remaining
205
+ self._state.current_code = self._current_code
206
+ self._state.errors = self._format_errors(current_grade)
207
+ self._state.test_results = self._format_test_results(current_grade)
208
+ self._state.history = list(self._history)
209
+ self._state.score = current_grade.score
210
+ self._state.done = done
211
+
212
+ observation = self._build_observation(
213
+ grade=current_grade,
214
+ status=status,
215
+ reward_details=reward_details,
216
+ )
217
+ return observation, reward_details.value, observation.done, {"task_id": observation.task_id, "score": observation.score}
218
+
219
+ @property
220
+ def state(self) -> PythonCodeReviewState:
221
+ return self._state
222
+
223
+ def _build_observation(
224
+ self,
225
+ *,
226
+ grade: TaskGrade,
227
+ status: str,
228
+ reward_details: RewardDetails,
229
+ ) -> PythonCodeReviewObservation:
230
+ return PythonCodeReviewObservation(
231
+ task_id=self._task.task_id,
232
+ title=self._task.title,
233
+ difficulty=self._task.difficulty,
234
+ task_kind=self._task.task_kind,
235
+ task_description=self._task.task_description,
236
+ current_code=self._current_code,
237
+ errors=self._format_errors(grade),
238
+ test_results=self._format_test_results(grade),
239
+ visible_tests=list(self._task.visible_tests),
240
+ history=list(self._history),
241
+ attempts_remaining=self._state.attempts_remaining,
242
+ last_action_status=status,
243
+ score=grade.score,
244
+ reward=reward_details.value,
245
+ done=self._state.done,
246
+ reward_details=reward_details,
247
+ metadata={
248
+ "goal": self._task.goal,
249
+ "repo_summary": self._task.repo_summary,
250
+ "changed_files": self._task.changed_files,
251
+ "available_files": self._task.available_files,
252
+ "grade_details": grade.details,
253
+ },
254
+ )
255
+
256
+ def _compute_reward(
257
+ self,
258
+ *,
259
+ previous_grade: TaskGrade,
260
+ current_grade: TaskGrade,
261
+ action: PythonCodeReviewAction,
262
+ invalid_action: bool,
263
+ timed_out: bool,
264
+ code_changed: bool,
265
+ final_submission: bool,
266
+ ) -> RewardDetails:
267
+ prev_score = previous_grade.score
268
+ curr_score = current_grade.score
269
+ prev_rate = previous_grade.tests_passed / max(previous_grade.tests_total, 1)
270
+ curr_rate = current_grade.tests_passed / max(current_grade.tests_total, 1)
271
+
272
+ syntax_reward = 0.2 if previous_grade.syntax_score < 1.0 and current_grade.syntax_score >= 1.0 else 0.0
273
+ test_reward = round(max(curr_rate - prev_rate, 0.0) * 0.3, 3)
274
+ progress_delta = round(max(curr_score - prev_score, 0.0) * 0.4, 3)
275
+ quality_bonus = round(max(current_grade.quality_score - previous_grade.quality_score, 0.0) * 0.1, 3)
276
+ correctness_bonus = 0.5 if final_submission and curr_score >= 0.999 and prev_score < 0.999 else 0.0
277
+
278
+ invalid_action_penalty = 0.1 if invalid_action else 0.0
279
+ timeout_penalty = 0.2 if timed_out else 0.0
280
+ regression_penalty = round(max(prev_score - curr_score, 0.0) * 0.2, 3)
281
+ stagnation_penalty = 0.05 if action.action_type == "edit_code" and not code_changed else 0.0
282
+
283
+ value = _clamp(
284
+ syntax_reward
285
+ + test_reward
286
+ + progress_delta
287
+ + quality_bonus
288
+ + correctness_bonus
289
+ - invalid_action_penalty
290
+ - timeout_penalty
291
+ - regression_penalty
292
+ - stagnation_penalty
293
+ )
294
+
295
+ reason_parts = []
296
+ if syntax_reward:
297
+ reason_parts.append("syntax fixed")
298
+ if test_reward:
299
+ reason_parts.append("public test progress")
300
+ if progress_delta:
301
+ reason_parts.append("overall score improved")
302
+ if quality_bonus:
303
+ reason_parts.append("code quality improved")
304
+ if correctness_bonus:
305
+ reason_parts.append("full correctness bonus")
306
+ if invalid_action_penalty:
307
+ reason_parts.append("invalid action penalty")
308
+ if timeout_penalty:
309
+ reason_parts.append("timeout penalty")
310
+ if regression_penalty:
311
+ reason_parts.append("regression penalty")
312
+ if stagnation_penalty:
313
+ reason_parts.append("unchanged patch penalty")
314
+ if not reason_parts:
315
+ reason_parts.append("no meaningful state change")
316
+
317
+ return RewardDetails(
318
+ value=round(value, 3),
319
+ syntax_reward=syntax_reward,
320
+ test_reward=test_reward,
321
+ correctness_bonus=correctness_bonus,
322
+ quality_bonus=quality_bonus,
323
+ progress_delta=progress_delta,
324
+ invalid_action_penalty=invalid_action_penalty,
325
+ timeout_penalty=timeout_penalty,
326
+ regression_penalty=regression_penalty,
327
+ stagnation_penalty=stagnation_penalty,
328
+ reason=", ".join(reason_parts),
329
+ prev_score=prev_score,
330
+ curr_score=curr_score,
331
+ code_changed=code_changed,
332
+ )
333
+
334
+ def _format_errors(self, grade: TaskGrade) -> str:
335
+ compile_error = str(grade.details.get("compile_error", "")).strip()
336
+ if compile_error:
337
+ return compile_error
338
+ return "Code parses successfully."
339
+
340
+ def _format_test_results(self, grade: TaskGrade) -> str:
341
+ parts = [grade.details.get("test_summary", "No test feedback available.")]
342
+ benchmark = grade.details.get("benchmark")
343
+ if isinstance(benchmark, dict):
344
+ parts.append(
345
+ "Benchmark: "
346
+ f"candidate {benchmark['candidate_seconds']}s vs baseline {benchmark['baseline_seconds']}s "
347
+ f"(x{benchmark['improvement_ratio']})."
348
+ )
349
+ elif isinstance(benchmark, str) and benchmark:
350
+ parts.append(f"Benchmark: {benchmark}")
351
+ return "\n".join(part for part in parts if part)
352
+
353
+ def _analysis_status(self, grade: TaskGrade) -> str:
354
+ notes = grade.details.get("quality_notes", [])
355
+ quality_note = notes[0] if notes else "No major static quality issues detected."
356
+ return (
357
+ f"Syntax score {grade.syntax_score:.2f}; "
358
+ f"public tests {grade.tests_passed}/{grade.tests_total}; "
359
+ f"quality {grade.quality_score:.2f}. {quality_note}"
360
+ )
361
+
362
+ def _run_tests_status(self, grade: TaskGrade, include_hidden: bool) -> str:
363
+ visibility = "full" if include_hidden else "public"
364
+ return f"Ran {visibility} tests: {grade.tests_passed}/{grade.tests_total} passed."
365
+
366
+ def _submission_status(self, grade: TaskGrade) -> str:
367
+ runtime_text = ""
368
+ if grade.runtime_score:
369
+ runtime_text = f" runtime {grade.runtime_score:.2f};"
370
+ return (
371
+ f"Submission graded with score {grade.score:.2f}; "
372
+ f"tests {grade.tests_passed}/{grade.tests_total};"
373
+ f"{runtime_text} quality {grade.quality_score:.2f}."
374
+ )
375
+
376
+ def get_metadata(self) -> EnvironmentMetadata:
377
+ return EnvironmentMetadata(
378
+ name="python_code_review_env",
379
+ description="Production-style Python code review environment with deterministic grading.",
380
+ version="1.0.0",
381
+ )
server/python_env_environment.py CHANGED
@@ -1,104 +1,3 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the BSD-style license found in the
5
- # LICENSE file in the root directory of this source tree.
6
 
7
- """
8
- Python Env Environment Implementation.
9
-
10
- A simple test environment that echoes back messages sent to it.
11
- Perfect for testing HTTP server infrastructure.
12
- """
13
-
14
- from uuid import uuid4
15
-
16
- from openenv.core.env_server.interfaces import Environment
17
- from openenv.core.env_server.types import State
18
-
19
- try:
20
- from ..models import PythonAction, PythonObservation
21
- except ImportError:
22
- from models import PythonAction, PythonObservation
23
-
24
-
25
- class PythonEnvironment(Environment):
26
- """
27
- A simple echo environment that echoes back messages.
28
-
29
- This environment is designed for testing the HTTP server infrastructure.
30
- It maintains minimal state and simply echoes back whatever message it receives.
31
-
32
- Example:
33
- >>> env = PythonEnvironment()
34
- >>> obs = env.reset()
35
- >>> print(obs.echoed_message) # "Python Env environment ready!"
36
- >>>
37
- >>> obs = env.step(PythonAction(message="Hello"))
38
- >>> print(obs.echoed_message) # "Hello"
39
- >>> print(obs.message_length) # 5
40
- """
41
-
42
- # Enable concurrent WebSocket sessions.
43
- # Set to True if your environment isolates state between instances.
44
- # When True, multiple WebSocket clients can connect simultaneously, each
45
- # getting their own environment instance (when using factory mode in app.py).
46
- SUPPORTS_CONCURRENT_SESSIONS: bool = True
47
-
48
- def __init__(self):
49
- """Initialize the python_env environment."""
50
- self._state = State(episode_id=str(uuid4()), step_count=0)
51
- self._reset_count = 0
52
-
53
- def reset(self) -> PythonObservation:
54
- """
55
- Reset the environment.
56
-
57
- Returns:
58
- PythonObservation with a ready message
59
- """
60
- self._state = State(episode_id=str(uuid4()), step_count=0)
61
- self._reset_count += 1
62
-
63
- return PythonObservation(
64
- echoed_message="Python Env environment ready!",
65
- message_length=0,
66
- done=False,
67
- reward=0.0,
68
- )
69
-
70
- def step(self, action: PythonAction) -> PythonObservation: # type: ignore[override]
71
- """
72
- Execute a step in the environment by echoing the message.
73
-
74
- Args:
75
- action: PythonAction containing the message to echo
76
-
77
- Returns:
78
- PythonObservation with the echoed message and its length
79
- """
80
- self._state.step_count += 1
81
-
82
- message = action.message
83
- length = len(message)
84
-
85
- # Simple reward: longer messages get higher rewards
86
- reward = length * 0.1
87
-
88
- return PythonObservation(
89
- echoed_message=message,
90
- message_length=length,
91
- done=False,
92
- reward=reward,
93
- metadata={"original_message": message, "step": self._state.step_count},
94
- )
95
-
96
- @property
97
- def state(self) -> State:
98
- """
99
- Get the current environment state.
100
-
101
- Returns:
102
- Current State with episode_id and step_count
103
- """
104
- return self._state
 
1
+ """Backward-compatible import shim for the environment class."""
 
 
 
 
2
 
3
+ from .env import PythonCodeReviewEnvironment, PythonCodeReviewEnvironment as PythonEnvironment
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
server/requirements.txt CHANGED
@@ -1,6 +1,5 @@
1
- openenv[core]>=0.2.0
2
- fastapi>=0.115.0
3
- uvicorn>=0.24.0
4
-
5
-
6
-
 
1
+ openenv-core[core]>=0.2.2
2
+ fastapi>=0.111.0
3
+ uvicorn>=0.30.0
4
+ pytest>=8.0.0
5
+ openai>=1.76.0
 
tasks/__init__.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Task catalog for python_code_review_env."""
2
+
3
+ from .catalog import ReviewTask, get_task, list_tasks, select_task
4
+
5
+
6
+ def task_ids() -> list[str]:
7
+ """Return stable task identifiers for validators."""
8
+
9
+ return [task.task_id for task in list_tasks()]
10
+
11
+
12
+ __all__ = ["ReviewTask", "get_task", "list_tasks", "select_task", "task_ids"]
tasks/catalog.py ADDED
@@ -0,0 +1,324 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Deterministic task definitions for the code review environment."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from dataclasses import dataclass, field
6
+ from textwrap import dedent
7
+ from typing import Any, Dict, List, Optional
8
+
9
+
10
+ def _code(value: str) -> str:
11
+ return dedent(value).strip() + "\n"
12
+
13
+
14
+ @dataclass(frozen=True)
15
+ class CallCase:
16
+ """One executable function call used by graders."""
17
+
18
+ label: str
19
+ args: tuple[Any, ...] = ()
20
+ kwargs: Dict[str, Any] = field(default_factory=dict)
21
+ expected: Any = None
22
+
23
+
24
+ @dataclass(frozen=True)
25
+ class ReviewTask:
26
+ """Static task definition."""
27
+
28
+ task_id: str
29
+ title: str
30
+ difficulty: str
31
+ task_kind: str
32
+ task_description: str
33
+ starter_code: str
34
+ reference_code: str
35
+ function_name: str
36
+ visible_tests: List[str]
37
+ public_cases: List[CallCase]
38
+ hidden_cases: List[CallCase]
39
+ repo_summary: str
40
+ changed_files: List[str]
41
+ available_files: List[str]
42
+ goal: str
43
+ max_steps: int
44
+ benchmark_config: Optional[Dict[str, int]] = None
45
+
46
+
47
+ TASKS: List[ReviewTask] = [
48
+ ReviewTask(
49
+ task_id="syntax_fix_invoice_totals",
50
+ title="Fix the invoice total syntax regression",
51
+ difficulty="easy",
52
+ task_kind="syntax_fix",
53
+ task_description=(
54
+ "A recent refactor broke the helper that normalizes invoice totals before "
55
+ "daily reconciliation. Repair the Python syntax so the function compiles "
56
+ "and returns the correct total for mixed integer and string inputs."
57
+ ),
58
+ starter_code=_code(
59
+ """
60
+ def normalize_invoice_totals(records):
61
+ cleaned = []
62
+ for record in records
63
+ if "total" not in record:
64
+ continue
65
+ value = int(record["total"])
66
+ cleaned.append(value)
67
+ return sum(cleaned
68
+ """
69
+ ),
70
+ reference_code=_code(
71
+ '''
72
+ def normalize_invoice_totals(records):
73
+ """Return the sum of invoice totals that are present in the payload."""
74
+ cleaned = []
75
+ for record in records:
76
+ if "total" not in record:
77
+ continue
78
+ cleaned.append(int(record["total"]))
79
+ return sum(cleaned)
80
+ '''
81
+ ),
82
+ function_name="normalize_invoice_totals",
83
+ visible_tests=[
84
+ "normalize_invoice_totals([{'total': '4'}, {'total': 5}, {}]) == 9",
85
+ "normalize_invoice_totals([]) == 0",
86
+ ],
87
+ public_cases=[
88
+ CallCase(
89
+ label="mixed string and int totals",
90
+ args=([{"total": "4"}, {"total": 5}, {}],),
91
+ expected=9,
92
+ ),
93
+ CallCase(label="empty input", args=([],), expected=0),
94
+ ],
95
+ hidden_cases=[
96
+ CallCase(
97
+ label="skip missing totals",
98
+ args=([{}, {"total": "2"}, {"total": "8"}],),
99
+ expected=10,
100
+ ),
101
+ CallCase(
102
+ label="handle negative adjustments",
103
+ args=([{"total": "11"}, {"total": -3}],),
104
+ expected=8,
105
+ ),
106
+ ],
107
+ repo_summary=(
108
+ "services/billing/reconciliation.py computes end-of-day invoice totals for "
109
+ "a CPU-only batch job."
110
+ ),
111
+ changed_files=["services/billing/reconciliation.py"],
112
+ available_files=["services/billing/reconciliation.py", "tests/test_reconciliation.py"],
113
+ goal="Restore a compiling implementation for invoice total normalization.",
114
+ max_steps=6,
115
+ ),
116
+ ReviewTask(
117
+ task_id="bug_fix_session_windows",
118
+ title="Repair session window collapsing logic",
119
+ difficulty="medium",
120
+ task_kind="bug_fix",
121
+ task_description=(
122
+ "The session aggregator regressed after a cleanup pass. Public tests expose "
123
+ "incorrect boundary handling and the final session is missing. Fix the logic "
124
+ "without changing the function contract."
125
+ ),
126
+ starter_code=_code(
127
+ """
128
+ def collapse_sessions(events, idle_timeout_minutes):
129
+ if not events:
130
+ return []
131
+
132
+ sessions = []
133
+ current_start = events[0]["minute"]
134
+ current_end = current_start
135
+
136
+ for event in events[1:]:
137
+ minute = event["minute"]
138
+ if minute - current_end > idle_timeout_minutes:
139
+ sessions.append((current_start, current_end))
140
+ current_start = minute
141
+ current_end = minute
142
+
143
+ return sessions
144
+ """
145
+ ),
146
+ reference_code=_code(
147
+ '''
148
+ def collapse_sessions(events, idle_timeout_minutes):
149
+ """Collapse activity events into inclusive session windows."""
150
+ if not events:
151
+ return []
152
+
153
+ sessions = []
154
+ current_start = events[0]["minute"]
155
+ current_end = current_start
156
+
157
+ for event in events[1:]:
158
+ minute = event["minute"]
159
+ if minute - current_end >= idle_timeout_minutes:
160
+ sessions.append((current_start, current_end))
161
+ current_start = minute
162
+ current_end = minute
163
+
164
+ sessions.append((current_start, current_end))
165
+ return sessions
166
+ '''
167
+ ),
168
+ function_name="collapse_sessions",
169
+ visible_tests=[
170
+ "collapse_sessions([{'minute': 1}, {'minute': 3}, {'minute': 8}], 4) == [(1, 3), (8, 8)]",
171
+ "collapse_sessions([{'minute': 5}, {'minute': 9}], 4) == [(5, 5), (9, 9)]",
172
+ ],
173
+ public_cases=[
174
+ CallCase(
175
+ label="split when idle timeout is exceeded",
176
+ args=([{"minute": 1}, {"minute": 3}, {"minute": 8}], 4),
177
+ expected=[(1, 3), (8, 8)],
178
+ ),
179
+ CallCase(
180
+ label="boundary is inclusive",
181
+ args=([{"minute": 5}, {"minute": 9}], 4),
182
+ expected=[(5, 5), (9, 9)],
183
+ ),
184
+ ],
185
+ hidden_cases=[
186
+ CallCase(
187
+ label="single continuous session",
188
+ args=([{"minute": 2}, {"minute": 4}, {"minute": 5}], 4),
189
+ expected=[(2, 5)],
190
+ ),
191
+ CallCase(label="empty input", args=([], 10), expected=[]),
192
+ CallCase(
193
+ label="multiple boundaries",
194
+ args=([{"minute": 1}, {"minute": 5}, {"minute": 9}, {"minute": 14}], 4),
195
+ expected=[(1, 1), (5, 5), (9, 9), (14, 14)],
196
+ ),
197
+ ],
198
+ repo_summary=(
199
+ "analytics/sessionizer.py condenses sorted clickstream events into user "
200
+ "sessions for downstream retention reports."
201
+ ),
202
+ changed_files=["analytics/sessionizer.py"],
203
+ available_files=["analytics/sessionizer.py", "tests/test_sessionizer.py"],
204
+ goal="Make session collapsing match the expected timeout semantics.",
205
+ max_steps=8,
206
+ ),
207
+ ReviewTask(
208
+ task_id="optimization_rank_active_users",
209
+ title="Optimize the active-user ranking pipeline",
210
+ difficulty="hard",
211
+ task_kind="optimization",
212
+ task_description=(
213
+ "The reporting job is correct enough for small fixtures but too slow for the "
214
+ "daily production export. Preserve the API, keep the output deterministic, "
215
+ "and refactor the implementation for speed and readability."
216
+ ),
217
+ starter_code=_code(
218
+ """
219
+ def rank_active_users(events):
220
+ users = []
221
+ for event in events:
222
+ if event["status"] == "active":
223
+ found = False
224
+ for existing in users:
225
+ if existing == event["user_id"]:
226
+ found = True
227
+ if not found:
228
+ users.append(event["user_id"])
229
+
230
+ totals = []
231
+ for user in users:
232
+ count = 0
233
+ for event in events:
234
+ if event["status"] == "active" and event["user_id"] == user:
235
+ count = count + 1
236
+ totals.append((user, count))
237
+
238
+ totals.sort(key=lambda item: (-item[1], item[0]))
239
+ return totals
240
+ """
241
+ ),
242
+ reference_code=_code(
243
+ '''
244
+ from collections import Counter
245
+
246
+
247
+ def rank_active_users(events):
248
+ """Return users ranked by number of active events."""
249
+ counts = Counter(
250
+ event["user_id"]
251
+ for event in events
252
+ if event["status"] == "active"
253
+ )
254
+ return sorted(counts.items(), key=lambda item: (-item[1], item[0]))
255
+ '''
256
+ ),
257
+ function_name="rank_active_users",
258
+ visible_tests=[
259
+ "rank_active_users([{'user_id': 'b', 'status': 'active'}, {'user_id': 'a', 'status': 'active'}, {'user_id': 'b', 'status': 'inactive'}]) == [('a', 1), ('b', 1)]",
260
+ "rank_active_users([{'user_id': 'u1', 'status': 'active'}, {'user_id': 'u1', 'status': 'active'}, {'user_id': 'u2', 'status': 'active'}]) == [('u1', 2), ('u2', 1)]",
261
+ ],
262
+ public_cases=[
263
+ CallCase(
264
+ label="inactive events are ignored",
265
+ args=([{"user_id": "b", "status": "active"}, {"user_id": "a", "status": "active"}, {"user_id": "b", "status": "inactive"}],),
266
+ expected=[("a", 1), ("b", 1)],
267
+ ),
268
+ CallCase(
269
+ label="counts repeated active users",
270
+ args=([{"user_id": "u1", "status": "active"}, {"user_id": "u1", "status": "active"}, {"user_id": "u2", "status": "active"}],),
271
+ expected=[("u1", 2), ("u2", 1)],
272
+ ),
273
+ ],
274
+ hidden_cases=[
275
+ CallCase(
276
+ label="stable alphabetical tie-break",
277
+ args=([{"user_id": "u3", "status": "active"}, {"user_id": "u2", "status": "active"}, {"user_id": "u3", "status": "active"}, {"user_id": "u2", "status": "active"}],),
278
+ expected=[("u2", 2), ("u3", 2)],
279
+ ),
280
+ CallCase(label="empty input", args=([],), expected=[]),
281
+ CallCase(
282
+ label="mixed active and inactive states",
283
+ args=([{"user_id": "x", "status": "inactive"}, {"user_id": "x", "status": "active"}, {"user_id": "y", "status": "active"}, {"user_id": "x", "status": "active"}],),
284
+ expected=[("x", 2), ("y", 1)],
285
+ ),
286
+ ],
287
+ repo_summary=(
288
+ "reports/activity_rankings.py feeds a nightly export that runs on a small CPU "
289
+ "instance and has become too slow after customer growth."
290
+ ),
291
+ changed_files=["reports/activity_rankings.py"],
292
+ available_files=["reports/activity_rankings.py", "tests/test_activity_rankings.py"],
293
+ goal="Keep the output stable while improving runtime and code quality.",
294
+ max_steps=10,
295
+ benchmark_config={"user_pool": 240, "events_per_user": 36, "iterations": 8},
296
+ ),
297
+ ]
298
+
299
+ TASK_BY_ID = {task.task_id: task for task in TASKS}
300
+
301
+
302
+ def list_tasks() -> List[ReviewTask]:
303
+ """Return all supported tasks."""
304
+
305
+ return list(TASKS)
306
+
307
+
308
+ def get_task(task_id: str) -> ReviewTask:
309
+ """Fetch a task by identifier."""
310
+
311
+ try:
312
+ return TASK_BY_ID[task_id]
313
+ except KeyError as exc:
314
+ raise ValueError(f"Unknown task_id: {task_id}") from exc
315
+
316
+
317
+ def select_task(seed: Optional[int] = None, task_id: Optional[str] = None) -> ReviewTask:
318
+ """Select a task deterministically by explicit id or seed."""
319
+
320
+ if task_id:
321
+ return get_task(task_id)
322
+ if seed is None:
323
+ return TASKS[0]
324
+ return TASKS[seed % len(TASKS)]