guanning commited on
Commit
6cf60f3
·
verified ·
1 Parent(s): d9c474f

Upload folder using huggingface_hub

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitignore +165 -0
  2. ERRATA.md +32 -0
  3. LICENSE +21 -0
  4. README.md +217 -0
  5. assets/images/contamination1.png +3 -0
  6. assets/images/contamination2.png +3 -0
  7. assets/images/lc_barchart.png +3 -0
  8. assets/images/lcb.png +3 -0
  9. assets/images/lcb_vs_he.png +3 -0
  10. assets/images/tasks_radar.png +3 -0
  11. eval_ckpts.sh +32 -0
  12. instruct.png +3 -0
  13. lcb_runner/benchmarks/__init__.py +13 -0
  14. lcb_runner/benchmarks/code_execution.py +67 -0
  15. lcb_runner/benchmarks/code_generation.py +147 -0
  16. lcb_runner/benchmarks/test_output_prediction.py +70 -0
  17. lcb_runner/evaluation/__init__.py +6 -0
  18. lcb_runner/evaluation/compute_code_execution_metrics.py +56 -0
  19. lcb_runner/evaluation/compute_code_generation_metrics.py +255 -0
  20. lcb_runner/evaluation/compute_scores.py +155 -0
  21. lcb_runner/evaluation/compute_test_output_prediction_metrics.py +100 -0
  22. lcb_runner/evaluation/old_results_check.py +73 -0
  23. lcb_runner/evaluation/pass_k_utils.py +66 -0
  24. lcb_runner/evaluation/testing_util.py +592 -0
  25. lcb_runner/evaluation/utils_execute.py +267 -0
  26. lcb_runner/lm_styles.py +920 -0
  27. lcb_runner/prompts/__init__.py +4 -0
  28. lcb_runner/prompts/code_execution.py +183 -0
  29. lcb_runner/prompts/code_generation.py +384 -0
  30. lcb_runner/prompts/few_shot_examples/generation/func.json +12 -0
  31. lcb_runner/prompts/few_shot_examples/generation/stdin.json +10 -0
  32. lcb_runner/prompts/self_repair.py +352 -0
  33. lcb_runner/prompts/test_output_prediction.py +311 -0
  34. lcb_runner/runner/base_runner.py +181 -0
  35. lcb_runner/runner/claude3_runner.py +65 -0
  36. lcb_runner/runner/claude_runner.py +51 -0
  37. lcb_runner/runner/cohere_runner.py +50 -0
  38. lcb_runner/runner/custom_evaluator.py +114 -0
  39. lcb_runner/runner/deepseek_runner.py +70 -0
  40. lcb_runner/runner/fireworks_runner.py +73 -0
  41. lcb_runner/runner/gemini_runner.py +101 -0
  42. lcb_runner/runner/grok_runner.py +72 -0
  43. lcb_runner/runner/main.py +240 -0
  44. lcb_runner/runner/mistral_runner.py +53 -0
  45. lcb_runner/runner/oai_runner.py +79 -0
  46. lcb_runner/runner/parser.py +166 -0
  47. lcb_runner/runner/runner_utils.py +60 -0
  48. lcb_runner/runner/scenario_router.py +217 -0
  49. lcb_runner/runner/together_runner.py +53 -0
  50. lcb_runner/runner/vllm_runner.py +63 -0
.gitignore ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ logs/
2
+ cache/
3
+ output/
4
+ !output/pass_at_k_results.json
5
+
6
+ # Byte-compiled / optimized / DLL files
7
+ __pycache__/
8
+ *.py[cod]
9
+ *$py.class
10
+
11
+ # C extensions
12
+ *.so
13
+
14
+ # Distribution / packaging
15
+ .Python
16
+ build/
17
+ develop-eggs/
18
+ dist/
19
+ downloads/
20
+ eggs/
21
+ .eggs/
22
+ lib/
23
+ lib64/
24
+ parts/
25
+ sdist/
26
+ var/
27
+ wheels/
28
+ share/python-wheels/
29
+ *.egg-info/
30
+ .installed.cfg
31
+ *.egg
32
+ MANIFEST
33
+
34
+ # PyInstaller
35
+ # Usually these files are written by a python script from a template
36
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
37
+ *.manifest
38
+ *.spec
39
+
40
+ # Installer logs
41
+ pip-log.txt
42
+ pip-delete-this-directory.txt
43
+
44
+ # Unit test / coverage reports
45
+ htmlcov/
46
+ .tox/
47
+ .nox/
48
+ .coverage
49
+ .coverage.*
50
+ .cache
51
+ nosetests.xml
52
+ coverage.xml
53
+ *.cover
54
+ *.py,cover
55
+ .hypothesis/
56
+ .pytest_cache/
57
+ cover/
58
+
59
+ # Translations
60
+ *.mo
61
+ *.pot
62
+
63
+ # Django stuff:
64
+ *.log
65
+ local_settings.py
66
+ db.sqlite3
67
+ db.sqlite3-journal
68
+
69
+ # Flask stuff:
70
+ instance/
71
+ .webassets-cache
72
+
73
+ # Scrapy stuff:
74
+ .scrapy
75
+
76
+ # Sphinx documentation
77
+ docs/_build/
78
+
79
+ # PyBuilder
80
+ .pybuilder/
81
+ target/
82
+
83
+ # Jupyter Notebook
84
+ .ipynb_checkpoints
85
+
86
+ # IPython
87
+ profile_default/
88
+ ipython_config.py
89
+
90
+ # pyenv
91
+ # For a library or package, you might want to ignore these files since the code is
92
+ # intended to run in multiple environments; otherwise, check them in:
93
+ # .python-version
94
+
95
+ # pipenv
96
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
97
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
98
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
99
+ # install all needed dependencies.
100
+ #Pipfile.lock
101
+
102
+ # poetry
103
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
104
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
105
+ # commonly ignored for libraries.
106
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
107
+ #poetry.lock
108
+
109
+ # pdm
110
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
111
+ #pdm.lock
112
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
113
+ # in version control.
114
+ # https://pdm.fming.dev/#use-with-ide
115
+ .pdm.toml
116
+
117
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
118
+ __pypackages__/
119
+
120
+ # Celery stuff
121
+ celerybeat-schedule
122
+ celerybeat.pid
123
+
124
+ # SageMath parsed files
125
+ *.sage.py
126
+
127
+ # Environments
128
+ .env
129
+ .venv
130
+ env/
131
+ venv/
132
+ ENV/
133
+ env.bak/
134
+ venv.bak/
135
+
136
+ # Spyder project settings
137
+ .spyderproject
138
+ .spyproject
139
+
140
+ # Rope project settings
141
+ .ropeproject
142
+
143
+ # mkdocs documentation
144
+ /site
145
+
146
+ # mypy
147
+ .mypy_cache/
148
+ .dmypy.json
149
+ dmypy.json
150
+
151
+ # Pyre type checker
152
+ .pyre/
153
+
154
+ # pytype static type analyzer
155
+ .pytype/
156
+
157
+ # Cython debug symbols
158
+ cython_debug/
159
+
160
+ # PyCharm
161
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
162
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
163
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
164
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
165
+ #.idea/
ERRATA.md ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Livecodebench Errata
2
+ While we have tried to ensure the correctness of the benchmark in terms of test cases and problems, we have received feedback on issues regarding erroneous tests and problems not amenable to autograding. Here, we document the known issues and are also constantly using this feedback to improve our problem selection heuristics as we update LiveCodeBench.
3
+
4
+ ## Multiple Solutions Accepted
5
+ 11 problems have been identified with test case issues. Particularly, these problems accept multiple possible outputs, while the benchmark grades for only one specific output. Thus some correct solutions may be marked as incorrect and can add noise to the benchmark results.
6
+
7
+ 1. abc311_c - Multiple solutions accepted
8
+ 2. abc326_d - Multiple solutions accepted
9
+ 3. abc327_b - Multiple solutions accepted
10
+ 4. abc333_e - Multiple solutions accepted
11
+ 5. abc343_e - Multiple solutions accepted
12
+ 6. abc362_c - Multiple solutions accepted
13
+ 7. find-words-containing-character - Multiple solutions accepted
14
+ 8. find-the-peaks - Multiple solutions accepted
15
+ 9. generate-binary-strings-without-adjacent-zeros - Multiple solutions accepted
16
+ 10. arc185_c - Multiple solutions accepted
17
+ 11. abc343_a - Multiple solutions accepted
18
+
19
+
20
+ ## Interactive Problems
21
+ 2 problems have been identified as interactive problems. These problems require the submission to interact with the judge to get the final answer. The benchmark evaluation suite does not support interactive problems and thus these problems cannot be solved correctly. Note that these problems will not affect model comparison results since no model can solve these problems.
22
+
23
+ 1. abc337_e - Interactive problem
24
+ 2. abc355_e - Interactive problem
25
+
26
+ ## Erroneous Test Cases
27
+ 4 problems have been identified with erroneous test cases during scraping. These problems cannot be solved correctly with the current test cases. Note that these problems will not affect model comparison results since no model can solve these problems.
28
+
29
+ 1. abc350_c - Erroneous test cases
30
+ 2. apply-operations-to-make-string-empty - Erroneous test case of empty string
31
+ 3. most-frequent-ids - Adversarial input not following constraints
32
+ 4. arc189_a - Single erroneous test case with incorrect output
LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2024 LiveCodeBench
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
README.md ADDED
@@ -0,0 +1,217 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # LiveCodeBench
2
+ Official repository for the paper "LiveCodeBench: Holistic and Contamination Free Evaluation of Large Language Models for Code"
3
+
4
+ <p align="center">
5
+ <a href="https://livecodebench.github.io/">🏠 Home Page</a> •
6
+ <a href="https://huggingface.co/livecodebench/">💻 Data </a> •
7
+ <a href="https://livecodebench.github.io/leaderboard.html">🏆 Leaderboard</a> •
8
+ <a href="https://huggingface.co/spaces/livecodebench/code_generation_samples">🔍 Explorer</a>
9
+ </p>
10
+
11
+ ## Introduction
12
+ LiveCodeBench provides holistic and contamination-free evaluation of coding capabilities of LLMs. Particularly, LiveCodeBench continuously collects new problems over time from contests across three competition platforms -- LeetCode, AtCoder, and CodeForces. Next, LiveCodeBench also focuses on a broader range of code-related capabilities, such as self-repair, code execution, and test output prediction, beyond just code generation. Currently, LiveCodeBench hosts four hundred high-quality coding problems that were published between May 2023 and March 2024.
13
+
14
+
15
+ ## Installation
16
+ You can clone the repository using the following command:
17
+
18
+ ```bash
19
+ git clone https://github.com/LiveCodeBench/LiveCodeBench.git
20
+ cd LiveCodeBench
21
+ ```
22
+
23
+ We recommend using [uv](https://github.com/astral-sh/uv)
24
+ for managing dependencies, which can be installed in a [number of ways](https://github.com/astral-sh/uv?tab=readme-ov-file#installation).
25
+
26
+ Verify that `uv` is installed on your system by running:
27
+
28
+ ```bash
29
+ uv --version
30
+ ```
31
+
32
+ Once `uv` has been installed, use it to create a virtual environment for
33
+ LiveCodeBench and install its dependencies with the following commands:
34
+
35
+ ```bash
36
+ uv venv --python 3.11
37
+ source .venv/bin/activate
38
+
39
+ uv pip install -e .
40
+ ```
41
+
42
+ ## Data
43
+ We provide a benchmark for different code capability scenarios
44
+ - [Code Generation](https://huggingface.co/datasets/livecodebench/code_generation_lite)
45
+ - [Code Execution](https://huggingface.co/datasets/livecodebench/execution)
46
+ - [Test Output Prediction](https://huggingface.co/datasets/livecodebench/test_generation)
47
+
48
+ ## Inference and Evaluation
49
+
50
+ ### Dataset Versions
51
+ Since LiveCodeBench is a continuously updated benchmark, we provide different versions of the dataset. Particularly, we provide the following versions of the dataset:
52
+ - `release_v1`: The initial release of the dataset with problems released between May 2023 and Mar 2024 containing 400 problems.
53
+ - `release_v2`: The updated release of the dataset with problems released between May 2023 and May 2024 containing 511 problems.
54
+ - `release_v3`: The updated release of the dataset with problems released between May 2023 and Jul 2024 containing 612 problems.
55
+ - `release_v4`: The updated release of the dataset with problems released between May 2023 and Sep 2024 containing 713 problems.
56
+ - `release_v5`: The updated release of the dataset with problems released between May 2023 and Jan 2025 containing 880 problems.
57
+ - `release_v6`: The updated release of the dataset with problems released between May 2023 and Apr 2025 containing 1055 problems.
58
+
59
+ You can use the `--release_version` flag to specify the dataset version you wish to use. Particularly, you can use the following command to run the evaluation on the `release_v2` dataset. Release version defaults to `release_latest`. Additionally, we have introduced fine-grained release versions such as `v1`, `v2`, `v1_v3`, `v4_v5` for specific versions of the dataset.
60
+
61
+ ```bash
62
+ python -m lcb_runner.runner.main --model {model_name} --scenario codegeneration --evaluate --release_version release_v2
63
+ ```
64
+
65
+ ### Code Generation
66
+
67
+ We use `vllm` for inference using open models. By default, we use `tensor_parallel_size=${num_gpus}` to parallelize inference across all available GPUs. It can be configured using the `--tensor_parallel_size` flag as required.
68
+
69
+ For running the inference, please provide the `model_name` based on the [./lcb_runner/lm_styles.py](./lcb_runner/lm_styles.py) file.
70
+ The scenario (here `codegeneration`) can be used to specify the scenario for the model.
71
+
72
+ ```bash
73
+ python -m lcb_runner.runner.main --model {model_name} --scenario codegeneration
74
+ ```
75
+
76
+ Additionally, `--use_cache` flag can be used to cache the generated outputs and `--continue_existing` flag can be used to use the existing dumped results. In case you wish to use model from a local path, you can additionally provide `--local_model_path` flag with the path to the model. We use `n=10` and `temperature=0.2` for generation. Please check the [./lcb_runner/runner/parser.py](./lcb_runner/runner/parser.py) file for more details on the flags.
77
+
78
+ For closed API models, `--multiprocess` flag can be used to parallelize queries to API servers (adjustable according to rate limits).
79
+
80
+
81
+ #### Evaluation
82
+ We compute `pass@1` and `pass@5` metrics for model evaluations.
83
+ We use a modified version of the checker released with the [`apps` benchmark](https://github.com/hendrycks/apps/blob/main/eval/testing_util.py) to compute the metrics. Particularly, we identified some unhandled edge cases in the original checker and fixed them and additionally simplified the checker based on our collected dataset. To run the evaluation, you can add the `--evaluate` flag:
84
+
85
+
86
+ ```bash
87
+ python -m lcb_runner.runner.main --model {model_name} --scenario codegeneration --evaluate
88
+ ```
89
+
90
+ Note that time limits can cause slight (`< 0.5`) points of variation in the computation of the `pass@1` and `pass@5` metrics.
91
+ If you observe a significant variation in performance, adjust the `--num_process_evaluate` flag to a lower value or increase the `--timeout` flag. Please report particular issues caused by improper timeouts here.
92
+
93
+ Finally, to get scores over different time windows, you can use [./lcb_runner/evaluation/compute_scores.py](./lcb_runner/evaluation/compute_scores.py) file.
94
+ Particularly, you can provide `--start_date` and `--end_date` flags (using the `YYYY-MM-DD` format) to get scores over the specified time window. In our paper, to counter contamination in the DeepSeek models, we only report results on problems released after August 2023. You can replicate those evaluations using:
95
+
96
+ ```bash
97
+ python -m lcb_runner.evaluation.compute_scores --eval_all_file {saved_eval_all_file} --start_date 2023-09-01
98
+ ```
99
+
100
+ **NOTE: We have pruned a large number of test cases from the original benchmark and created `code_generation_lite` which is set as the default benchmark offering similar performance estimation much faster. If you wish to use the original benchmark, please use the `--not_fast` flag. We are in the process of updating the leaderboard scores with this updated setting.**
101
+
102
+ **NOTE: V2 Update: to run the updated LiveCodeBench please use `--release_version release_v2`. In addition, if you have existing results from `release_v1` you can add `--continue_existing` or better `--continue_existing_with_eval` flags to reuse the old completions or evaluations respectively.**
103
+
104
+
105
+ ### Self Repair
106
+ For running self repair, you need to provide an additional `--codegen_n` flag that maps to the number of codes that were generated during code generation. Additionally, the `--temperature` flag is used to resolve the old code generation eval file which must be present in the `output` directory.
107
+
108
+ ```bash
109
+ python -m lcb_runner.runner.main --model {model_name} --scenario selfrepair --codegen_n {num_codes_codegen} --n 1 # only n=1 supported
110
+ ```
111
+
112
+ In case you have results on a smaller subset or version of the benchmark, you can use `--continue_existing` and `--continue_existing_with_eval` flags to reuse the old computations. Particularly, you can run the following command to continue from existing generated solutions.
113
+
114
+ ```bash
115
+ python -m lcb_runner.runner.main --model {model_name} --scenario selfrepair --evaluate --continue_existing
116
+ ```
117
+
118
+ Note that this will only reuse the generated samples and rerun evaluations. To reuse the old evaluations, you can add the `--continue_existing_with_eval` flag.
119
+
120
+ ### Test Output Prediction
121
+ For running the test output prediction scenario you can simply run
122
+
123
+ ```bash
124
+ python -m lcb_runner.runner.main --model {model_name} --scenario testoutputprediction --evaluate
125
+ ```
126
+
127
+ ### Code Execution
128
+ For running the code execution scenario you can simply run
129
+
130
+ ```bash
131
+ python -m lcb_runner.runner.main --model {model_name} --scenario codeexecution --evaluate
132
+ ```
133
+
134
+ Additionally, we support the COT setting with
135
+
136
+ ```bash
137
+ python -m lcb_runner.runner.main --model {model_name} --scenario codeexecution --cot_code_execution --evaluate
138
+ ```
139
+
140
+ ## Custom Evaluation
141
+ Alternatively, you can use [`lcb_runner/runner/custom_evaluator.py`](./lcb_runner/runner/custom_evaluator.py) to directly evaluate model generations in a custom file. The file should contain a list of model outputs, appropriately formatted for evaluation in the order of benchmark problems.
142
+
143
+ ```bash
144
+ python -m lcb_runner.runner.custom_evaluator --custom_output_file {path_to_custom_outputs}
145
+ ```
146
+
147
+ Particularly, arrange the outputs in the following format
148
+
149
+ ```json
150
+ [
151
+ {"question_id": "id1", "code_list": ["code1", "code2"]},
152
+ {"question_id": "id2", "code_list": ["code1", "code2"]}
153
+ ]
154
+ ```
155
+
156
+
157
+ ## Adding Support for New Models
158
+
159
+ To add support for new models, we have implemented an extensible framework to add new models and customize prompts appropriately.
160
+
161
+ Step 1: Add a new model to the [./lcb_runner/lm_styles.py](./lcb_runner/lm_styles.py) file. Particularly, extend the `LMStyle` class to add a new model family and extend the model to the `LanguageModelList` array.
162
+
163
+ Step 2: Since we use instruction tuned models, we allow configuring the instruction for each model. Modify the [./lcb_runner/prompts/generation.py](./lcb_runner/prompts/generation.py) file to add a new prompt for the model in the `format_prompt_generation` function.
164
+ For example, the prompt for `DeepSeekCodeInstruct` family of models looks as follows
165
+
166
+ ```python
167
+ # ./lcb_runner/prompts/generation.py
168
+ if LanguageModelStyle == LMStyle.DeepSeekCodeInstruct:
169
+ prompt = f"{PromptConstants.SYSTEM_MESSAGE_DEEPSEEK}\n\n"
170
+ prompt += f"{get_deepseekcode_question_template_answer(question)}"
171
+ return prompt
172
+ ```
173
+
174
+ ## Submit Models to Leaderboard
175
+ We are currently accepting submissions for only the code generation scenario. To submit models you can create a pull request on our [submissions](https://github.com/LiveCodeBench/submissions). Particularly, you can copy your model generations folder from `output` to the `submissions` folder and create a pull request. We will review the submission and add the model to the leaderboard accordingly.
176
+
177
+ ## ERRATA
178
+ We maintain a list of known issues and updates in the [ERRATA.md](./ERRATA.md) file. Particularly, we document issues regarding erroneous tests and problems not amenable to autograding. We are constantly using this feedback to improve our problem selection heuristics as we update LiveCodeBench.
179
+
180
+ ## Results
181
+ LiveCodeBench can be used to evaluate performance of LLMs on different time-windows (using problem release date to filter the models).
182
+ Thus we can detect and prevent potential contamination in the evaluation process and evaluate LLMs on _new_ problems.
183
+
184
+ <div style="text-align: center;">
185
+ <img src="./assets/images/contamination1.png" alt="Code Generation Live Evaluation" class="teaser-image"
186
+ width="40%" />
187
+ <img src="./assets/images/contamination2.png" alt="Test Output Prediction Live Evaluation" class="teaser-image"
188
+ width="40%" />
189
+ </div>
190
+
191
+ Next, we evaluate models on different code capabilities and find that relative performances of models do change over tasks (left).
192
+ Thus, it highlights the need for holistic evaluation of LLMs for code.
193
+
194
+ <div style="text-align: center;">
195
+ <img src="./assets/images/tasks_radar.png" alt="Holistic Tasks Evaluation" class="teaser-image"
196
+ width="36.1%" />
197
+ <img src="./assets/images/lcb_vs_he.png" alt="Comparing LCB vs HumanEval" class="teaser-image"
198
+ width="46%" />
199
+ </div>
200
+
201
+ We also find evidence of possible overfitting on HumanEval (right).
202
+ Particularly, models that perform well on HumanEval do not necessarily perform well on LiveCodeBench.
203
+ In the scatterplot above, we find the models get clustered into two groups, shaded in red and green.
204
+ The red group contains models that perform well on HumanEval but poorly on LiveCodeBench, while the green group contains models that perform well on both.
205
+
206
+ For more details, please refer to our website at [livecodebench.github.io](https://livecodebench.github.io).
207
+
208
+ ## Citation
209
+
210
+ ```bibtex
211
+ @article{jain2024livecodebench,
212
+ author = {Naman Jain and King Han and Alex Gu and Wen-Ding Li and Fanjia Yan and Tianjun Zhang and Sida Wang and Armando Solar-Lezama and Koushik Sen and Ion Stoica},
213
+ title = {LiveCodeBench: Holistic and Contamination Free Evaluation of Large Language Models for Code},
214
+ year = {2024},
215
+ journal = {arXiv preprint},
216
+ }
217
+ ```
assets/images/contamination1.png ADDED

Git LFS Details

  • SHA256: 80a6f89eb9080b0e5d8f8a1c13c0a391d7eb6950c21b73c4fd456b5c79fa8c16
  • Pointer size: 131 Bytes
  • Size of remote file: 184 kB
assets/images/contamination2.png ADDED

Git LFS Details

  • SHA256: d84f769aec86bc1d3b9a456220e0b10d6819370be8c5a323af3963cf525e9263
  • Pointer size: 131 Bytes
  • Size of remote file: 170 kB
assets/images/lc_barchart.png ADDED

Git LFS Details

  • SHA256: fae76b79fb175c282bc759b543dbba6770a64aa0fcb9d53dd61d465a7f6a96a0
  • Pointer size: 131 Bytes
  • Size of remote file: 219 kB
assets/images/lcb.png ADDED

Git LFS Details

  • SHA256: 247f3c0a7d343a22056527cb38228202dce5f3dd6890351330ed26ea1af05955
  • Pointer size: 131 Bytes
  • Size of remote file: 782 kB
assets/images/lcb_vs_he.png ADDED

Git LFS Details

  • SHA256: c1a7ee977370b6ba520579cf65d161ff7579f19576e399d842001d6640c98966
  • Pointer size: 131 Bytes
  • Size of remote file: 794 kB
assets/images/tasks_radar.png ADDED

Git LFS Details

  • SHA256: 7525e72296171ce8345143a2aec658aef0205676c59a9ebcd76ef4e9e115eda1
  • Pointer size: 132 Bytes
  • Size of remote file: 1.33 MB
eval_ckpts.sh ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ set -e
3
+
4
+ BASE="/data/repos/agentica-rllm/scripts/train/deepcoder/checkpoints/deepcoder-1.7b"
5
+
6
+ run_eval() {
7
+ local model_path="$1"
8
+ local nickname="$2"
9
+ echo "Running eval: $nickname"
10
+ CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 python -m lcb_runner.runner.main \
11
+ --model "$model_path" \
12
+ --nickname "$nickname" \
13
+ --scenario codegeneration \
14
+ --evaluate \
15
+ --release_version v4_v6 \
16
+ --n 256 \
17
+ --temperature 0.6 \
18
+ --top_p 0.95 \
19
+ --tensor_parallel_size 8 \
20
+ --num_process_evaluate 60
21
+ }
22
+
23
+
24
+
25
+ # run_eval "$BASE/adv:maxrl-cont:false/global_step_100/actor/checkpoint" maxrl_binary_100
26
+ # run_eval "$BASE/adv:tailrl-cont:true/global_step_100/actor/checkpoint" tailrl_cont_100
27
+ # run_eval "$BASE/adv:maxrl-cont:false/global_step_200/actor/checkpoint" maxrl_binary_200
28
+ # run_eval "$BASE/adv:tailrl-cont:true/global_step_200/actor/checkpoint" tailrl_cont_200
29
+ # run_eval "$BASE/adv:maxrl-cont:false/global_step_300/actor/checkpoint" maxrl_binary_300
30
+ # run_eval "$BASE/adv:tailrl-cont:true/global_step_300/actor/checkpoint" tailrl_cont_300
31
+ run_eval "$BASE/adv:maxrl-cont:false/global_step_400/actor/checkpoint" maxrl_binary_400
32
+ run_eval "$BASE/adv:tailrl-cont:true/global_step_400/actor/checkpoint" tailrl_cont_400
instruct.png ADDED

Git LFS Details

  • SHA256: a1d76eb18c477711c99e9e65203d7ad63fc574765af4a429bdb37d690c1d8741
  • Pointer size: 130 Bytes
  • Size of remote file: 83.4 kB
lcb_runner/benchmarks/__init__.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from lcb_runner.benchmarks.code_generation import (
2
+ CodeGenerationProblem,
3
+ load_code_generation_dataset,
4
+ load_code_generation_dataset_not_fast,
5
+ )
6
+ from lcb_runner.benchmarks.test_output_prediction import (
7
+ TestOutputPredictionProblem,
8
+ load_test_prediction_dataset,
9
+ )
10
+ from lcb_runner.benchmarks.code_execution import (
11
+ CodeExecutionProblem,
12
+ load_code_execution_dataset,
13
+ )
lcb_runner/benchmarks/code_execution.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from enum import Enum
3
+ from datetime import datetime
4
+ from dataclasses import dataclass
5
+
6
+ from datasets import load_dataset
7
+
8
+
9
+ @dataclass
10
+ class CodeExecutionProblem:
11
+ question_id: str
12
+ contest_id: str
13
+ contest_date: datetime
14
+ difficulty: str
15
+ function_name: str
16
+ code: str
17
+ input: str
18
+ output: str
19
+ id: str
20
+ problem_id: str
21
+ numsteps: int
22
+
23
+ def __post_init__(self):
24
+ pass
25
+
26
+ def insert_output(self, output_list: list[str], pred_list: list[str]) -> dict:
27
+ return {
28
+ "question_id": self.question_id,
29
+ "contest_id": self.contest_id,
30
+ "contest_date": self.contest_date.isoformat(),
31
+ "difficulty": self.difficulty,
32
+ "function_name": self.function_name,
33
+ "code": self.code,
34
+ "input": self.input,
35
+ "output": self.output,
36
+ "id": self.id,
37
+ "problem_id": self.problem_id,
38
+ "numsteps": self.numsteps,
39
+ "output_list": output_list,
40
+ "pred_list": pred_list,
41
+ }
42
+
43
+ def insert_output_evaluation(
44
+ self, output_list: list[str], code_list: list[str], graded_list: list[bool]
45
+ ) -> dict:
46
+ output = self.insert_output(output_list, code_list)
47
+ output["graded_list"] = graded_list
48
+ output["pass@1"] = graded_list.count(True) / len(graded_list)
49
+ return output
50
+
51
+ def get_evaluation_sample(self) -> dict:
52
+ return {
53
+ "code": self.code,
54
+ "input": self.input,
55
+ "output": self.output,
56
+ }
57
+
58
+
59
+ def load_code_execution_dataset(release_version="release_v1") -> list[CodeExecutionProblem]:
60
+ dataset = load_dataset("livecodebench/execution-v2", split="test")
61
+ dataset = [CodeExecutionProblem(**p) for p in dataset] # type: ignore
62
+ print(f"Loaded {len(dataset)} problems")
63
+ return dataset
64
+
65
+
66
+ if __name__ == "__main__":
67
+ dataset = load_code_execution_dataset()
lcb_runner/benchmarks/code_generation.py ADDED
@@ -0,0 +1,147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import zlib
3
+ import pickle
4
+ import base64
5
+ from enum import Enum
6
+ from datetime import datetime
7
+ from dataclasses import dataclass
8
+
9
+ from datasets import load_dataset
10
+
11
+
12
+ class Platform(Enum):
13
+ LEETCODE = "leetcode"
14
+ CODEFORCES = "codeforces"
15
+ ATCODER = "atcoder"
16
+
17
+
18
+ class Difficulty(Enum):
19
+ EASY = "easy"
20
+ MEDIUM = "medium"
21
+ HARD = "hard"
22
+
23
+
24
+ class TestType(Enum):
25
+ STDIN = "stdin"
26
+ FUNCTIONAL = "functional"
27
+
28
+
29
+ @dataclass
30
+ class Test:
31
+ input: str
32
+ output: str
33
+ testtype: TestType
34
+
35
+ def __post_init__(self):
36
+ self.testtype = TestType(self.testtype)
37
+ # if self.testtype == TestType.FUNCTIONAL:
38
+ # self.input = json.loads(self.input)
39
+ # self.output = json.loads(self.output)
40
+
41
+
42
+ @dataclass
43
+ class CodeGenerationProblem:
44
+ question_title: str
45
+ question_content: str
46
+ platform: Platform
47
+ question_id: str
48
+ contest_id: str
49
+ contest_date: datetime
50
+ starter_code: str
51
+ difficulty: Difficulty
52
+ public_test_cases: list[Test]
53
+ private_test_cases: list[Test]
54
+ metadata: dict
55
+
56
+ def __post_init__(self):
57
+ self.platform = Platform(self.platform)
58
+ self.difficulty = Difficulty(self.difficulty)
59
+ self.contest_date = datetime.fromisoformat(self.contest_date)
60
+
61
+ self.public_test_cases = json.loads(self.public_test_cases) # type: ignore
62
+ self.public_test_cases = [Test(**t) for t in self.public_test_cases]
63
+
64
+ try:
65
+ self.private_test_cases = json.loads(self.private_test_cases) # type: ignore
66
+ except:
67
+ self.private_test_cases = json.loads(
68
+ pickle.loads(
69
+ zlib.decompress(
70
+ base64.b64decode(self.private_test_cases.encode("utf-8")) # type: ignore
71
+ )
72
+ )
73
+ ) # type: ignore
74
+ self.private_test_cases = [Test(**t) for t in self.private_test_cases]
75
+
76
+ self.metadata = json.loads(self.metadata) # type: ignore
77
+
78
+ def insert_output(self, output_list: list[str], code_list: list[str]) -> dict:
79
+ return {
80
+ "question_title": self.question_title,
81
+ "question_content": self.question_content,
82
+ "platform": self.platform.value,
83
+ "question_id": self.question_id,
84
+ "contest_id": self.contest_id,
85
+ "contest_date": self.contest_date.isoformat(),
86
+ "starter_code": self.starter_code,
87
+ "difficulty": self.difficulty.value,
88
+ "output_list": output_list,
89
+ "code_list": code_list,
90
+ }
91
+
92
+ def insert_output_evaluation(
93
+ self,
94
+ output_list: list[str],
95
+ code_list: list[str],
96
+ graded_list: list[bool],
97
+ **kwargs,
98
+ ) -> dict:
99
+ output = self.insert_output(output_list, code_list)
100
+ output["graded_list"] = graded_list
101
+ output["pass@1"] = graded_list.count(True) / len(graded_list)
102
+ for k, v in kwargs.items():
103
+ output[k] = v
104
+ return output
105
+
106
+ def get_evaluation_sample(self):
107
+ return {
108
+ "input_output": json.dumps(
109
+ {
110
+ "inputs": [
111
+ t.input
112
+ for t in self.public_test_cases + self.private_test_cases
113
+ ],
114
+ "outputs": [
115
+ t.output
116
+ for t in self.public_test_cases + self.private_test_cases
117
+ ],
118
+ "fn_name": self.metadata.get("func_name", None),
119
+ }
120
+ ),
121
+ }
122
+
123
+
124
+ def load_code_generation_dataset(release_version="release_v1", start_date=None, end_date=None) -> list[CodeGenerationProblem]:
125
+ dataset = load_dataset("livecodebench/code_generation_lite", split="test", version_tag=release_version, trust_remote_code=True)
126
+ dataset = [CodeGenerationProblem(**p) for p in dataset] # type: ignore
127
+ if start_date is not None:
128
+ p_start_date = datetime.strptime(start_date, "%Y-%m-%d")
129
+ dataset = [e for e in dataset if p_start_date <= e.contest_date]
130
+
131
+ if end_date is not None:
132
+ p_end_date = datetime.strptime(end_date, "%Y-%m-%d")
133
+ dataset = [e for e in dataset if e.contest_date <= p_end_date]
134
+
135
+ print(f"Loaded {len(dataset)} problems")
136
+ return dataset
137
+
138
+
139
+ def load_code_generation_dataset_not_fast(release_version="release_v1") -> list[CodeGenerationProblem]:
140
+ dataset = load_dataset("livecodebench/code_generation", split="test")
141
+ dataset = [CodeGenerationProblem(**p) for p in dataset] # type: ignore
142
+ print(f"Loaded {len(dataset)} problems")
143
+ return dataset
144
+
145
+
146
+ if __name__ == "__main__":
147
+ dataset = load_code_generation_dataset()
lcb_runner/benchmarks/test_output_prediction.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from enum import Enum
3
+ from datetime import datetime
4
+ from dataclasses import dataclass
5
+
6
+ from datasets import load_dataset
7
+
8
+
9
+ @dataclass
10
+ class Test:
11
+ input: str
12
+ output: str
13
+ testtype: str
14
+
15
+
16
+ @dataclass
17
+ class TestOutputPredictionProblem:
18
+ question_title: str
19
+ question_content: str
20
+ question_id: str
21
+ contest_id: str
22
+ contest_date: datetime
23
+ difficulty: str
24
+ test: list[Test]
25
+ starter_code: str
26
+ function_name: str
27
+ test_id: int
28
+
29
+ def __post_init__(self):
30
+ self.test = [Test(**t) for t in json.loads(self.test)] # type: ignore
31
+
32
+ def insert_output(self, output_list: list[str], pred_list: list[str]) -> dict:
33
+ return {
34
+ "question_title": self.question_title,
35
+ "question_content": self.question_content,
36
+ "question_id": self.question_id,
37
+ "contest_id": self.contest_id,
38
+ "contest_date": self.contest_date.isoformat(),
39
+ "difficulty": self.difficulty,
40
+ "output_list": output_list,
41
+ "pred_list": pred_list,
42
+ "test_id": self.test_id,
43
+ "function_name": self.function_name,
44
+ "starter_code": self.starter_code,
45
+ }
46
+
47
+ def insert_output_evaluation(
48
+ self, output_list: list[str], code_list: list[str], graded_list: list[bool]
49
+ ) -> dict:
50
+ output = self.insert_output(output_list, code_list)
51
+ output["graded_list"] = graded_list
52
+ output["pass@1"] = graded_list.count(True) / len(graded_list)
53
+ return output
54
+
55
+ def get_evaluation_sample(self) -> dict:
56
+ return {
57
+ "input": self.question_content,
58
+ "output": self.test[0].output,
59
+ }
60
+
61
+
62
+ def load_test_prediction_dataset(release_version="release_v1") -> list[TestOutputPredictionProblem]:
63
+ dataset = load_dataset("livecodebench/test_generation", split="test") # type: ignore
64
+ dataset = [TestOutputPredictionProblem(**d) for d in dataset]
65
+ print(f"Loaded {len(dataset)} prediction problems")
66
+ return dataset
67
+
68
+
69
+ if __name__ == "__main__":
70
+ dataset = load_test_prediction_dataset()
lcb_runner/evaluation/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ from lcb_runner.evaluation.compute_code_generation_metrics import codegen_metrics
2
+ from lcb_runner.evaluation.compute_code_execution_metrics import code_execution_metrics
3
+ from lcb_runner.evaluation.compute_test_output_prediction_metrics import (
4
+ test_output_metrics,
5
+ )
6
+ from lcb_runner.evaluation.pass_k_utils import extract_instance_results
lcb_runner/evaluation/compute_code_execution_metrics.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from concurrent.futures import ProcessPoolExecutor
3
+ import tqdm
4
+
5
+ from lcb_runner.evaluation.utils_execute import BASE_IMPORTS, check_correctness
6
+
7
+ def evaluate_score(args) -> list[bool]:
8
+ gs, (c, i, o) = args
9
+
10
+ execution_results = []
11
+ for g in gs:
12
+ if i in g:
13
+ pass
14
+ else:
15
+ code_to_execute = f"{BASE_IMPORTS}\n{c}\nassert {o} == {g}"
16
+ execution_results.append(check_correctness(code_to_execute, 3))
17
+ if len(execution_results) == 0:
18
+ execution_results = [False] * len(gs)
19
+ return execution_results
20
+
21
+ def pass_at_k(n, c, k):
22
+ if n - c < k: return 1.0
23
+ return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))
24
+
25
+ def code_execution_metrics(
26
+ samples,
27
+ generations,
28
+ ):
29
+ # execute the code
30
+ references = [(doc["code"], doc["input"], doc["output"]) for doc in samples]
31
+ with ProcessPoolExecutor() as executor:
32
+ args_list = zip(generations, references)
33
+ results = executor.map(evaluate_score, args_list)
34
+ all_results = list(results)
35
+
36
+ # serial version
37
+ # all_results = []
38
+ # for i in range(len(generations)):
39
+ # generation = generations[i]
40
+ # result = evaluate_score([generation, references[i]])
41
+ # all_results.append(result)
42
+
43
+ # compute pass@1
44
+ pass_at_1s = []
45
+ for execution_result in all_results:
46
+ c, n = execution_result.count(True), len(execution_result)
47
+ pass_at_1s.append(pass_at_k(n, c, 1))
48
+ metrics = {"pass@1": sum(pass_at_1s) / len(pass_at_1s) * 100}
49
+
50
+ results = {}
51
+ for i, r in enumerate(all_results):
52
+ r_new = []
53
+ for _r in r:
54
+ r_new.append([_r])
55
+ results[i] = r_new
56
+ return [metrics, results]
lcb_runner/evaluation/compute_code_generation_metrics.py ADDED
@@ -0,0 +1,255 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # borrowed and extended from
2
+ # https://github.com/Naman-ntc/codescratch/blob/main/evaluation/bigcode-evaluation-harness/lm_eval/tasks/custom_metrics/apps_custom_metrics/utils.py
3
+
4
+ import os
5
+ import sys
6
+
7
+ sys.set_int_max_str_digits(50000)
8
+
9
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
10
+ import json
11
+ import multiprocessing
12
+ from collections import defaultdict
13
+ from concurrent.futures import ProcessPoolExecutor, as_completed
14
+
15
+
16
+ import numpy as np
17
+ from tqdm import tqdm
18
+
19
+ from lcb_runner.evaluation.testing_util import run_test
20
+ from lcb_runner.evaluation.pass_k_utils import compute_metrics_from_results
21
+
22
+
23
+ def _temp_run(sample, generation, debug, result, metadata_list, timeout):
24
+ res, metadata = run_test(sample, test=generation, debug=debug, timeout=timeout)
25
+ result.append(res)
26
+ metadata_list.append(metadata)
27
+
28
+
29
+ def check_correctness(sample, generation, timeout, debug=True):
30
+ """Check correctness of code generation with a global timeout.
31
+ The global timeout is to catch some extreme/rare cases not handled by the timeouts
32
+ inside `run_test`"""
33
+
34
+ manager = multiprocessing.Manager()
35
+ result = manager.list()
36
+ metadata_list = manager.list()
37
+ p = multiprocessing.Process(
38
+ target=_temp_run,
39
+ args=(sample, generation, debug, result, metadata_list, timeout),
40
+ )
41
+ p.start()
42
+ p.join(
43
+ timeout=(timeout + 1) * len(json.loads(sample["input_output"])["inputs"]) + 5
44
+ )
45
+ try:
46
+ os.kill(p.pid, 9) # Force to kill the process by PID for safety
47
+ except:
48
+ None
49
+ if p.is_alive():
50
+ p.kill()
51
+ if not result:
52
+ in_outs = json.loads(sample["input_output"])
53
+ # consider that all tests failed
54
+ result = [[-1 for i in range(len(in_outs["inputs"]))]]
55
+ if debug:
56
+ print(f"global timeout")
57
+ return result[0], metadata_list[0]
58
+
59
+
60
+ def evaluate_generations_by_problem(args):
61
+ problem_generations: list[str] = args[0]
62
+ sample = args[1]
63
+ debug: bool = args[2]
64
+ timeout: int = args[3]
65
+
66
+ res = []
67
+ metadata = []
68
+ for o_idx, o in enumerate(problem_generations):
69
+ curr_res = [-2]
70
+ try:
71
+ curr_res, curr_metadata = check_correctness(
72
+ sample, o, timeout=timeout, debug=debug
73
+ )
74
+ if debug:
75
+ print(f"\nSuccessful compilation of task {o_idx}!")
76
+ fixed = []
77
+ for e in curr_res:
78
+ if isinstance(e, np.ndarray):
79
+ e = e.item(0)
80
+ if isinstance(e, np.bool_):
81
+ e = bool(e)
82
+ fixed.append(e)
83
+ curr_res = fixed
84
+ if not np.all(curr_res):
85
+ if debug:
86
+ print(f"Results were not True for all test cases {curr_res=}\n")
87
+ except Exception as e:
88
+ if debug:
89
+ print(f"Compilation failed, test framework exception = {repr(e)}{e}\n")
90
+ # break
91
+ curr_metadata = {
92
+ "error": repr(e),
93
+ "error_code": -5,
94
+ "error_message": "TestRunnerError",
95
+ }
96
+ finally:
97
+ assert isinstance(curr_res, list), curr_res
98
+ assert isinstance(curr_metadata, dict), curr_metadata
99
+ res.append(curr_res)
100
+ metadata.append(curr_metadata)
101
+ if debug:
102
+ for i, r in enumerate(problem_generations):
103
+ print("Sample\n")
104
+ print(r)
105
+ print("\n")
106
+ print("Result\n")
107
+ print(res[i])
108
+ print("*" * 30 + "\n\n")
109
+ return res, metadata
110
+
111
+
112
+ def evaluate_generations(
113
+ samples_list: list,
114
+ generations_list: list[list[str]],
115
+ debug: bool = False,
116
+ num_process_evaluate: int = 16,
117
+ timeout=6,
118
+ ):
119
+ """We take the list of code generations and try to compile them
120
+ and the run their corresponding unit tests which are retrieved from the APPS dataset.
121
+
122
+ Args:
123
+ generations: list of code generations (same order as samples in APPS dataset)
124
+ level: difficulty level used in the generation, can be "all", "introductory", "interview" or "competition"
125
+
126
+ Returns:
127
+ results: dictionary of results, key is the problem index, value is a list of results for each generation
128
+ """
129
+
130
+ # generations are code generations in the same order of the dataset
131
+
132
+ inputs = [
133
+ [(generations_list[index], samples_list[index], debug, timeout), index]
134
+ for index in range(len(generations_list))
135
+ ]
136
+
137
+ with tqdm(total=len(inputs)) as pbar:
138
+ with ProcessPoolExecutor(
139
+ max_workers=1 if debug else num_process_evaluate
140
+ ) as executor:
141
+ futures = {
142
+ executor.submit(evaluate_generations_by_problem, arg): index
143
+ for arg, index in inputs
144
+ }
145
+
146
+ results = {}
147
+ metadata = {}
148
+ for future in as_completed(futures):
149
+ index = futures[future]
150
+ results[index], metadata[index] = future.result()
151
+ pbar.update(1)
152
+
153
+ assert len(results) == len(
154
+ inputs
155
+ ), f"results = {len(results)} inputs = {len(inputs)} {results=}"
156
+ # results = {i: r for r, (_, i) in zip(results, inputs)}
157
+
158
+ return results, metadata
159
+
160
+
161
+ def codegen_metrics(
162
+ samples_list,
163
+ generations_list,
164
+ k_list=[1, 5, 10, 20, 40, 50, 75, 100, 125, 150, 200, 500, 1000],
165
+ num_process_evaluate=16,
166
+ timeout=6,
167
+ debug=False,
168
+ ):
169
+
170
+ samples_linear = []
171
+ generations_linear = []
172
+ remap_index = []
173
+ results = defaultdict(list)
174
+ metadatas = defaultdict(list)
175
+ for idx, (sample, generation_list) in enumerate(
176
+ zip(samples_list, generations_list)
177
+ ):
178
+ assert isinstance(generation_list, list), generations_list[0]
179
+ for generation in generation_list:
180
+ assert isinstance(generation, str), generations_list[0]
181
+ samples_linear.append(sample)
182
+ generations_linear.append([generation])
183
+ remap_index.append(idx)
184
+
185
+ print(f"Evaluating {len(samples_linear)}...")
186
+
187
+ results_linear, metadatas_linear = evaluate_generations(
188
+ samples_linear,
189
+ generations_linear,
190
+ debug=debug,
191
+ num_process_evaluate=num_process_evaluate,
192
+ timeout=timeout,
193
+ )
194
+
195
+ for idx, sub_results in sorted(results_linear.items(), key=lambda x: x[0]):
196
+ results[remap_index[idx]].append(sub_results[0])
197
+
198
+ for idx, sub_metadatas in sorted(metadatas_linear.items(), key=lambda x: x[0]):
199
+ metadatas[remap_index[idx]].append(sub_metadatas[0])
200
+
201
+ metrics = compute_metrics_from_results(results, k_list=k_list)
202
+
203
+ final_metadata = []
204
+ for key in sorted(list(metadatas.keys())):
205
+ final_metadata.append(metadatas[key])
206
+ for i in range(len(final_metadata)):
207
+ if type(final_metadata[i]) is not list:
208
+ final_metadata[i] = [json.dumps(final_metadata[i])]
209
+ else:
210
+ final_metadata[i] = [json.dumps(x) for x in final_metadata[i]]
211
+
212
+ assert len(final_metadata[i]) == len(
213
+ generations_list[0]
214
+ ), f"{len(final_metadata[i])=}"
215
+
216
+ return [metrics, results, final_metadata]
217
+
218
+
219
+ if __name__ == "__main__":
220
+ # print(
221
+ # check_correctness(
222
+ # {
223
+ # "input_output": json.dumps(
224
+ # {
225
+ # "inputs": [
226
+ # json.dumps([1] * 100000)
227
+ # + "\n"
228
+ # + json.dumps([100000, -100000] * (100000 // 2))
229
+ # ],
230
+ # "outputs": [json.dumps([100000, 0] * (100000 // 2))],
231
+ # "fn_name": "mostFrequentIDs",
232
+ # }
233
+ # )
234
+ # },
235
+ # "class Solution:\n def mostFrequentIDs(self, nums: List[int], freq: List[int]) -> List[int]:\n from collections import defaultdict\n \n # Count of each ID\n count = defaultdict(int)\n # How many IDs exist for a given frequency\n freq_of_count = defaultdict(int)\n \n max_freq = 0\n ans = []\n \n for i in range(len(nums)):\n x = nums[i]\n change = freq[i]\n \n old_freq = count[x]\n new_freq = old_freq + change\n \n # If there was an old frequency, decrease its usage\n if old_freq > 0:\n freq_of_count[old_freq] -= 1\n if freq_of_count[old_freq] == 0:\n del freq_of_count[old_freq]\n \n # Update with the new frequency\n count[x] = new_freq\n freq_of_count[new_freq] += 1\n \n # Update max_freq if needed\n if new_freq > max_freq:\n max_freq = new_freq\n \n # If the collection at max_freq is empty, reduce max_freq until we find a non-empty bin\n while max_freq > 0 and max_freq not in freq_of_count:\n max_freq -= 1\n \n # If the collection is empty, max_freq will be 0\n ans.append(max_freq)\n \n return ans",
236
+ # 6,
237
+ # debug=True,
238
+ # )
239
+ # )
240
+
241
+ print(
242
+ check_correctness(
243
+ {
244
+ "input_output": json.dumps(
245
+ {
246
+ "inputs": ")))))",
247
+ "outputs": "0",
248
+ },
249
+ )
250
+ },
251
+ "\nMOD = 998244353\n\nS = input().strip()\nn = len(S)\n\nif n % 2 != 0:\n print(0)\n exit()\n\n# Initialize DP table\ndp = [[0] * (n + 2) for _ in range(n + 1)]\ndp[0][0] = 1\n\nfor i in range(1, n + 1):\n c = S[i-1]\n for b in range(n + 1):\n if dp[i-1][b] == 0:\n continue\n if c == '(':\n new_b = b + 1\n if new_b <= n:\n dp[i][new_b] = (dp[i][new_b] + dp[i-1][b]) % MOD\n elif c == ')':\n if b > 0:\n new_b = b - 1\n dp[i][new_b] = (dp[i][new_b] + dp[i-1][b]) % MOD\n else: # '?'\n # Replace with '('\n new_b = b + 1\n if new_b <= n:\n dp[i][new_b] = (dp[i][new_b] + dp[i-1][b]) % MOD\n # Replace with ')'\n if b > 0:\n new_b = b - 1\n dp[i][new_b] = (dp[i][new_b] + dp[i-1][b]) % MOD\n\nprint(dp[n][0] % MOD)\n",
252
+ 6,
253
+ debug=True,
254
+ )
255
+ )
lcb_runner/evaluation/compute_scores.py ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import argparse
3
+ import numpy as np
4
+ from datetime import datetime
5
+
6
+ from lcb_runner.lm_styles import LanguageModelStore
7
+ from lcb_runner.evaluation.pass_k_utils import (
8
+ estimate_pass_at_k,
9
+ compute_metrics_from_results,
10
+ )
11
+ from lcb_runner.utils.scenarios import Scenario
12
+ from lcb_runner.utils.path_utils import get_eval_all_output_path
13
+
14
+
15
+ def get_parser():
16
+ parser = argparse.ArgumentParser()
17
+ parser.add_argument(
18
+ "--model",
19
+ type=str,
20
+ default="gpt-3.5-turbo-0301",
21
+ help="Name of the model to use matching `lm_styles.py`",
22
+ )
23
+ parser.add_argument(
24
+ "--scenario",
25
+ type=Scenario,
26
+ default=Scenario.codegeneration,
27
+ help="Type of scenario to run",
28
+ )
29
+ parser.add_argument(
30
+ "--n", type=int, default=10, help="Number of samples to generate"
31
+ )
32
+ parser.add_argument(
33
+ "--temperature", type=float, default=0.2, help="Temperature for sampling"
34
+ )
35
+
36
+ parser.add_argument(
37
+ "--eval_all_file",
38
+ type=str,
39
+ default=None,
40
+ help="Alternative way to provide the evaluation file",
41
+ )
42
+
43
+ parser.add_argument(
44
+ "--start_date",
45
+ type=str,
46
+ default=None,
47
+ help="Start date for the contest to filter the evaluation file (format - YYYY-MM-DD)",
48
+ )
49
+ parser.add_argument(
50
+ "--end_date",
51
+ type=str,
52
+ default=None,
53
+ help="End date for the contest to filter the evaluation file (format - YYYY-MM-DD)",
54
+ )
55
+
56
+ parser.add_argument(
57
+ "--platform",
58
+ type=str,
59
+ default=None,
60
+ help="Platform to filter the evaluation file",
61
+ )
62
+
63
+ args = parser.parse_args()
64
+
65
+ if args.eval_all_file is None:
66
+ model = LanguageModelStore[args.model]
67
+ args.eval_all_file = get_eval_all_output_path(model, args)
68
+
69
+ return args
70
+
71
+
72
+ def compute_scores(args):
73
+ with open(args.eval_all_file, "r") as f:
74
+ results = json.load(f)
75
+
76
+ for res in results:
77
+ res["contest_date"] = datetime.fromisoformat(res["contest_date"])
78
+
79
+ if args.start_date is not None:
80
+ args.start_date = datetime.strptime(args.start_date, "%Y-%m-%d")
81
+ results = [
82
+ result for result in results if args.start_date <= result["contest_date"]
83
+ ]
84
+
85
+ if args.end_date is not None:
86
+ args.end_date = datetime.strptime(args.end_date, "%Y-%m-%d")
87
+ results = [
88
+ result for result in results if result["contest_date"] <= args.end_date
89
+ ]
90
+
91
+ if args.platform is not None:
92
+ results = [result for result in results if result["platform"] == args.platform]
93
+
94
+ print(len(results))
95
+ totals = [len(x["graded_list"]) for x in results]
96
+ corrects = [sum(x["graded_list"]) for x in results]
97
+
98
+ easy_totals = [len(x["graded_list"]) for x in results if x["difficulty"] == "easy"]
99
+ med_totals = [len(x["graded_list"]) for x in results if x["difficulty"] == "medium"]
100
+ hard_totals = [len(x["graded_list"]) for x in results if x["difficulty"] == "hard"]
101
+ easy_corrects = [sum(x["graded_list"]) for x in results if x["difficulty"] == "easy"]
102
+ med_corrects = [sum(x["graded_list"]) for x in results if x["difficulty"] == "medium"]
103
+ hard_corrects = [sum(x["graded_list"]) for x in results if x["difficulty"] == "hard"]
104
+ for k in [1, 5, 10, 25, 50, 100, 150, 200]:
105
+ print(
106
+ f"Pass@{k} = ",
107
+ estimate_pass_at_k(totals, corrects, k).mean(),
108
+ # np.array(
109
+ # [estimate_pass_at_k(t, c, k) for t, c in zip(totals, corrects)]
110
+ # ).mean(),
111
+ )
112
+ print(
113
+ f"Easy Pass@{k} = ",
114
+ estimate_pass_at_k(easy_totals, easy_corrects, k).mean(),
115
+ )
116
+ print(
117
+ f"Medium Pass@{k} = ",
118
+ estimate_pass_at_k(med_totals, med_corrects, k).mean(),
119
+ )
120
+ print(
121
+ f"Hard Pass@{k} = ",
122
+ estimate_pass_at_k(hard_totals, hard_corrects, k).mean(),
123
+ )
124
+
125
+
126
+ pass_1_list = [result["pass@1"] for result in results]
127
+ print(f"Pass@1: {sum(pass_1_list) / len(pass_1_list)}")
128
+
129
+ easy_pass_1_list = [
130
+ result["pass@1"]
131
+ for result in results
132
+ if "difficulty" in result and result["difficulty"] == "easy"
133
+ ]
134
+ if len(easy_pass_1_list) > 0:
135
+ print(f"Easy Pass@1: {sum(easy_pass_1_list) / len(easy_pass_1_list)}")
136
+
137
+ medium_pass_1_list = [
138
+ result["pass@1"]
139
+ for result in results
140
+ if "difficulty" in result and result["difficulty"] == "medium"
141
+ ]
142
+ if len(medium_pass_1_list) > 0:
143
+ print(f"Medium Pass@1: {sum(medium_pass_1_list) / len(medium_pass_1_list)}")
144
+
145
+ hard_pass_1_list = [
146
+ result["pass@1"]
147
+ for result in results
148
+ if "difficulty" in result and result["difficulty"] == "hard"
149
+ ]
150
+ if len(hard_pass_1_list) > 0:
151
+ print(f"Hard Pass@1: {sum(hard_pass_1_list) / len(hard_pass_1_list)}")
152
+
153
+
154
+ if __name__ == "__main__":
155
+ compute_scores(get_parser())
lcb_runner/evaluation/compute_test_output_prediction_metrics.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import ast
2
+ import json
3
+
4
+ import tqdm
5
+
6
+ from lcb_runner.evaluation.pass_k_utils import compute_metrics_from_results
7
+
8
+
9
+ def parse_assert_statement(statement):
10
+ """
11
+ Parse a Python assert statement and extract the expected output
12
+ from the right side of the '==' operator as a string.
13
+
14
+ :param statement: A string containing the assert statement.
15
+ :return: The expected output from the assert statement as a string.
16
+ """
17
+ try:
18
+ parsed = ast.parse(statement, mode="exec")
19
+ except SyntaxError:
20
+ return "Invalid syntax"
21
+
22
+ if len(parsed.body) == 0:
23
+ return "Empty statement"
24
+
25
+ if not isinstance(parsed.body[0], ast.Assert):
26
+ return "Not an assert statement"
27
+
28
+ comparison = parsed.body[0].test
29
+
30
+ if not isinstance(comparison, ast.Compare) or not isinstance(
31
+ comparison.ops[0], ast.Eq
32
+ ):
33
+ return "Not an equality assertion"
34
+
35
+ # Extract and return the right side of the '==' operator as a string
36
+ return ast.get_source_segment(statement, comparison.comparators[0])
37
+
38
+
39
+ def check_testcase_output(testcase_str, expected_output):
40
+
41
+ if len(testcase_str.splitlines()) > 1:
42
+ for line in testcase_str.splitlines():
43
+ if line.startswith("#"):
44
+ continue
45
+ if "assert" in line:
46
+ testcase_str = line
47
+ break
48
+
49
+ testcase_str = testcase_str.strip()
50
+
51
+ if "assert" in testcase_str:
52
+ testcase_output_str = str(parse_assert_statement(testcase_str))
53
+
54
+ else:
55
+ testcase_output_str = testcase_str
56
+
57
+ global_result = None
58
+
59
+ try:
60
+ testcase_output_eval = eval(testcase_output_str)
61
+ except:
62
+ global_result = False
63
+ # print("Failed to eval testcase output", testcase_output_str)
64
+ # breakpoint()
65
+
66
+ try:
67
+ expected_output_eval = json.loads(expected_output)
68
+ except:
69
+ global_result = False
70
+ print("Failed to eval expected testcase output", expected_output)
71
+
72
+ if global_result is None:
73
+ global_result = testcase_output_eval == expected_output_eval
74
+
75
+ return global_result
76
+
77
+
78
+ def test_output_metrics(
79
+ samples,
80
+ generations,
81
+ k_list=[1, 5],
82
+ ):
83
+ num_samples = len(samples)
84
+ results = []
85
+ for idx in tqdm.tqdm(list(range(num_samples))):
86
+ idx_results = []
87
+ sample = samples[idx]
88
+ extracted_generation_list = generations[idx]
89
+ for extracted_generation in extracted_generation_list:
90
+ global_result = check_testcase_output(
91
+ extracted_generation, sample["output"]
92
+ )
93
+ idx_results.append([global_result])
94
+ results.append(idx_results)
95
+
96
+ results = {result_idx: results[result_idx] for result_idx in range(len(results))}
97
+
98
+ metrics = compute_metrics_from_results(results, k_list=k_list)
99
+
100
+ return [metrics, results]
lcb_runner/evaluation/old_results_check.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import json
3
+ from lcb_runner.benchmarks import load_generation_dataset, CodeGenerationProblem
4
+ from lcb_runner.evaluation import codegen_metrics
5
+
6
+
7
+ dataset = load_generation_dataset()
8
+
9
+ dataset = sorted(dataset, key=lambda x: x.question_id)
10
+
11
+
12
+ def check_model(model_key):
13
+ path = f"/home/naman/Repos/LiveCodeBench/run_models_outputs/{model_key}/chat_0.2_checked.json"
14
+ with open(path) as f:
15
+ old_results = json.load(f)
16
+ old_results = sorted(old_results, key=lambda x: x["question_id"])
17
+ assert old_results[0]["question_id"] == dataset[0].question_id
18
+
19
+ def debug(idx):
20
+ codegen_metrics(
21
+ [dataset[idx].get_evaluation_sample()],
22
+ [old_results[idx]["code_list"][:1]],
23
+ debug=True,
24
+ )
25
+
26
+ def run(idx):
27
+ return codegen_metrics(
28
+ [dataset[idx].get_evaluation_sample()],
29
+ [old_results[idx]["code_list"]],
30
+ )
31
+
32
+ debug(380)
33
+ exit()
34
+ # debug(196)
35
+ # debug(352)
36
+
37
+ metrics = codegen_metrics(
38
+ [d.get_evaluation_sample() for d in dataset],
39
+ [r["code_list"] for r in old_results],
40
+ num_process_evaluate=12,
41
+ )
42
+ old_pass1 = np.mean([np.mean(r["pass1_list"]) for r in old_results])
43
+
44
+ print(old_pass1)
45
+ print(metrics[0]["pass@1"])
46
+
47
+ for idx in range(400):
48
+ old_pass1 = np.mean(old_results[idx]["pass1_list"])
49
+ new_pass1 = metrics[0]["detail"]["pass@1"][idx]
50
+ if not abs(old_pass1 - new_pass1) < 1e-4:
51
+ print(idx, old_pass1, new_pass1)
52
+
53
+
54
+ # model_key = "GPT-4-Turbo-1106"
55
+ # check_model(model_key)
56
+
57
+ model_key = "Claude-3-Opus"
58
+ check_model(model_key)
59
+
60
+ model_key = "GPT-4-0613"
61
+ check_model(model_key)
62
+
63
+ model_key = "Mistral-Large"
64
+ check_model(model_key)
65
+
66
+ model_key = "Claude-3-Sonnet"
67
+ check_model(model_key)
68
+
69
+ model_key = "GPT-3.5-Turbo-0301"
70
+ check_model(model_key)
71
+
72
+ model_key = "Gemini-Pro"
73
+ check_model(model_key)
lcb_runner/evaluation/pass_k_utils.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+
4
+ def estimate_pass_at_k(num_samples, num_correct, k):
5
+ """Estimates pass@k of each problem and returns them in an array."""
6
+
7
+ def estimator(n: int, c: int, k: int) -> float:
8
+ """Calculates 1 - comb(n - c, k) / comb(n, k)."""
9
+ if n - c < k:
10
+ return 1.0
11
+ return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))
12
+
13
+ import itertools
14
+
15
+ if isinstance(num_samples, int):
16
+ num_samples_it = itertools.repeat(num_samples, len(num_correct))
17
+ else:
18
+ assert len(num_samples) == len(num_correct)
19
+ num_samples_it = iter(num_samples)
20
+
21
+ return np.array(
22
+ [estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)]
23
+ )
24
+
25
+
26
+ def compute_metrics_from_results(results, k_list=[1, 5]):
27
+ total = []
28
+ correct = []
29
+ task_ids = []
30
+ for task_id, res in results.items():
31
+ all_correct = []
32
+ for generation in res:
33
+ gen = np.array(generation)
34
+ all_correct.append(np.all(gen > 0))
35
+ task_ids.append(task_id)
36
+ total.append(len(all_correct))
37
+ correct.append(sum(all_correct))
38
+ total = np.array(total)
39
+ correct = np.array(correct)
40
+ ks = k_list
41
+ detail_pass_at_k = {
42
+ f"pass@{k}": estimate_pass_at_k(total, correct, k).tolist()
43
+ for k in ks
44
+ if (total >= k).all()
45
+ }
46
+ pass_at_k = {
47
+ f"pass@{k}": estimate_pass_at_k(total, correct, k).mean()
48
+ for k in ks
49
+ if (total >= k).all()
50
+ }
51
+ detail_metrics = {k: dict(zip(task_ids, v)) for k, v in detail_pass_at_k.items()}
52
+ pass_at_k["detail"] = detail_metrics
53
+ return pass_at_k
54
+
55
+
56
+ def extract_instance_results(results):
57
+ instance_wise_grades = {}
58
+ for task_id, res in results.items():
59
+ instance_wise_grades[task_id] = []
60
+ for generation in res:
61
+ instance_wise_grades[task_id].append(all([g > 0 for g in generation]))
62
+
63
+ instance_wise_grades = [
64
+ v for _, v in sorted(instance_wise_grades.items(), key=lambda item: item[0])
65
+ ]
66
+ return instance_wise_grades
lcb_runner/evaluation/testing_util.py ADDED
@@ -0,0 +1,592 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import ast
2
+ import json
3
+ import sys
4
+ import faulthandler
5
+ import platform
6
+
7
+ # used for debugging to time steps
8
+ from datetime import datetime
9
+
10
+ # to run the solution files we're using a timing based approach
11
+ import signal
12
+
13
+ import numpy as np
14
+
15
+ from io import StringIO
16
+
17
+ # used for testing the code that reads from input
18
+ from unittest.mock import patch, mock_open
19
+
20
+ # from pyext import RuntimeModule
21
+ from types import ModuleType
22
+
23
+ from enum import Enum
24
+ from decimal import Decimal
25
+ import time
26
+
27
+ import_string = "from string import *\nfrom re import *\nfrom datetime import *\nfrom collections import *\nfrom heapq import *\nfrom bisect import *\nfrom copy import *\nfrom math import *\nfrom random import *\nfrom statistics import *\nfrom itertools import *\nfrom functools import *\nfrom operator import *\nfrom io import *\nfrom sys import *\nfrom json import *\nfrom builtins import *\nfrom typing import *\nimport string\nimport re\nimport datetime\nimport collections\nimport heapq\nimport bisect\nimport copy\nimport math\nimport random\nimport statistics\nimport itertools\nimport functools\nimport operator\nimport io\nimport sys\nimport json\nsys.setrecursionlimit(50000)\n"
28
+
29
+
30
+ def truncatefn(s, length=300):
31
+ if isinstance(s, str):
32
+ pass
33
+ else:
34
+ s = str(s)
35
+ if len(s) <= length:
36
+ return s
37
+
38
+ return s[: length // 2] + "...(truncated) ..." + s[-length // 2 :]
39
+
40
+
41
+ class CODE_TYPE(Enum):
42
+ call_based = 0
43
+ standard_input = 1
44
+
45
+
46
+ # stuff for setting up signal timer
47
+ class TimeoutException(Exception):
48
+ pass
49
+
50
+
51
+ def timeout_handler(signum, frame):
52
+ print("timeout occured: alarm went off")
53
+ raise TimeoutException
54
+
55
+
56
+ # used to capture stdout as a list
57
+ # from https://stackoverflow.com/a/16571630/6416660
58
+ # alternative use redirect_stdout() from contextlib
59
+ class Capturing(list):
60
+ def __enter__(self):
61
+ self._stdout = sys.stdout
62
+ sys.stdout = self._stringio = StringIO()
63
+ # Make closing the StringIO a no-op
64
+ self._stringio.close = lambda x: 1
65
+ return self
66
+
67
+ def __exit__(self, *args):
68
+ self.append(self._stringio.getvalue())
69
+ del self._stringio # free up some memory
70
+ sys.stdout = self._stdout
71
+
72
+
73
# Custom mock for sys.stdin that supports buffer attribute
class MockStdinWithBuffer:
    """Stand-in for ``sys.stdin`` backed by a fixed input string.

    Unlike a bare ``StringIO`` it also exposes a ``buffer`` attribute (a
    bytes-level view), matching the interface of a real text stream.
    """

    def __init__(self, inputs: str):
        self.inputs = inputs
        self._stringio = StringIO(inputs)
        self.buffer = MockBuffer(inputs)

    def read(self, *args):
        # Always returns the entire input, ignoring any size argument.
        return self.inputs

    def readline(self, *args):
        # Stateful line-by-line reads come from the backing StringIO.
        return self._stringio.readline(*args)

    def readlines(self, *args):
        # Note: split("\n") keeps a trailing empty element for a final newline.
        return self.inputs.split("\n")

    def __getattr__(self, name):
        # Anything else (isatty, encoding, ...) is delegated to the StringIO.
        return getattr(self._stringio, name)


class MockBuffer:
    """Bytes-level companion of :class:`MockStdinWithBuffer` (``stdin.buffer``)."""

    def __init__(self, inputs: str):
        self.inputs = inputs.encode("utf-8")

    def read(self, *args):
        # Whole input as one bytes object, ready to be split by the caller.
        return self.inputs

    def readline(self, *args):
        # Only ever yields the first line; sufficient for the graded patterns.
        return self.inputs.split(b"\n")[0] + b"\n"
104
+
105
+
106
def clean_if_name(code: str) -> str:
    """Strip a trailing ``if __name__ == '__main__':`` guard from *code*,
    promoting the guarded statements to module level.

    The grading runtime does not execute solutions as ``__main__``, so code
    hidden behind the guard would otherwise never run.  If *code* does not
    parse, or has no such trailing guard, it is returned unchanged.
    """
    try:
        astree = ast.parse(code)
        last_block = astree.body[-1]
        if isinstance(last_block, ast.If):
            condition = last_block.test
            # ast.unparse normalizes quoting, so this matches the guard
            # whether the source used single or double quotes.
            if ast.unparse(condition).strip() == "__name__ == '__main__'":
                code = (
                    ast.unparse(astree.body[:-1]) + "\n" + ast.unparse(last_block.body)  # type: ignore
                )
    except Exception:
        # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
        # still propagate; parse/unparse failures fall back to the input.
        pass

    return code
120
+
121
+
122
def make_function(code: str) -> str:
    """Wrap the top-level statements of *code* in ``def wrapped_function()``.

    Import statements (plus the stdlib preamble ``import_string``) stay at
    module level so they remain visible inside the wrapper.  If *code* cannot
    be parsed, it is returned unchanged.
    """
    try:
        tree = ast.parse(code)
        import_stmts = [
            node for node in tree.body if isinstance(node, (ast.Import, ast.ImportFrom))
        ]
        other_stmts = [
            node
            for node in tree.body
            if not isinstance(node, (ast.Import, ast.ImportFrom))
        ]

        # lineno=-1 keeps ast.unparse happy for a synthesized node.
        wrapper = ast.FunctionDef(
            name="wrapped_function",
            args=ast.arguments(
                posonlyargs=[], args=[], kwonlyargs=[], kw_defaults=[], defaults=[]
            ),
            body=other_stmts,
            decorator_list=[],
            lineno=-1,
        )
        return "\n".join(
            [
                import_string,
                ast.unparse(import_stmts),  # type: ignore
                ast.unparse(wrapper),  # type: ignore
            ]
        )
    except Exception:
        # Best-effort: un-wrappable code is graded as-is.
        return code
152
+
153
+
154
def call_method(method, inputs):
    """Invoke *method* (a wrapped stdio solution) with stdin mocked to *inputs*.

    *inputs* may be a list of lines or a single newline-joined string.  All
    common stdin access paths are patched for the duration of the call:
    ``open`` returns the input text, ``sys.stdin`` becomes a
    ``MockStdinWithBuffer`` (so ``sys.stdin.buffer`` works), and
    ``readline``/``readlines``/``read`` are patched individually on top.
    ``SystemExit`` raised by the solution is swallowed (treated as a normal
    exit); the return value is whatever *method* returns (usually ``None``,
    with output going to the captured stdout).
    """

    if isinstance(inputs, list):
        inputs = "\n".join(inputs)

    # Shared iterator so successive readline() calls advance through lines.
    inputs_line_iterator = iter(inputs.split("\n"))

    # Create custom stdin mock with buffer support
    mock_stdin = MockStdinWithBuffer(inputs)

    # sys.setrecursionlimit(10000)

    # NOTE: decorator order matters — the per-call readline/readlines/read
    # patches are applied on top of the sys.stdin replacement.
    # @patch('builtins.input', side_effect=inputs.split("\n"))
    @patch("builtins.open", mock_open(read_data=inputs))
    @patch("sys.stdin", mock_stdin)  # Use our custom mock instead of StringIO
    @patch("sys.stdin.readline", lambda *args: next(inputs_line_iterator))
    @patch("sys.stdin.readlines", lambda *args: inputs.split("\n"))
    @patch("sys.stdin.read", lambda *args: inputs)
    # @patch('sys.stdout.write', print)
    def _inner_call_method(_method):
        try:
            return _method()
        except SystemExit as e:
            # A solution calling exit()/sys.exit() is a normal termination.
            pass
        finally:
            pass

    return _inner_call_method(method)
182
+
183
+
184
def get_function(compiled_sol, fn_name: str): # type: ignore
    """Return attribute *fn_name* of *compiled_sol*, or ``None`` when the
    attribute is missing or any lookup error occurs."""
    try:
        return getattr(compiled_sol, fn_name)
    except Exception:
        return None
190
+
191
+
192
def compile_code(code: str, timeout: int):
    """Exec *code* into a throwaway module under a SIGALRM budget of
    *timeout* seconds and return the object exposing its callables.

    For LeetCode-style sources containing ``class Solution`` an instance of
    that class is returned; otherwise the module itself is.  Compilation
    errors propagate to the caller; the alarm is always cleared.
    """
    signal.alarm(timeout)
    try:
        scratch = ModuleType("tmp_sol", "")
        exec(code, scratch.__dict__)
        if "class Solution" in code:
            # LeetCode wraps solutions in `Solution`; this substring check is
            # a heuristic that distinguishes it from plain function/module
            # solutions while leaving room for other platforms later.
            compiled_sol = scratch.Solution()
        else:
            # Functions are accessible directly on the module object.
            compiled_sol = scratch
        assert compiled_sol is not None
    finally:
        signal.alarm(0)

    return compiled_sol
212
+
213
+
214
def convert_line_to_decimals(line: str) -> tuple[bool, list[Decimal]]:
    """Parse a whitespace-separated *line* into ``Decimal`` values.

    Returns ``(True, values)`` on success and ``(False, [])`` when any token
    is not a valid decimal literal.
    """
    try:
        decimal_line = [Decimal(elem) for elem in line.split()]
    except Exception:
        # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
        # propagate; Decimal() signals bad tokens via InvalidOperation.
        return False, []
    return True, decimal_line
220
+
221
+
222
def get_stripped_lines(val: str):
    """Split *val* into lines with surrounding whitespace removed.

    The value is stripped as a whole first, so a trailing newline does not
    produce a spurious empty final line.
    """
    return [line.strip() for line in val.strip().split("\n")]
227
+
228
+
229
def grade_call_based(
    code: str, all_inputs: list, all_outputs: list, fn_name: str, timeout: int
):
    """Grade a call-based (LeetCode-style) solution.

    Each entry of *all_inputs* is a newline-separated list of JSON-encoded
    arguments and each entry of *all_outputs* is a JSON-encoded expected
    value.  *fn_name* is looked up on the compiled solution and invoked per
    test case under a per-case SIGALRM budget of *timeout* seconds.

    Returns ``(results, metadata)``: ``results`` holds ``True``/``False`` per
    case plus ``-3`` (TLE) / ``-4`` (runtime error) sentinels, and grading
    stops at the first failing case.  Returns bare ``None`` when compilation
    or function lookup fails.
    """
    # call-based clean up logic
    # need to wrap in try-catch logic after to catch the correct errors, but for now this is fine.
    code = import_string + "\n\n" + code
    compiled_sol = compile_code(code, timeout)

    if compiled_sol is None:
        return

    method = get_function(compiled_sol, fn_name)

    if method is None:
        return

    # Each test case's input string carries one JSON document per line,
    # which become the positional arguments of the call below.
    all_inputs = [
        [json.loads(line) for line in inputs.split("\n")] for inputs in all_inputs
    ]

    all_outputs = [json.loads(output) for output in all_outputs]

    total_execution = 0
    all_results = []
    for idx, (gt_inp, gt_out) in enumerate(zip(all_inputs, all_outputs)):
        signal.alarm(timeout)
        faulthandler.enable()
        try:
            # can lock here so time is useful
            start = time.time()
            prediction = method(*gt_inp)
            total_execution += time.time() - start
            signal.alarm(0)

            # don't penalize model if it produces tuples instead of lists
            # ground truth sequences are not tuples
            if isinstance(prediction, tuple):
                prediction = list(prediction)

            tmp_result = prediction == gt_out

            # handle floating point comparisons

            all_results.append(tmp_result)

            if not tmp_result:
                # First wrong answer aborts grading of remaining cases.
                return all_results, {
                    "output": truncatefn(prediction),
                    "inputs": truncatefn(gt_inp),
                    "expected": truncatefn(gt_out),
                    "error_code": -2,
                    "error_message": "Wrong Answer",
                }
        except Exception as e:
            signal.alarm(0)
            # The SIGALRM handler raises TimeoutException; detect it by name
            # in the repr so wrapped/re-raised variants are still counted.
            if "timeoutexception" in repr(e).lower():
                all_results.append(-3)
                return all_results, {
                    "error": repr(e),
                    "error_code": -3,
                    "error_message": "Time Limit Exceeded",
                    "inputs": truncatefn(gt_inp),
                    "expected": truncatefn(gt_out),
                }
            else:
                all_results.append(-4)
                return all_results, {
                    "error": repr(e),
                    "error_code": -4,
                    "error_message": "Runtime Error",
                    "inputs": truncatefn(gt_inp),
                    "expected": truncatefn(gt_out),
                }

        finally:
            # Always disarm the alarm and faulthandler, pass or fail.
            signal.alarm(0)
            faulthandler.disable()

    return all_results, {"execution time": total_execution}
308
+
309
+
310
def grade_stdio(
    code: str,
    all_inputs: list,
    all_outputs: list,
    timeout: int,
):
    """Grade a stdin/stdout solution.

    The source is stripped of any ``if __name__ == '__main__'`` guard,
    wrapped into ``wrapped_function``, compiled, and run once per test case
    with stdin mocked to the case input and stdout captured.  Output is
    compared line-by-line after stripping; lines that differ textually are
    re-compared as ``Decimal`` sequences so e.g. ``0.50`` matches ``0.5``.

    Returns ``(results, metadata)`` with per-case ``True`` / ``-2`` (WA) /
    ``-3`` (TLE) / ``-4`` (runtime error); grading stops at the first failing
    case.  Returns bare ``None`` when compilation or lookup fails.
    """
    ## runtime doesn't interact well with __name__ == '__main__'
    code = clean_if_name(code)

    ## we wrap the given code inside another function
    code = make_function(code)

    compiled_sol = compile_code(code, timeout)
    if compiled_sol is None:
        # Defensive: compile_code raises on failure rather than returning
        # None, so this branch is not expected to trigger.
        return

    method = get_function(compiled_sol, "wrapped_function")

    if method is None:
        return

    all_results = []
    total_execution_time = 0
    for idx, (gt_inp, gt_out) in enumerate(zip(all_inputs, all_outputs)):
        signal.alarm(timeout)
        faulthandler.enable()

        # NOTE(review): this second alarm call is redundant — the timeout was
        # already armed two lines above; harmless but duplicated.
        signal.alarm(timeout)
        with Capturing() as captured_output:
            try:
                start = time.time()
                call_method(method, gt_inp)
                total_execution_time += time.time() - start
                # reset the alarm
                signal.alarm(0)
            except Exception as e:
                signal.alarm(0)
                # TimeoutException (raised by the SIGALRM handler) is
                # detected by name so wrapped variants still count as TLE.
                if "timeoutexception" in repr(e).lower():
                    all_results.append(-3)
                    return all_results, {
                        "error": repr(e),
                        "error_code": -3,
                        "error_message": "Time Limit Exceeded",
                        "inputs": truncatefn(gt_inp),
                        "expected": truncatefn(gt_out),
                    }
                else:
                    all_results.append(-4)
                    return all_results, {
                        "error": repr(e),
                        "error_code": -4,
                        "error_message": "Runtime Error",
                        "inputs": truncatefn(gt_inp),
                        "expected": truncatefn(gt_out),
                    }

            finally:
                signal.alarm(0)
                faulthandler.disable()

        # Capturing appends exactly one string (everything printed).
        prediction = captured_output[0]

        stripped_prediction_lines = get_stripped_lines(prediction)
        stripped_gt_out_lines = get_stripped_lines(gt_out)

        ## WA happens in multiple circumstances
        ## so cache the return to make it clean!
        WA_send_args = {
            "output": truncatefn(prediction),
            "inputs": truncatefn(gt_inp),
            "expected": truncatefn(gt_out),
            "error_code": -2,
        }

        if len(stripped_prediction_lines) != len(stripped_gt_out_lines):
            all_results.append(-2)
            WA_send_args["error_message"] = "Wrong answer: mismatched output length"
            return all_results, WA_send_args

        for output_line_idx, (
            stripped_prediction_line,
            stripped_gt_out_line,
        ) in enumerate(zip(stripped_prediction_lines, stripped_gt_out_lines)):
            # Pre-build the WA message for this line; only used on failure.
            WA_send_args["error_message"] = (
                f"Wrong answer at {output_line_idx=}: {truncatefn(stripped_prediction_line)} != {truncatefn(stripped_gt_out_line)}"
            )

            ## CASE 1: exact match
            if stripped_prediction_line == stripped_gt_out_line:
                continue

            ## CASE 2: element-wise comparision
            ## if there are floating elements
            ## use `decimal` library for good floating point comparision
            ## otherwise gotcha: np.isclose(50000000000000000, 50000000000000001) = True
            ## note that we should always be able to convert to decimals

            success, decimal_prediction_line = convert_line_to_decimals(
                stripped_prediction_line
            )
            if not success:
                all_results.append(-2)
                return all_results, WA_send_args
            success, decimal_gtout_line = convert_line_to_decimals(stripped_gt_out_line)
            if not success:
                all_results.append(-2)
                return all_results, WA_send_args

            if decimal_prediction_line == decimal_gtout_line:
                continue

            all_results.append(-2)
            return all_results, WA_send_args
        all_results.append(True)

    return all_results, {"execution time": total_execution_time}
426
+
427
+
428
def run_test(sample, test=None, debug=False, timeout=6):
    """Grade generated code *test* against the test cases in *sample*.

    *sample* must carry a JSON ``"input_output"`` field with ``inputs`` and
    ``outputs`` lists; an optional ``fn_name`` key selects call-based grading,
    otherwise stdin/stdout grading is used.  Installs the SIGALRM handler and
    applies ``reliability_guard`` (4 GB memory cap, destructive builtins
    disabled) as side effects — the calling process is permanently sandboxed.

    Returns ``(results, metadata)`` as produced by the grading helpers, or
    ``([-4], metadata)`` when grading itself errored.
    """
    signal.signal(signal.SIGALRM, timeout_handler)

    # Disable functionalities that can make destructive changes to the test.
    # max memory is set to 4GB
    reliability_guard(4 * 1024 * 1024 * 1024)

    if debug:
        print(f"start = {datetime.now().time()}")

    try:
        in_outs = json.loads(sample["input_output"])
    except ValueError as e:
        raise e
        # NOTE(review): unreachable — the re-raise above always exits this
        # handler, so the fallback assignment below is dead code.
        in_outs = None

    # NOTE(review): if in_outs is empty/falsy, which_type is never assigned
    # and the comparison further down would raise NameError.
    if in_outs:
        if in_outs.get("fn_name") is None:
            which_type = CODE_TYPE.standard_input  # Standard input
            method_name = None

        else:
            which_type = CODE_TYPE.call_based  # Call-based
            method_name = in_outs["fn_name"]

    if debug:
        print(f"loaded input_output = {datetime.now().time()}")

    if test is None:
        # NOTE(review): the return below is unreachable unless asserts are
        # stripped (python -O), in which case it becomes the live path.
        assert False, "should not happen: test code is none"
        return in_outs, {"error": "no test code provided"}
    elif test is not None:
        results = []
        sol = import_string
        if debug:
            print(f"loading test code = {datetime.now().time()}")

        if which_type == CODE_TYPE.call_based:
            signal.alarm(timeout)
            try:
                results, metadata = grade_call_based(
                    code=test,
                    all_inputs=in_outs["inputs"],
                    all_outputs=in_outs["outputs"],
                    fn_name=method_name,
                    timeout=timeout,
                )
                return results, metadata
            except Exception as e:
                # Any error escaping the grader is reported as a harness
                # failure (-4) rather than crashing the worker.
                return [-4], {
                    "error_code": -4,
                    "error_message": f"Error during testing: {e}",
                }
            finally:
                signal.alarm(0)
        elif which_type == CODE_TYPE.standard_input:
            # sol
            # if code has if __name__ == "__main__": then remove it

            signal.alarm(timeout)
            try:
                results, metadata = grade_stdio(
                    code=test,
                    all_inputs=in_outs["inputs"],
                    all_outputs=in_outs["outputs"],
                    timeout=timeout,
                )
                return results, metadata
            except Exception as e:
                return [-4], {
                    "error_code": -4,
                    "error_message": f"Error during testing: {e}",
                }
            finally:
                signal.alarm(0)
507
+
508
+
509
def reliability_guard(maximum_memory_bytes=None):
    """
    This disables various destructive functions and prevents the generated code
    from interfering with the test (e.g. fork bomb, killing other processes,
    removing filesystem files, etc.)
    WARNING
    This function is NOT a security sandbox. Untrusted code, including, model-
    generated code, should not be blindly executed outside of one. See the
    Codex paper for more information about OpenAI's code sandbox, and proceed
    with caution.
    """

    # Cap address space and data segment (and, off macOS, the stack) so a
    # runaway solution cannot exhaust host memory.
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(
            resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes)
        )
        resource.setrlimit(
            resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes)
        )
        if not platform.uname().system == "Darwin":
            resource.setrlimit(
                resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes)
            )

    faulthandler.disable()

    import builtins

    # `exit` is deliberately left usable here (only `quit` is disabled).
    # builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    # Neutralize process-control and filesystem-mutating os functions.
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    # High-level filesystem operations.
    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None # type: ignore

    # NOTE(review): `__builtins__` is a dict only in imported modules (it is a
    # module object in __main__) — this line assumes module context.
    __builtins__["help"] = None

    import sys

    # Block modules that could inspect or control the host process.
    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
lcb_runner/evaluation/utils_execute.py ADDED
@@ -0,0 +1,267 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # This code is adapted from OpenAI's release
16
+ # https://github.com/openai/human-eval/blob/master/human_eval/execution.py
17
+
18
+ import contextlib
19
+ import faulthandler
20
+ import io
21
+ import multiprocessing
22
+ import os
23
+ import platform
24
+ import signal
25
+ import tempfile
26
+
27
+
28
# Import preamble prepended to code-execution checks: the stdlib (plus
# numpy/pandas) names that generated snippets commonly assume are in scope.
# The sortedcontainers lines are intentionally commented out (not installed).
BASE_IMPORTS = """from itertools import accumulate, chain, combinations, count, permutations, product, groupby, islice, repeat
from copy import deepcopy
from string import ascii_lowercase
from math import floor, log2, log10, sqrt, comb, gcd, ceil, inf, isqrt
from collections import defaultdict, deque, Counter
from bisect import bisect, bisect_left, bisect_right, insort
from heapq import heappush, heappop, heapify, merge
from functools import reduce, cache, lru_cache
from random import randrange, shuffle
from operator import itemgetter, sub
from re import search as re_search  # Assuming 're' refers to a regex search
from os.path import commonprefix
from typing import List, Tuple, Dict, Set, Optional, Union, Any, Callable, Iterable, Iterator, Generator
import copy
import string
import math
import collections
import bisect
import heapq
import functools
import random
import itertools
import operator
import re
import numpy as np
import pandas as pd
from math import log, prod # 'log' and 'prod' are functions in the math module
from collections import deque, defaultdict, Counter, OrderedDict
from itertools import accumulate, permutations, combinations, product, groupby, islice, chain, repeat, zip_longest, cycle
from functools import lru_cache, reduce, partial
# from sortedcontainers import SortedList, SortedDict, SortedSet
# import sortedcontainers
from operator import iand
import sys
"""
63
+
64
def check_correctness(check_program, timeout=3):
    """
    Evaluates the functional correctness of a completion by running the test
    suite provided in the problem.

    Returns True iff *check_program* executed to completion (no exception)
    within *timeout* seconds in a sandboxed child process.

    The Manager is used as a context manager so its helper process is shut
    down on every call instead of leaking one manager process per invocation.
    """
    with multiprocessing.Manager() as manager:
        result = manager.list()

        p = multiprocessing.Process(
            target=unsafe_execute, args=(check_program, result, timeout)
        )
        p.start()
        p.join(timeout=timeout + 1)
        if p.is_alive():
            p.kill()
            # Reap the killed child so it does not linger as a zombie.
            p.join()

        # Copy the outcome out of the proxy before the manager shuts down.
        outcome = result[0] if result else "timed out"

    return outcome == "passed"
85
+
86
+
87
def unsafe_execute(check_program, result, timeout):
    """Run *check_program* in this process under a temp dir, silenced I/O,
    a wall-clock limit, and ``reliability_guard``; append exactly one status
    string to *result*: "passed", "timed out", or "failed: <exc>".

    Intended to be the target of a sacrificial subprocess — the guard
    permanently disables destructive functions in the running interpreter.
    """

    with create_tempdir():

        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        # Save the originals before reliability_guard() nulls them out.
        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            # Broad by design: any failure mode of untrusted code (including
            # SystemExit) must be recorded rather than escape.
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
118
+
119
+
120
@contextlib.contextmanager
def time_limit(seconds):
    """Context manager enforcing a wall-clock limit via SIGALRM/ITIMER_REAL.

    Raises :class:`TimeoutException` once *seconds* have elapsed.  The handler
    is installed *before* the timer is armed — the previous order left a
    window in which a very short timer could fire while the default SIGALRM
    disposition (process termination) was still active.  The timer is always
    disarmed on exit.
    """

    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.signal(signal.SIGALRM, signal_handler)
    signal.setitimer(signal.ITIMER_REAL, seconds)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
131
+
132
+
133
@contextlib.contextmanager
def swallow_io():
    """Silence stdout/stderr and block stdin reads for the managed block."""
    sink = WriteOnlyStringIO()
    with contextlib.redirect_stdout(sink), contextlib.redirect_stderr(
        sink
    ), redirect_stdin(sink):
        yield
140
+
141
+
142
@contextlib.contextmanager
def create_tempdir():
    """Yield a fresh temporary directory, chdir'd into for the block's
    duration; both the cwd and the directory are restored/removed on exit."""
    with tempfile.TemporaryDirectory() as dirname, chdir(dirname):
        yield dirname
147
+
148
+
149
class TimeoutException(Exception):
    """Raised by :func:`time_limit` when the wall-clock budget is exhausted."""
    pass
151
+
152
+
153
class WriteOnlyStringIO(io.StringIO):
    """A ``StringIO`` sink: writes succeed, every read operation raises
    ``OSError``, and the stream advertises itself as non-readable."""

    def readable(self, *args, **kwargs):
        # Well-behaved callers check this and never attempt a read.
        return False

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError
168
+
169
+
170
class redirect_stdin(contextlib._RedirectStream): # type: ignore
    # stdin counterpart of contextlib.redirect_stdout/redirect_stderr; relies
    # on the private _RedirectStream helper (stable across CPython versions,
    # but private API nonetheless — hence the type: ignore).
    _stream = "stdin"
172
+
173
+
174
@contextlib.contextmanager
def chdir(root):
    """Temporarily change the working directory to *root* (no-op for ".").

    The original directory is always restored, even when the managed block
    raises.  The previous ``except BaseException as exc: raise exc`` clause
    was a redundant no-op re-raise — ``finally`` alone already guarantees
    restoration — so it has been removed.
    """
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    finally:
        os.chdir(cwd)
187
+
188
+
189
def reliability_guard(maximum_memory_bytes=None):
    """
    This disables various destructive functions and prevents the generated code
    from interfering with the test (e.g. fork bomb, killing other processes,
    removing filesystem files, etc.)

    WARNING
    This function is NOT a security sandbox. Untrusted code, including, model-
    generated code, should not be blindly executed outside of one. See the
    Codex paper for more information about OpenAI's code sandbox, and proceed
    with caution.
    """

    # Optional memory caps on address space / data segment (and, off macOS,
    # the stack) so a runaway snippet cannot exhaust host memory.
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    # Unlike the generation-side guard, this variant disables `exit` too.
    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    # Neutralize process-control and filesystem-mutating os functions.
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    # High-level filesystem operations.
    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    # NOTE(review): `__builtins__` is a dict only in imported modules (it is a
    # module object in __main__) — this line assumes module context.
    __builtins__["help"] = None

    import sys

    # Block modules that could inspect or control the host process.
    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
lcb_runner/lm_styles.py ADDED
@@ -0,0 +1,920 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+ from datetime import datetime
3
+ from enum import Enum
4
+
5
+
6
+ class LMStyle(Enum):
7
+ OpenAIChat = "OpenAIChat"
8
+ OpenAIReasonPreview = "OpenAIReasonPreview"
9
+ OpenAIReason = "OpenAIReason"
10
+
11
+ Claude = "Claude" # Claude 1 and Claude 2
12
+ Claude3 = "Claude3"
13
+ Claude3Thinking = "Claude3Thinking"
14
+
15
+ Gemini = "Gemini"
16
+ GeminiThinking = "GeminiThinking"
17
+ Grok = "Grok"
18
+
19
+ MistralWeb = "MistralWeb"
20
+ CohereCommand = "CohereCommand"
21
+
22
+ DataBricks = "DataBricks"
23
+ DeepSeekAPI = "DeepSeekAPI"
24
+
25
+ GenericBase = "GenericBase"
26
+
27
+ DeepSeekCodeInstruct = "DeepSeekCodeInstruct"
28
+ CodeLLaMaInstruct = "CodeLLaMaInstruct"
29
+ StarCoderInstruct = "StarCoderInstruct"
30
+ CodeQwenInstruct = "CodeQwenInstruct"
31
+ QwQ = "QwQ"
32
+ LLaMa3 = "LLaMa3"
33
+ DeepSeekR1 = "DeepSeekR1"
34
+
35
+ TogetherAI = "TogetherAI"
36
+
37
+
38
+ @dataclass
39
+ class LanguageModel:
40
+ model_name: str
41
+ model_repr: str
42
+ model_style: LMStyle
43
+ release_date: datetime | None # XXX Should we use timezone.utc?
44
+ link: str | None = None
45
+
46
+ def __hash__(self) -> int:
47
+ return hash(self.model_name)
48
+
49
+ def to_dict(self) -> dict:
50
+ return {
51
+ "model_name": self.model_name,
52
+ "model_repr": self.model_repr,
53
+ "model_style": self.model_style.value,
54
+ "release_date": int(self.release_date.timestamp() * 1000),
55
+ "link": self.link,
56
+ }
57
+
58
+
59
+ LanguageModelList: list[LanguageModel] = [
60
+ ## LLama3 Base (8B and 70B)
61
+ LanguageModel(
62
+ "meta-llama/Meta-Llama-3-8B",
63
+ "LLama3-8b-Base",
64
+ LMStyle.GenericBase,
65
+ datetime(2023, 1, 1),
66
+ link="https://huggingface.co/meta-llama/Meta-Llama-3-8B",
67
+ ),
68
+ LanguageModel(
69
+ "meta-llama/Meta-Llama-3-70B",
70
+ "LLama3-70b-Base",
71
+ LMStyle.GenericBase,
72
+ datetime(2023, 1, 1),
73
+ link="https://huggingface.co/meta-llama/Meta-Llama-3-70B",
74
+ ),
75
+ ## LLama3 Instruct (8B and 70B)
76
+ LanguageModel(
77
+ "meta-llama/Meta-Llama-3-8B-Instruct",
78
+ "LLama3-8b-Ins",
79
+ LMStyle.LLaMa3,
80
+ datetime(2023, 1, 1),
81
+ link="https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct",
82
+ ),
83
+ LanguageModel(
84
+ "meta-llama/Meta-Llama-3-70B-Instruct",
85
+ "LLama3-70b-Ins",
86
+ LMStyle.LLaMa3,
87
+ datetime(2023, 1, 1),
88
+ link="https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct",
89
+ ),
90
+ ## LLama3.1 Base (8B, 70B, 405B)
91
+ LanguageModel(
92
+ "meta-llama/Llama-3.1-8B",
93
+ "LLama3.1-8b-Base",
94
+ LMStyle.GenericBase,
95
+ datetime(2023, 1, 1),
96
+ link="https://huggingface.co/meta-llama/Llama-3.1-8B",
97
+ ),
98
+ LanguageModel(
99
+ "meta-llama/Llama-3.1-70B",
100
+ "LLama3.1-70b-Base",
101
+ LMStyle.GenericBase,
102
+ datetime(2023, 1, 1),
103
+ link="https://huggingface.co/meta-llama/Llama-3.1-70B",
104
+ ),
105
+ LanguageModel(
106
+ "meta-llama/Llama-3.1-405B-FP8",
107
+ "LLama3.1-405b-Base-FP8",
108
+ LMStyle.GenericBase,
109
+ datetime(2023, 1, 1),
110
+ link="https://huggingface.co/meta-llama/Llama-3.1-405B-FP8",
111
+ ),
112
+ ## LLama3.1 Instruct (8B, 70B, 405B)
113
+ LanguageModel(
114
+ "meta-llama/Llama-3.1-8B-Instruct",
115
+ "LLama3.1-8b-Ins",
116
+ LMStyle.LLaMa3,
117
+ datetime(2023, 1, 1),
118
+ link="https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct",
119
+ ),
120
+ LanguageModel(
121
+ "meta-llama/Llama-3.1-70B-Instruct",
122
+ "LLama3.1-70b-Ins",
123
+ LMStyle.LLaMa3,
124
+ datetime(2023, 1, 1),
125
+ link="https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct",
126
+ ),
127
+ LanguageModel(
128
+ "meta-llama/Llama-3.1-405B-Instruct-FP8",
129
+ "LLama3.1-405b-Ins-FP8",
130
+ LMStyle.LLaMa3,
131
+ datetime(2023, 1, 1),
132
+ link="https://huggingface.co/meta-llama/Llama-3.1-405B-Instruct-FP8",
133
+ ),
134
+ ## LLama3.3 Instruct (8B, 70B)
135
+ # LanguageModel(
136
+ # "meta-llama/Llama-3.3-8B-Instruct", # Has been removed from HuggingFace by meta-llama
137
+ # "LLama3.3-8b-Ins",
138
+ # LMStyle.LLaMa3,
139
+ # datetime(2023, 1, 1),
140
+ # link="https://huggingface.co/meta-llama/Llama-3.3-8B-Instruct",
141
+ # ),
142
+ LanguageModel(
143
+ "meta-llama/Llama-3.3-70B-Instruct",
144
+ "LLama3.3-70b-Ins",
145
+ LMStyle.LLaMa3,
146
+ datetime(2023, 1, 1),
147
+ link="https://huggingface.co/meta-llama/Llama-3.3-70B-Instruct",
148
+ ),
149
+ ## Deepseek-Coder Base (33B, 6.7B, 1.3B)
150
+ LanguageModel(
151
+ "deepseek-ai/deepseek-coder-33b-base",
152
+ "DSCoder-33b-Base",
153
+ LMStyle.GenericBase,
154
+ datetime(2023, 1, 1),
155
+ link="https://huggingface.co/deepseek-ai/deepseek-coder-33b-base",
156
+ ),
157
+ LanguageModel(
158
+ "deepseek-ai/deepseek-coder-6.7b-base",
159
+ "DSCoder-6.7b-Base",
160
+ LMStyle.GenericBase,
161
+ datetime(2023, 1, 1),
162
+ link="https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-base",
163
+ ),
164
+ LanguageModel(
165
+ "deepseek-ai/deepseek-coder-1.3b-base",
166
+ "DSCoder-1.3b-Base",
167
+ LMStyle.GenericBase,
168
+ datetime(2023, 1, 1),
169
+ link="https://huggingface.co/deepseek-ai/deepseek-coder-1.3b-base",
170
+ ),
171
+ ## Deepseek-Coder Instruct (33B, 6.7B, 1.3B)
172
+ LanguageModel(
173
+ "deepseek-ai/deepseek-coder-33b-instruct",
174
+ "DSCoder-33b-Ins",
175
+ LMStyle.DeepSeekCodeInstruct,
176
+ datetime(2023, 9, 1),
177
+ link="https://huggingface.co/deepseek-ai/deepseek-coder-33b-instruct",
178
+ ),
179
+ LanguageModel(
180
+ "deepseek-ai/deepseek-coder-6.7b-instruct",
181
+ "DSCoder-6.7b-Ins",
182
+ LMStyle.DeepSeekCodeInstruct,
183
+ datetime(2023, 9, 1),
184
+ link="https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-instruct",
185
+ ),
186
+ LanguageModel(
187
+ "deepseek-ai/deepseek-coder-1.3b-instruct",
188
+ "DSCoder-1.3b-Ins",
189
+ LMStyle.DeepSeekCodeInstruct,
190
+ datetime(2023, 8, 1),
191
+ link="https://huggingface.co/deepseek-ai/deepseek-coder-1.3b-instruct",
192
+ ),
193
+ ##
194
+ LanguageModel(
195
+ "01-ai/Yi-Coder-9B-Chat",
196
+ "Yi-Coder-9B-Chat",
197
+ LMStyle.DeepSeekAPI,
198
+ datetime(2023, 8, 1),
199
+ link="https://huggingface.co/01-ai/Yi-Coder-9B-Chat",
200
+ ),
201
+ ## Deepseek-Chat Latest API (currently DeepSeek-V3)
202
+ LanguageModel(
203
+ "deepseek-r1-preview",
204
+ "DeepSeek-R1-Preview",
205
+ LMStyle.DeepSeekAPI,
206
+ datetime(2024, 6, 30),
207
+ link="https://api-docs.deepseek.com/news/news1120",
208
+ ),
209
+ LanguageModel(
210
+ "deepseek-r1-lite-preview",
211
+ "DeepSeek-R1-Lite-Preview",
212
+ LMStyle.DeepSeekAPI,
213
+ datetime(2024, 6, 30),
214
+ link="https://api-docs.deepseek.com/news/news1120",
215
+ ),
216
+ LanguageModel(
217
+ "deepseek-chat",
218
+ "DeepSeek-V3",
219
+ LMStyle.DeepSeekAPI,
220
+ datetime(2024, 6, 30),
221
+ link="https://huggingface.co/deepseek-ai/DeepSeek-V3",
222
+ ),
223
+ ## Deepseek-Coder Latest API (currently DeepSeekCoder-V2.5)
224
+ LanguageModel(
225
+ "deepseek-coder",
226
+ "DeepSeekCoder-V2.5",
227
+ LMStyle.DeepSeekAPI,
228
+ datetime(2023, 8, 1),
229
+ link="https://huggingface.co/deepseek-ai/DeepSeek-V2",
230
+ ),
231
+ ## OpenAI GPT-3.5-Turbo
232
+ LanguageModel(
233
+ "gpt-3.5-turbo-0301",
234
+ "GPT-3.5-Turbo-0301",
235
+ LMStyle.OpenAIChat,
236
+ datetime(2021, 10, 1),
237
+ link="https://openai.com/blog/new-models-and-developer-products-announced-at-devday",
238
+ ),
239
+ LanguageModel(
240
+ "gpt-3.5-turbo-0125",
241
+ "GPT-3.5-Turbo-0125",
242
+ LMStyle.OpenAIChat,
243
+ datetime(2021, 10, 1),
244
+ link="https://openai.com/blog/new-embedding-models-and-api-updates#:~:text=Other%20new%20models%20and%20lower%20pricing",
245
+ ),
246
+ ## OpenAI GPT-4, GPT-4-Turbo
247
+ LanguageModel(
248
+ "gpt-4-0613",
249
+ "GPT-4-0613",
250
+ LMStyle.OpenAIChat,
251
+ datetime(2021, 10, 1),
252
+ link="https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4",
253
+ ),
254
+ LanguageModel(
255
+ "gpt-4-1106-preview",
256
+ "GPT-4-Turbo-1106",
257
+ LMStyle.OpenAIChat,
258
+ datetime(2023, 4, 30),
259
+ link="https://openai.com/blog/new-models-and-developer-products-announced-at-devday",
260
+ ),
261
+ LanguageModel(
262
+ "gpt-4-turbo-2024-04-09",
263
+ "GPT-4-Turbo-2024-04-09",
264
+ LMStyle.OpenAIChat,
265
+ datetime(2023, 4, 30),
266
+ link="https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4",
267
+ ),
268
+ ## OpenAI GPT-4O (and Mini)
269
+ LanguageModel(
270
+ "gpt-4o-2024-05-13",
271
+ "GPT-4O-2024-05-13",
272
+ LMStyle.OpenAIChat,
273
+ datetime(2023, 4, 30),
274
+ link="https://openai.com/index/spring-update",
275
+ ),
276
+ LanguageModel(
277
+ "gpt-4o-2024-08-06",
278
+ "GPT-4O-2024-08-06",
279
+ LMStyle.OpenAIChat,
280
+ datetime(2023, 4, 30),
281
+ link="https://openai.com/index/spring-update",
282
+ ),
283
+ LanguageModel(
284
+ "gpt-4o-mini-2024-07-18",
285
+ "GPT-4O-mini-2024-07-18",
286
+ LMStyle.OpenAIChat,
287
+ datetime(2023, 4, 30),
288
+ link="https://openai.com/index/spring-update",
289
+ ),
290
+ ## O1-Mini and O1-Preview
291
+ LanguageModel(
292
+ "o1-preview-2024-09-12",
293
+ "O1-Preview-2024-09-12",
294
+ LMStyle.OpenAIReasonPreview,
295
+ datetime(2023, 4, 30),
296
+ link="https://platform.openai.com/docs/guides/reasoning",
297
+ ),
298
+ LanguageModel(
299
+ "o1-mini-2024-09-12",
300
+ "O1-Mini-2024-09-12",
301
+ LMStyle.OpenAIReasonPreview,
302
+ datetime(2023, 4, 30),
303
+ link="https://platform.openai.com/docs/guides/reasoning",
304
+ ),
305
+ ## O1 (reasoning models)
306
+ LanguageModel(
307
+ "o1-2024-12-17__low",
308
+ "O1-2024-12-17 (Low)",
309
+ LMStyle.OpenAIReason,
310
+ datetime(2023, 4, 30),
311
+ link="https://platform.openai.com/docs/api-reference/chat/create#chat-create-reasoning_effort",
312
+ ),
313
+ LanguageModel(
314
+ "o1-2024-12-17__medium",
315
+ "O1-2024-12-17 (Med)",
316
+ LMStyle.OpenAIReason,
317
+ datetime(2023, 4, 30),
318
+ link="htthttps://platform.openai.com/docs/api-reference/chat/create#chat-create-reasoning_effort",
319
+ ),
320
+ LanguageModel(
321
+ "o1-2024-12-17__high",
322
+ "O1-2024-12-17 (High)",
323
+ LMStyle.OpenAIReason,
324
+ datetime(2023, 4, 30),
325
+ link="https://platform.openai.com/docs/api-reference/chat/create#chat-create-reasoning_effort",
326
+ ),
327
+ ## O3-Mini
328
+ LanguageModel(
329
+ "o3-mini-2025-01-31__low",
330
+ "O3-Mini-2025-01-31 (Low)",
331
+ LMStyle.OpenAIReason,
332
+ datetime(2023, 4, 30),
333
+ link="https://platform.openai.com/docs/api-reference/chat/create#chat-create-reasoning_effort",
334
+ ),
335
+ LanguageModel(
336
+ "o3-mini-2025-01-31__medium",
337
+ "O3-Mini-2025-01-31 (Med)",
338
+ LMStyle.OpenAIReason,
339
+ datetime(2023, 4, 30),
340
+ link="https://platform.openai.com/docs/api-reference/chat/create#chat-create-reasoning_effort",
341
+ ),
342
+ LanguageModel(
343
+ "o3-mini-2025-01-31__high",
344
+ "O3-Mini-2025-01-31 (High)",
345
+ LMStyle.OpenAIReason,
346
+ datetime(2023, 4, 30),
347
+ link="https://platform.openai.com/docs/api-reference/chat/create#chat-create-reasoning_effort",
348
+ ),
349
+ LanguageModel(
350
+ "o4-mini__high",
351
+ "O4-Mini (High)",
352
+ LMStyle.OpenAIReason,
353
+ datetime(2023, 4, 30),
354
+ link="https://platform.openai.com/docs/api-reference/chat/create#chat-create-reasoning_effort",
355
+ ),
356
+ LanguageModel(
357
+ "o4-mini__medium",
358
+ "O4-Mini (Medium)",
359
+ LMStyle.OpenAIReason,
360
+ datetime(2023, 4, 30),
361
+ link="https://platform.openai.com/docs/api-reference/chat/create#chat-create-reasoning_effort",
362
+ ),
363
+ LanguageModel(
364
+ "o4-mini__low",
365
+ "O4-Mini (Low)",
366
+ LMStyle.OpenAIReason,
367
+ datetime(2023, 4, 30),
368
+ link="https://platform.openai.com/docs/api-reference/chat/create#chat-create-reasoning_effort",
369
+ ),
370
+ LanguageModel(
371
+ "o3__high",
372
+ "O3 (High)",
373
+ LMStyle.OpenAIReason,
374
+ datetime(2023, 4, 30),
375
+ link="https://platform.openai.com/docs/api-reference/chat/create#chat-create-reasoning_effort",
376
+ ),
377
+ ## Claude and Claude 2
378
+ LanguageModel(
379
+ "claude-instant-1",
380
+ "Claude-Instant-1",
381
+ LMStyle.Claude,
382
+ datetime(2022, 12, 31),
383
+ link="https://www.anthropic.com/index/introducing-claude",
384
+ ),
385
+ LanguageModel(
386
+ "claude-2",
387
+ "Claude-2",
388
+ LMStyle.Claude,
389
+ datetime(2022, 12, 31),
390
+ link="https://www.anthropic.com/index/claude-2",
391
+ ),
392
+ ## Claude 3 and Claude 3.5
393
+ LanguageModel(
394
+ "claude-3-opus-20240229",
395
+ "Claude-3-Opus",
396
+ LMStyle.Claude3,
397
+ datetime(2023, 9, 1),
398
+ link="https://www.anthropic.com/index/claude-3",
399
+ ),
400
+ LanguageModel(
401
+ "claude-3-sonnet-20240229",
402
+ "Claude-3-Sonnet",
403
+ LMStyle.Claude3,
404
+ datetime(2023, 9, 1),
405
+ link="https://www.anthropic.com/index/claude-3",
406
+ ),
407
+ LanguageModel(
408
+ "claude-3-5-sonnet-20240620",
409
+ "Claude-3.5-Sonnet-20240620",
410
+ LMStyle.Claude3,
411
+ datetime(2024, 3, 31),
412
+ link="https://www.anthropic.com/news/claude-3-5-sonnet",
413
+ ),
414
+ LanguageModel(
415
+ "claude-3-5-sonnet-20241022",
416
+ "Claude-3.5-Sonnet-20241022",
417
+ LMStyle.Claude3,
418
+ datetime(2024, 3, 31),
419
+ link="https://www.anthropic.com/news/claude-3-5-sonnet",
420
+ ),
421
+ LanguageModel(
422
+ "claude-3-7-sonnet-20250219",
423
+ "Claude-3.7-Sonnet (Thinking)",
424
+ LMStyle.Claude3Thinking,
425
+ datetime(2024, 3, 31),
426
+ link="https://www.anthropic.com/news/claude-3-7-sonnet",
427
+ ),
428
+ LanguageModel(
429
+ "claude-3-haiku-20240307",
430
+ "Claude-3-Haiku",
431
+ LMStyle.Claude3,
432
+ datetime(2023, 4, 30),
433
+ link="https://www.anthropic.com/index/claude-3",
434
+ ),
435
+ LanguageModel(
436
+ "claude-sonnet-4-20250514",
437
+ "Claude-Sonnet-4 (Thinking)",
438
+ LMStyle.Claude3Thinking,
439
+ datetime(2023, 4, 30),
440
+ link="https://www.anthropic.com/claude/sonnet",
441
+ ),
442
+ LanguageModel(
443
+ "claude-opus-4-20250514",
444
+ "Claude-Opus-4 (Thinking)",
445
+ LMStyle.Claude3Thinking,
446
+ datetime(2023, 4, 30),
447
+ link="https://www.anthropic.com/claude/sonnet",
448
+ ),
449
+ LanguageModel(
450
+ "claude-opus-4-20250514_nothink",
451
+ "Claude-Opus-4",
452
+ LMStyle.Claude3Thinking,
453
+ datetime(2023, 4, 30),
454
+ link="https://www.anthropic.com/claude/sonnet",
455
+ ),
456
+ LanguageModel(
457
+ "claude-sonnet-4-20250514_nothink",
458
+ "Claude-Sonnet-4",
459
+ LMStyle.Claude3,
460
+ datetime(2023, 4, 30),
461
+ link="https://www.anthropic.com/claude/sonnet",
462
+ ),
463
+ ## Gemini
464
+ LanguageModel(
465
+ "gemini-1.5-pro-002",
466
+ "Gemini-Pro-1.5-002",
467
+ LMStyle.Gemini,
468
+ datetime(2023, 4, 30),
469
+ link="https://blog.google/technology/ai/gemini-api-developers-cloud",
470
+ ),
471
+ LanguageModel(
472
+ "gemini-1.5-flash-002",
473
+ "Gemini-Flash-1.5-002",
474
+ LMStyle.Gemini,
475
+ datetime(2023, 4, 30),
476
+ link="https://blog.google/technology/ai/gemini-api-developers-cloud",
477
+ ),
478
+ # LanguageModel(
479
+ # "gemini-exp-1206",
480
+ # "Gemini-Exp-1206",
481
+ # LMStyle.Gemini,
482
+ # datetime(2023, 4, 30),
483
+ # link="https://ai.google.dev/gemini-api/docs/models/experimental-models",
484
+ # ),
485
+ # LanguageModel(
486
+ # "gemini-2.0-flash-thinking-exp-1219",
487
+ # "Gemini-Flash-2.0-Thinking-12-19",
488
+ # LMStyle.GeminiThinking,
489
+ # datetime(2023, 4, 30),
490
+ # link="https://ai.google.dev/gemini-api/docs/models/experimental-models",
491
+ # ),
492
+ # LanguageModel(
493
+ # "gemini-2.0-flash-thinking-exp-01-21",
494
+ # "Gemini-Flash-2.0-Thinking-01-21",
495
+ # LMStyle.GeminiThinking,
496
+ # datetime(2023, 4, 30),
497
+ # link="https://ai.google.dev/gemini-api/docs/models/experimental-models",
498
+ # ),
499
+ # LanguageModel(
500
+ # "gemini-2.0-flash-exp",
501
+ # "Gemini-Flash-2.0-Exp",
502
+ # LMStyle.Gemini,
503
+ # datetime(2023, 4, 30),
504
+ # link="https://ai.google.dev/gemini-api/docs/models/experimental-models",
505
+ # ),
506
+ LanguageModel(
507
+ "gemini-2.5-pro",
508
+ "Gemini-2.5-Pro-06-17",
509
+ LMStyle.GeminiThinking,
510
+ datetime(2023, 4, 30),
511
+ link="https://blog.google/technology/google-deepmind/gemini-model-thinking-updates-march-2025/#advanced-coding",
512
+ ),
513
+ LanguageModel(
514
+ "gemini-2.5-pro-preview-06-05",
515
+ "Gemini-2.5-Pro-06-05",
516
+ LMStyle.GeminiThinking,
517
+ datetime(2023, 4, 30),
518
+ link="https://blog.google/technology/google-deepmind/gemini-model-thinking-updates-march-2025/#advanced-coding",
519
+ ),
520
+ LanguageModel(
521
+ "gemini-2.5-pro-preview-05-06",
522
+ "Gemini-2.5-Pro-05-06",
523
+ LMStyle.GeminiThinking,
524
+ datetime(2023, 4, 30),
525
+ link="https://blog.google/technology/google-deepmind/gemini-model-thinking-updates-march-2025/#advanced-coding",
526
+ ),
527
+ # LanguageModel(
528
+ # "gemini-2.5-pro-exp-03-25",
529
+ # "Gemini-2.5-Pro-03-25",
530
+ # LMStyle.GeminiThinking,
531
+ # datetime(2023, 4, 30),
532
+ # link="https://blog.google/technology/google-deepmind/gemini-model-thinking-updates-march-2025/#advanced-coding",
533
+ # ),
534
+ LanguageModel(
535
+ "gemini-2.5-flash-preview-04-17",
536
+ "Gemini-2.5-Flash-04-17",
537
+ LMStyle.GeminiThinking,
538
+ datetime(2023, 4, 30),
539
+ link="https://developers.googleblog.com/en/start-building-with-gemini-25-flash/",
540
+ ),
541
+ LanguageModel(
542
+ "gemini-2.5-flash-preview-05-20",
543
+ "Gemini-2.5-Flash-05-20",
544
+ LMStyle.GeminiThinking,
545
+ datetime(2023, 4, 30),
546
+ link="https://developers.googleblog.com/en/start-building-with-gemini-25-flash/",
547
+ ),
548
+ ## Generic Base Models
549
+ LanguageModel(
550
+ "bigcode/starcoder2-3b",
551
+ "StarCoder2-3b",
552
+ LMStyle.GenericBase,
553
+ datetime(2023, 1, 1),
554
+ link="https://huggingface.co/bigcode/starcoder2-7b-magicoder-instruct/tree/main",
555
+ ),
556
+ LanguageModel(
557
+ "bigcode/starcoder2-7b",
558
+ "StarCoder2-7b",
559
+ LMStyle.GenericBase,
560
+ datetime(2023, 1, 1),
561
+ link="https://huggingface.co/bigcode/starcoder2-7b-magicoder-instruct/tree/main",
562
+ ),
563
+ LanguageModel(
564
+ "bigcode/starcoder2-15b",
565
+ "StarCoder2-15b",
566
+ LMStyle.GenericBase,
567
+ datetime(2023, 1, 1),
568
+ link="https://huggingface.co/bigcode/starcoder2-7b-magicoder-instruct/tree/main",
569
+ ),
570
+ LanguageModel(
571
+ "google/codegemma-7b",
572
+ "CodeGemma-7b-Base",
573
+ LMStyle.GenericBase,
574
+ datetime(2023, 1, 1),
575
+ link="https://huggingface.co/google/codegemma-7b",
576
+ ),
577
+ LanguageModel(
578
+ "google/codegemma-2b",
579
+ "CodeGemma-2b-Base",
580
+ LMStyle.GenericBase,
581
+ datetime(2023, 1, 1),
582
+ link="https://huggingface.co/google/codegemma-2b",
583
+ ),
584
+ LanguageModel(
585
+ "google/gemma-7b",
586
+ "Gemma-7b-Base",
587
+ LMStyle.GenericBase,
588
+ datetime(2023, 1, 1),
589
+ link="https://huggingface.co/google/gemma-7b",
590
+ ),
591
+ LanguageModel(
592
+ "google/gemma-2b",
593
+ "Gemma-2b-Base",
594
+ LMStyle.GenericBase,
595
+ datetime(2023, 1, 1),
596
+ link="https://huggingface.co/google/gemma-2b",
597
+ ),
598
+ ## Mistral Web
599
+ LanguageModel(
600
+ "mistral-large-latest",
601
+ "Mistral-Large",
602
+ LMStyle.MistralWeb,
603
+ datetime(2023, 1, 1),
604
+ link="https://mistral.ai/news/mistral-large/",
605
+ ),
606
+ ## Mistral OSS
607
+ LanguageModel(
608
+ "open-mixtral-8x22b",
609
+ "Mixtral-8x22B-Ins",
610
+ LMStyle.MistralWeb,
611
+ datetime(2023, 1, 1),
612
+ link="https://mistral.ai/news/mixtral-8x22b/",
613
+ ),
614
+ LanguageModel(
615
+ "open-mixtral-8x7b",
616
+ "Mixtral-8x7B-Ins",
617
+ LMStyle.MistralWeb,
618
+ datetime(2023, 1, 1),
619
+ link="https://mistral.ai/news/mixtral-8x7b/",
620
+ ),
621
+ LanguageModel(
622
+ "codestral-latest",
623
+ "Codestral-Latest",
624
+ LMStyle.MistralWeb,
625
+ datetime(2023, 1, 1),
626
+ link="https://mistral.ai/news/codestral/",
627
+ ),
628
+ ## QwQ
629
+ LanguageModel(
630
+ "Qwen/QwQ-32B-Preview",
631
+ "QwQ-32B-Preview",
632
+ LMStyle.QwQ,
633
+ datetime(2024, 6, 30),
634
+ link="https://huggingface.co/Qwen/QwQ-32B-Preview",
635
+ ),
636
+ LanguageModel(
637
+ "Qwen/QwQ-32B",
638
+ "QwQ-32B",
639
+ LMStyle.QwQ,
640
+ datetime(2024, 6, 30),
641
+ link="https://huggingface.co/Qwen/QwQ-32B",
642
+ ),
643
+ ## Qwen 2
644
+ LanguageModel(
645
+ "Qwen/Qwen2-72B-Instruct",
646
+ "Qwen2-Ins-72B",
647
+ LMStyle.CodeQwenInstruct,
648
+ datetime(2023, 8, 30),
649
+ link="https://huggingface.co/Qwen/Qwen2-72B-Instruct",
650
+ ),
651
+ ## Qwen 2.5
652
+ LanguageModel(
653
+ "Qwen/Qwen2.5-7B-Instruct",
654
+ "Qwen2.5-Ins-7B",
655
+ LMStyle.CodeQwenInstruct,
656
+ datetime(2023, 8, 30),
657
+ link="https://huggingface.co/Qwen/Qwen2.5-7B-Instruct",
658
+ ),
659
+ LanguageModel(
660
+ "Qwen/Qwen3-4B-Base",
661
+ "Qwen3-4B-Base",
662
+ LMStyle.GenericBase,
663
+ datetime(2023, 8, 30),
664
+ link="https://huggingface.co/Qwen/Qwen3-4B-Base",
665
+ ),
666
+ LanguageModel(
667
+ "Qwen/Qwen3-1.7B-Base",
668
+ "Qwen3-1.7B-Base",
669
+ LMStyle.GenericBase,
670
+ datetime(2023, 8, 30),
671
+ link="https://huggingface.co/Qwen/Qwen3-1.7B-Base",
672
+ ),
673
+ LanguageModel(
674
+ "ftajwar/qwen3_4B_Base_MaxRL_Polaris_1000_steps",
675
+ "ftajwar/qwen3_4B_Base_MaxRL_Polaris_1000_steps",
676
+ LMStyle.GenericBase,
677
+ datetime(2023, 8, 30),
678
+ link="https://huggingface.co/ftajwar/qwen3_4B_Base_MaxRL_Polaris_1000_steps",
679
+ ),
680
+ LanguageModel(
681
+ "ftajwar/qwen3_4B_Base_GRPO_Polaris_1000_steps",
682
+ "ftajwar/qwen3_4B_Base_GRPO_Polaris_1000_steps",
683
+ LMStyle.GenericBase,
684
+ datetime(2023, 8, 30),
685
+ link="https://huggingface.co/ftajwar/qwen3_4B_Base_GRPO_Polaris_1000_steps",
686
+ ),
687
+ LanguageModel(
688
+ "/data/models/Qwen2.5-1.5B-Instruct/",
689
+ "/data/models/Qwen2.5-1.5B-Instruct/",
690
+ LMStyle.CodeQwenInstruct,
691
+ datetime(2023, 8, 30),
692
+ link="https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct",
693
+ ),
694
+ LanguageModel(
695
+ "/data/repos/agentica-rllm/scripts/train/deepcoder/checkpoints/deepcoder-1.5b/grpo-lcb-prompt/global_step_900/actor/checkpoint",
696
+ "grpo_900",
697
+ LMStyle.CodeQwenInstruct,
698
+ datetime(2023, 8, 30),
699
+ link="https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct",
700
+ ),
701
+ LanguageModel(
702
+ "/data/repos/agentica-rllm/scripts/train/deepcoder/checkpoints/deepcoder-1.5b/tailrl-lcb-prompt/global_step_900/actor/checkpoint",
703
+ "tailrl_900",
704
+ LMStyle.CodeQwenInstruct,
705
+ datetime(2023, 8, 30),
706
+ link="https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct",
707
+ ),
708
+ LanguageModel(
709
+ "Qwen/Qwen2.5-32B-Instruct",
710
+ "Qwen2.5-Ins-32B",
711
+ LMStyle.CodeQwenInstruct,
712
+ datetime(2023, 8, 30),
713
+ link="https://huggingface.co/Qwen/Qwen2.5-32B-Instruct",
714
+ ),
715
+ LanguageModel(
716
+ "Qwen/Qwen2.5-72B-Instruct",
717
+ "Qwen2.5-Ins-72B",
718
+ LMStyle.CodeQwenInstruct,
719
+ datetime(2023, 8, 30),
720
+ link="https://huggingface.co/Qwen/Qwen2.5-72B-Instruct",
721
+ ),
722
+ ## Qwen 2.5-Coder
723
+ LanguageModel(
724
+ "Qwen/Qwen2.5-Coder-7B-Instruct",
725
+ "Qwen2.5-Coder-Ins-7B",
726
+ LMStyle.CodeQwenInstruct,
727
+ datetime(2024, 6, 30),
728
+ link="https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct",
729
+ ),
730
+ LanguageModel(
731
+ "Qwen/Qwen2.5-Coder-32B-Instruct",
732
+ "Qwen2.5-Coder-Ins-32B",
733
+ LMStyle.CodeQwenInstruct,
734
+ datetime(2024, 6, 30),
735
+ link="https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct",
736
+ ),
737
+ LanguageModel(
738
+ "Qwen/Qwen3-235B-A22B",
739
+ "Qwen3-235B-A22B",
740
+ LMStyle.CodeQwenInstruct,
741
+ datetime(2024, 6, 30),
742
+ link="https://huggingface.co/Qwen/Qwen3-235B-A22B",
743
+ ),
744
+ LanguageModel(
745
+ "azerogpt",
746
+ "AzeroGPT-64b",
747
+ LMStyle.CodeQwenInstruct,
748
+ datetime(2024, 3, 1),
749
+ "https://azerogpt.soundai.com",
750
+ ),
751
+ LanguageModel(
752
+ "Kimi-k1.6-IOI-high",
753
+ "Kimi-k1.6-IOI-high",
754
+ LMStyle.CodeQwenInstruct,
755
+ datetime(2024, 7, 30),
756
+ "https://kimi.moonshot.cn/",
757
+ ),
758
+ LanguageModel(
759
+ "Kimi-k1.6-IOI",
760
+ "Kimi-k1.6-IOI",
761
+ LMStyle.CodeQwenInstruct,
762
+ datetime(2024, 7, 30),
763
+ "https://kimi.moonshot.cn/",
764
+ ),
765
+ LanguageModel(
766
+ "Qwen/QwQ-Max-Preview",
767
+ "QwQ-Max-Preview",
768
+ LMStyle.QwQ,
769
+ datetime(2024, 6, 30),
770
+ link="https://huggingface.co/Qwen/QwQ-Max-Preview",
771
+ ),
772
+ LanguageModel(
773
+ "accounts/fireworks/models/deepseek-r1",
774
+ "DeepSeek-R1",
775
+ LMStyle.DeepSeekR1,
776
+ datetime(2024, 6, 30),
777
+ link="https://huggingface.co/deepseek-ai/DeepSeek-R1",
778
+ ),
779
+ LanguageModel(
780
+ "deepseek-reasoner",
781
+ "DeepSeek-R1-0528",
782
+ LMStyle.DeepSeekR1,
783
+ datetime(2024, 6, 30),
784
+ link="https://huggingface.co/deepseek-ai/DeepSeek-R1-0528",
785
+ ),
786
+ ## DeepSeek R1 distilled of Qwen/Llama models
787
+ LanguageModel(
788
+ "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
789
+ "DeepSeek-R1-Distill-Qwen-1.5B",
790
+ LMStyle.DeepSeekR1,
791
+ datetime(2025, 1, 20),
792
+ link="https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
793
+ ),
794
+ LanguageModel(
795
+ "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
796
+ "DeepSeek-R1-Distill-Qwen-7B",
797
+ LMStyle.DeepSeekR1,
798
+ datetime(2025, 1, 20),
799
+ link="https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
800
+ ),
801
+ LanguageModel(
802
+ "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
803
+ "DeepSeek-R1-Distill-Qwen-14B",
804
+ LMStyle.DeepSeekR1,
805
+ datetime(2025, 1, 20),
806
+ link="https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
807
+ ),
808
+ LanguageModel(
809
+ "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
810
+ "DeepSeek-R1-Distill-Qwen-32B",
811
+ LMStyle.DeepSeekR1,
812
+ datetime(2025, 1, 20),
813
+ link="https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
814
+ ),
815
+ LanguageModel(
816
+ "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
817
+ "DeepSeek-R1-Distill-Llama-8B",
818
+ LMStyle.DeepSeekR1,
819
+ datetime(2025, 1, 20),
820
+ link="https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
821
+ ),
822
+ LanguageModel(
823
+ "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
824
+ "DeepSeek-R1-Distill-Llama-70B",
825
+ LMStyle.DeepSeekR1,
826
+ datetime(2025, 1, 20),
827
+ link="https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
828
+ ),
829
+ LanguageModel(
830
+ model_name="command-r-plus-08-2024",
831
+ model_repr="command-r-plus-08-2024",
832
+ model_style=LMStyle.CohereCommand,
833
+ release_date=datetime(2024, 8, 1),
834
+ link="https://docs.cohere.com/docs/command-r-plus",
835
+ ),
836
+ LanguageModel(
837
+ model_name="command-r-08-2024",
838
+ model_repr="command-r-08-2024",
839
+ model_style=LMStyle.CohereCommand,
840
+ release_date=datetime(2024, 8, 1),
841
+ link="https://docs.cohere.com/docs/command-r",
842
+ ),
843
+ LanguageModel(
844
+ model_name="command-r7b-12-2024",
845
+ model_repr="command-r7b-12-2024",
846
+ model_style=LMStyle.CohereCommand,
847
+ release_date=datetime(2024, 12, 13),
848
+ link="https://docs.cohere.com/docs/command-r7b",
849
+ ),
850
+ LanguageModel(
851
+ model_name="command-a-03-2025",
852
+ model_repr="command-a-03-2025",
853
+ model_style=LMStyle.CohereCommand,
854
+ release_date=datetime(2025, 3, 13),
855
+ link="https://docs.cohere.com/docs/command-a",
856
+ ),
857
+ LanguageModel(
858
+ model_name="Qwen/Qwen2.5-72B-Instruct-Turbo",
859
+ model_repr="Qwen2.5-72B-Instruct-Turbo",
860
+ model_style=LMStyle.TogetherAI,
861
+ release_date=datetime(2024, 9, 19),
862
+ link="https://docs.together.ai/docs/serverless-models",
863
+ ),
864
+ # LanguageModel(
865
+ # model_name="deepseek-ai/DeepSeek-V3",
866
+ # model_repr="DeepSeek-V3",
867
+ # model_style=LMStyle.TogetherAI,
868
+ # release_date=datetime(2024, 12, 26),
869
+ # link="https://docs.together.ai/docs/serverless-models",
870
+ # ),
871
+ LanguageModel(
872
+ "MetaStone-L1-7B",
873
+ "MetaStone-L1-7B",
874
+ LMStyle.DeepSeekR1,
875
+ datetime(2025, 3, 12),
876
+ "https://www.wenxiaobai.com/",
877
+ ),
878
+ LanguageModel(
879
+ "grok-3-mini-beta",
880
+ "Grok-3-Mini",
881
+ LMStyle.Grok,
882
+ datetime(2024, 3, 1),
883
+ "https://x.com/i/grok",
884
+ ),
885
+ LanguageModel(
886
+ "grok-3-mini-beta_high",
887
+ "Grok-3-Mini (High)",
888
+ LMStyle.Grok,
889
+ datetime(2024, 3, 1),
890
+ "https://x.com/i/grok",
891
+ ),
892
+ LanguageModel(
893
+ "nvidia/Llama-3_1-Nemotron-Ultra-253B-v1",
894
+ "Llama-3_1-Nemotron-Ultra-253B-v1",
895
+ LMStyle.DeepSeekR1,
896
+ datetime(2024, 4, 1),
897
+ "https://huggingface.co/nvidia/Llama-3_1-Nemotron-Ultra-253B-v1",
898
+ ),
899
+ LanguageModel(
900
+ "nvidia/Llama-3_1-Nemotron-Nano-8B-v1",
901
+ "Llama-3_1-Nemotron-Nano-8B-v1",
902
+ LMStyle.DeepSeekR1,
903
+ datetime(2024, 4, 1),
904
+ "https://huggingface.co/nvidia/Llama-3_1-Nemotron-Nano-8B-v1/",
905
+ ),
906
+ LanguageModel(
907
+ "agentica-org/DeepCoder-14B-Preview",
908
+ "DeepCoder-14B-Preview",
909
+ LMStyle.DeepSeekR1,
910
+ datetime(2024, 4, 1),
911
+ "https://huggingface.co/agentica-org/DeepCoder-14B-Preview",
912
+ ),
913
+ ]
914
+
915
# Lookup table: model identifier -> its LanguageModel record.
LanguageModelStore: dict[str, LanguageModel] = dict(
    (model.model_name, model) for model in LanguageModelList
)
918
+
919
if __name__ == "__main__":
    # Smoke-check: dump every registered model identifier.
    print(list(LanguageModelStore))
lcb_runner/prompts/__init__.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ from lcb_runner.prompts.code_execution import format_prompt_execution, format_prompt_execution_cot
2
+ from lcb_runner.prompts.code_generation import format_prompt_generation
3
+ from lcb_runner.prompts.test_output_prediction import format_prompt_test_output
4
+ from lcb_runner.prompts.self_repair import format_prompt_self_repair
lcb_runner/prompts/code_execution.py ADDED
@@ -0,0 +1,183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+
3
+ from lcb_runner.lm_styles import LMStyle
4
+ from lcb_runner.benchmarks import CodeExecutionProblem
5
+
6
+
7
def make_cot_output_prompt(s):
    """Build a chain-of-thought output-prediction prompt.

    Args:
        s: A ``(code, call)`` tuple — ``code`` is the Python source of a
            function, ``call`` is the invocation whose output must be
            predicted.

    Returns:
        A few-shot prompt instructing the model to execute the code step by
        step and complete the assertion inside [ANSWER]...[/ANSWER] tags.
    """
    # Unpack into `call` rather than `input` to avoid shadowing the builtin.
    code, call = s
    return f"""You are given a Python function and an assertion containing an input to the function. Complete the assertion with a literal (no unsimplified expressions, no function calls) containing the output when executing the provided code on the given input, even if the function is incorrect or incomplete. Do NOT output any extra information. Execute the program step by step before arriving at an answer, and provide the full assertion with the correct output in [ANSWER] and [/ANSWER] tags, following the examples.

[PYTHON]
def performOperation(s):
    s = s + s
    return "b" + s + "a"
assert performOperation(s = "hi") == ??
[/PYTHON]
[THOUGHT]
Let's execute the code step by step:

1. The function performOperation is defined, which takes a single argument s.
2. The function is called with the argument "hi", so within the function, s is initially "hi".
3. Inside the function, s is concatenated with itself, so s becomes "hihi".
4. The function then returns a new string that starts with "b", followed by the value of s (which is now "hihi"), and ends with "a".
5. The return value of the function is therefore "bhihia".
[/THOUGHT]
[ANSWER]
assert performOperation(s = "hi") == "bhihia"
[/ANSWER]

[PYTHON]
{code}
assert {call} == ??
[/PYTHON]
[THOUGHT]
"""
36
+
37
+
38
def make_direct_output_prompt(s):
    """Build a direct (no chain-of-thought) output-prediction prompt.

    Args:
        s: A ``(code, call)`` tuple — ``code`` is the Python source of a
            function, ``call`` is the invocation whose output must be
            predicted.

    Returns:
        A few-shot prompt asking the model to complete the assertion inside
        [ANSWER]...[/ANSWER] tags without showing intermediate reasoning.
    """
    # Unpack into `call` rather than `input` to avoid shadowing the builtin.
    code, call = s
    return f"""You are given a Python function and an assertion containing an input to the function. Complete the assertion with a literal (no unsimplified expressions, no function calls) containing the output when executing the provided code on the given input, even if the function is incorrect or incomplete. Do NOT output any extra information. Provide the full assertion with the correct output in [ANSWER] and [/ANSWER] tags, following the examples.

[PYTHON]
def repeatNumber(number : int) -> int:
    return number
assert repeatNumber(number = 17) == ??
[/PYTHON]
[ANSWER]
assert repeatNumber(number = 17) == 17
[/ANSWER]

[PYTHON]
def addCharacterA(string : str) -> str:
    return string + "a"
assert addCharacterA(string = "x9j") == ??
[/PYTHON]
[ANSWER]
assert addCharacterA(string = "x9j") == "x9ja"
[/ANSWER]

[PYTHON]
{code}
assert {call} == ??
[/PYTHON]
[ANSWER]
"""
66
+
67
+
68
def format_prompt_execution(question, LanguageModelStyle):
    """Format a direct (non-CoT) code-execution prompt for the given model style."""
    return format_prompt_execution_base(question, LanguageModelStyle, cot=False)
70
+
71
+
72
def format_prompt_execution_cot(question, LanguageModelStyle):
    """Format a chain-of-thought code-execution prompt for the given model style."""
    return format_prompt_execution_base(question, LanguageModelStyle, cot=True)
74
+
75
+
76
def format_prompt_execution_base(
    question: CodeExecutionProblem, LanguageModelStyle: LMStyle, cot: bool
) -> str:
    """Build a code-execution (output-prediction) prompt shaped for a model family.

    Args:
        question: Problem carrying the ``code`` to execute and the ``input`` call.
        LanguageModelStyle: Model family whose prompt/chat format to emit.
        cot: When True, use the chain-of-thought prompt variant.

    Returns:
        The prompt in whatever shape the target family expects — a list of chat
        message dicts (OpenAIChat, MistralWeb), a plain string (Claude, Gemini,
        and most OSS instruct families), a chat-template-rendered string
        (LLaMa3, DracarysLlama), or a ``(system_message, messages)`` tuple
        (Claude3) — despite the ``-> str`` annotation.

    Raises:
        NotImplementedError: If the model style has no prompt format here.
    """
    code = question.code
    input = question.input
    system_message = "You are an expert at Python programming, code execution, test case generation, and fuzzing."
    # Select the few-shot template; both take a (code, call) tuple.
    if cot:
        prompt = make_cot_output_prompt((code, input))
    else:
        prompt = make_direct_output_prompt((code, input))

    if LanguageModelStyle == LMStyle.OpenAIChat:
        # OpenAI chat API: system message followed by the user prompt.
        chat_messages = [
            {
                "role": "system",
                "content": system_message,
            },
        ]
        chat_messages += [
            {"role": "user", "content": prompt},
        ]
        return chat_messages
    if LanguageModelStyle == LMStyle.LLaMa3:
        # Local HF model: render the conversation via the tokenizer's chat template.
        chat_messages = [
            {
                "role": "system",
                "content": system_message,
            },
        ]
        chat_messages += [
            {"role": "user", "content": prompt},
        ]
        # Imported lazily so transformers is only required for this family.
        from transformers import AutoTokenizer

        tokenizer = AutoTokenizer.from_pretrained(
            "meta-llama/Meta-Llama-3-8B-Instruct", padding_side="left", use_fast=False
        )
        return tokenizer.apply_chat_template(
            chat_messages,
            tokenize=False,
            add_generation_prompt=True,
            truncation=False,
            padding=False,
        )

    elif LanguageModelStyle == LMStyle.Claude:
        # Plain-string families: the raw few-shot prompt is sent as-is.
        return prompt
    elif LanguageModelStyle == LMStyle.Claude3:
        # Claude 3 API takes the system message separately from the messages list.
        prompt = [
            {
                "role": "user",
                "content": prompt,
            }
        ]
        return system_message, prompt
    elif LanguageModelStyle == LMStyle.Gemini:
        return prompt
    elif LanguageModelStyle == LMStyle.StarCoderInstruct:
        return prompt
    elif LanguageModelStyle == LMStyle.DeepSeekCodeInstruct:
        return prompt
    elif LanguageModelStyle == LMStyle.CodeLLaMaInstruct:
        return prompt
    elif LanguageModelStyle == LMStyle.MagiCoder:
        return prompt
    elif LanguageModelStyle == LMStyle.WizardCoder:
        return prompt
    elif LanguageModelStyle == LMStyle.Phind:
        return prompt
    elif LanguageModelStyle == LMStyle.OC:
        return prompt
    elif LanguageModelStyle == LMStyle.MistralWeb:
        # Mistral chat API: same message-dict shape as OpenAI.
        chat_messages = [
            {
                "role": "system",
                "content": system_message,
            },
            {"role": "user", "content": prompt},
        ]
        return chat_messages
    elif LanguageModelStyle == LMStyle.DracarysLlama:
        chat_messages = [
            {
                "role": "system",
                "content": system_message,
            },
        ]
        chat_messages += [
            {"role": "user", "content": prompt},
        ]
        # Imported lazily so transformers is only required for this family.
        from transformers import AutoTokenizer

        tokenizer = AutoTokenizer.from_pretrained(
            "abacusai/Dracarys-Llama-3.1-70B-Instruct", padding_side="right", use_fast=False
        )
        return tokenizer.apply_chat_template(
            chat_messages,
            tokenize=False,
            add_generation_prompt=True,
            truncation=False,
            padding=False,
        )
    elif LanguageModelStyle == LMStyle.DracarysQwen:
        return prompt
    else:
        raise NotImplementedError(
            f"LanguageModelStyle {LanguageModelStyle} not implemented"
        )
lcb_runner/prompts/code_generation.py ADDED
@@ -0,0 +1,384 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+
3
+ try:
4
+ from anthropic import HUMAN_PROMPT, AI_PROMPT
5
+ except ImportError:
6
+ HUMAN_PROMPT = None
7
+ AI_PROMPT = None
8
+
9
+ from lcb_runner.lm_styles import LMStyle
10
+ from lcb_runner.benchmarks.code_generation import CodeGenerationProblem
11
+
12
+
13
+ class PromptConstants:
14
+ SYSTEM_MESSAGE_GENERIC = f"You are an expert Python programmer. You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests."
15
+
16
+ SYSTEM_MESSAGE_GEMINI = f"You are an expert Python programmer. You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. Do NOT use system calls like `exit` in the generated program. Ensure that the first code block contains the solution."
17
+
18
+ SYSTEM_MESSAGE_GEMINITHINK = f"You are an expert Python programmer. You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests."
19
+
20
+ SYSTEM_MESSAGE_DEEPSEEK = f"You are an AI programming assistant, utilizing the DeepSeek Coder model, developed by DeepSeek Company, and you answer questions related to computer science."
21
+
22
+ SYSTEM_MESSAGE_CODEQWEN = (
23
+ f"<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user"
24
+ )
25
+
26
+ SYSTEM_MESSAGE_QWEN_QWQ = f"<|im_start|>system\nYou are a helpful and harmless assistant. You are Qwen developed by Alibaba. You should think step-by-step.<|im_end|>\n<|im_start|>user"
27
+
28
+ SYSTEM_MESSAGE_DEEPSEEK_R1 = (
29
+ "<|begin▁of▁sentence|>A conversation between User and Assistant. "
30
+ "The user asks a question, and the Assistant solves it. "
31
+ "The assistant first thinks about the reasoning process in the mind and then provides the user with the answer. "
32
+ "The reasoning process and answer are enclosed within <think> </think> and <answer> </answer> tags, respectively, i.e., <think> reasoning process here </think> <answer> answer here </answer>.<|User|>"
33
+ )
34
+
35
+ FORMATTING_MESSAGE_WITH_STARTER_CODE = "You will use the following starter code to write the solution to the problem and enclose your code within delimiters."
36
+
37
+ FORMATTING_WITHOUT_STARTER_CODE = "Read the inputs from stdin solve the problem and write the answer to stdout (do not directly test on the sample inputs). Enclose your code within delimiters as follows. Ensure that when the python program runs, it reads the inputs, runs the algorithm and writes output to STDOUT."
38
+
39
+
40
+ def get_generic_question_template_answer(question: CodeGenerationProblem):
41
+ prompt = f"### Question:\n{question.question_content}\n\n"
42
+ if question.starter_code:
43
+ prompt += (
44
+ f"### Format: {PromptConstants.FORMATTING_MESSAGE_WITH_STARTER_CODE}\n"
45
+ )
46
+ prompt += f"```python\n{question.starter_code}\n```\n\n"
47
+ else:
48
+ prompt += f"### Format: {PromptConstants.FORMATTING_WITHOUT_STARTER_CODE}\n"
49
+ prompt += "```python\n# YOUR CODE HERE\n```\n\n"
50
+ prompt += f"### Answer: (use the provided format with backticks)\n\n"
51
+ return prompt
52
+
53
+
54
+ def get_oaireason_question_template_answer(question: CodeGenerationProblem):
55
+ prompt = f"### Question:\n{question.question_content}\n\n"
56
+ if question.starter_code:
57
+ prompt += (
58
+ f"### Format: {PromptConstants.FORMATTING_MESSAGE_WITH_STARTER_CODE}\n"
59
+ )
60
+ prompt += f"```python\n{question.starter_code}\n```\n\n"
61
+ else:
62
+ prompt += f"### Format: Implement a function called `main()` which orchastrates the solution by reading inputs from stdin and writing the answer to stdout. Feel free to use additional functions as necessary. Next do NOT forget to call `main` function at the end of the program otherwise you will not be awarded any points.\n"
63
+ prompt += "```python\n# YOUR CODE HERE\n```\n\n"
64
+ prompt += f"### Answer: (use the provided format with backticks)\n\n"
65
+ return prompt
66
+
67
+
68
+ def get_geminithinking_question_template_answer(question: CodeGenerationProblem):
69
+ prompt = f"### Question:\n{question.question_content}\n\n"
70
+ if question.starter_code:
71
+ prompt += (
72
+ f"### Format: {PromptConstants.FORMATTING_MESSAGE_WITH_STARTER_CODE}\n"
73
+ )
74
+ prompt += f"```python\n{question.starter_code}\n```\n\n"
75
+ else:
76
+ prompt += f"### Format: {PromptConstants.FORMATTING_WITHOUT_STARTER_CODE}\n"
77
+ prompt += "```python\n# YOUR CODE HERE\n```\n\n"
78
+ prompt += f"### Answer: (use the provided format with backticks)\n\n"
79
+ return prompt
80
+
81
+
82
+ def get_deepseekcode_question_template_answer(question: CodeGenerationProblem):
83
+ prompt = f"### Instruction: You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program.\n\n"
84
+ prompt += f"Question:\n{question.question_content}\n\n"
85
+ if question.starter_code:
86
+ prompt += (
87
+ f"### Instruction: {PromptConstants.FORMATTING_MESSAGE_WITH_STARTER_CODE}\n"
88
+ )
89
+ prompt += f"```python\n{question.starter_code}\n```\n\n"
90
+ else:
91
+ prompt += (
92
+ f"### Instruction: {PromptConstants.FORMATTING_WITHOUT_STARTER_CODE}\n"
93
+ )
94
+ prompt += f"```python\n# YOUR CODE HERE\n```\n\n"
95
+ prompt += f"### Response:\n\n"
96
+ return prompt
97
+
98
+
99
+ def get_qwen_question_template_answer(question: CodeGenerationProblem):
100
+ from transformers import AutoTokenizer
101
+
102
+ tokenizer = AutoTokenizer.from_pretrained(
103
+ "/abacus/models/Qwen1.5-72B-Chat/", padding_side="left", use_fast=False
104
+ )
105
+ prompt = "You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program.\n\n"
106
+ prompt += f"Question:\n{question.question_content}\n\n"
107
+ if question.starter_code:
108
+ prompt += f"{PromptConstants.FORMATTING_MESSAGE_WITH_STARTER_CODE}\n"
109
+ prompt += f"```python\n{question.starter_code}\n```\n\n"
110
+ else:
111
+ prompt += f"{PromptConstants.FORMATTING_WITHOUT_STARTER_CODE}\n\n"
112
+ prompt += f"```python\n# YOUR CODE HERE\n```\n\n"
113
+
114
+ messages = [
115
+ {"role": "system", "content": PromptConstants.SYSTEM_MESSAGE_GENERIC},
116
+ {"role": "user", "content": prompt},
117
+ ]
118
+
119
+ prompt = tokenizer.apply_chat_template(
120
+ messages,
121
+ tokenize=False,
122
+ add_generation_prompt=True,
123
+ truncation=False,
124
+ padding=False,
125
+ )
126
+ return prompt
127
+
128
+
129
+ def get_codeqwen_question_template_answer(question: CodeGenerationProblem):
130
+ prompt = "You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program.\n\n"
131
+ prompt += f"Question: {question.question_content}\n\n"
132
+ if question.starter_code:
133
+ prompt += f"{PromptConstants.FORMATTING_MESSAGE_WITH_STARTER_CODE}\n"
134
+ prompt += f"```python\n{question.starter_code}\n```\n\n<|im_end|>\n"
135
+ else:
136
+ prompt += f"{PromptConstants.FORMATTING_WITHOUT_STARTER_CODE}\n"
137
+ prompt += f"```python\n# YOUR CODE HERE\n```\n\n<|im_end|>\n"
138
+ prompt += f"<|im_start|>assistant\n"
139
+ return prompt
140
+
141
+
142
+ def get_qwen_qwq_question_template_answer(question: CodeGenerationProblem):
143
+ prompt = "You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests.\n\n"
144
+ prompt += f"Question: {question.question_content}\n\n"
145
+ if question.starter_code:
146
+ prompt += f"{PromptConstants.FORMATTING_MESSAGE_WITH_STARTER_CODE}\n"
147
+ prompt += f"```python\n{question.starter_code}\n```\n\n<|im_end|>\n"
148
+ else:
149
+ prompt += f"{PromptConstants.FORMATTING_WITHOUT_STARTER_CODE}\n"
150
+ prompt += f"```python\n# YOUR CODE HERE\n```\n\n<|im_end|>\n"
151
+ prompt += f"<|im_start|>assistant\n"
152
+ return prompt
153
+
154
+
155
+ def get_deepseek_r1_question_template_answer(question: CodeGenerationProblem):
156
+ # Following modifications from: https://github.com/fanqiwan/FuseAI/blob/main/FuseO1-Preview/code_evaluation/lcb_runner_cq/prompts/code_generation.py
157
+ prompt = "You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests.\n\n"
158
+ prompt += f"Question: {question.question_content}\n\n"
159
+ if question.starter_code:
160
+ prompt += f"{PromptConstants.FORMATTING_MESSAGE_WITH_STARTER_CODE}\n"
161
+ prompt += f"```python\n{question.starter_code}\n```\n\n"
162
+ else:
163
+ prompt += f"{PromptConstants.FORMATTING_WITHOUT_STARTER_CODE}\n"
164
+ prompt += f"```python\n# YOUR CODE HERE\n```\n\n"
165
+ prompt += f"<|Assistant|>"
166
+ return prompt
167
+
168
+
169
+ with open("lcb_runner/prompts/few_shot_examples/generation/func.json") as f:
170
+ func = json.load(f)
171
+
172
+ with open("lcb_runner/prompts/few_shot_examples/generation/stdin.json") as f:
173
+ stdin = json.load(f)
174
+
175
+
176
+ def get_base_model_question_template_answer(question: CodeGenerationProblem):
177
+ if question.starter_code:
178
+ examples_json = func
179
+ else:
180
+ examples_json = stdin
181
+
182
+ def get_example_prompt(example):
183
+ prompt = ""
184
+ prompt += "### Question\n"
185
+ prompt += example["question"]
186
+ prompt += "\n\n"
187
+ if question.starter_code:
188
+ prompt += "### Starter Code\n"
189
+ prompt += example["sample_code"]
190
+ prompt += "\n\n"
191
+ prompt += "### Answer\n\n"
192
+ prompt += example["answer"]
193
+ if example["answer"]:
194
+ prompt += "\n\n"
195
+ return prompt
196
+
197
+ prompt = ""
198
+ prompt += get_example_prompt(examples_json[0])
199
+ prompt += get_example_prompt(
200
+ {
201
+ "question": question.question_content,
202
+ "sample_code": question.starter_code,
203
+ "answer": "",
204
+ }
205
+ )
206
+ return prompt
207
+
208
+
209
+ def format_prompt_generation(
210
+ question: CodeGenerationProblem, LanguageModelStyle: LMStyle
211
+ ) -> str:
212
+ if LanguageModelStyle in [
213
+ LMStyle.OpenAIChat,
214
+ LMStyle.DeepSeekAPI,
215
+ LMStyle.TogetherAI,
216
+ LMStyle.CohereCommand,
217
+ ]:
218
+ chat_messages = [
219
+ {
220
+ "role": "system",
221
+ "content": PromptConstants.SYSTEM_MESSAGE_GENERIC,
222
+ },
223
+ ]
224
+ chat_messages += [
225
+ {
226
+ "role": "user",
227
+ "content": get_generic_question_template_answer(question),
228
+ },
229
+ ]
230
+ return chat_messages
231
+ elif LanguageModelStyle in [LMStyle.OpenAIReasonPreview, LMStyle.Grok]:
232
+ chat_messages = [
233
+ {
234
+ "role": "user",
235
+ "content": PromptConstants.SYSTEM_MESSAGE_GENERIC
236
+ + "\n\n"
237
+ + get_generic_question_template_answer(question),
238
+ },
239
+ ]
240
+ return chat_messages
241
+ elif LanguageModelStyle == LMStyle.OpenAIReason:
242
+ chat_messages = [
243
+ {
244
+ "role": "user",
245
+ "content": PromptConstants.SYSTEM_MESSAGE_GENERIC
246
+ + "\n\n"
247
+ + get_oaireason_question_template_answer(question),
248
+ },
249
+ ]
250
+ return chat_messages
251
+
252
+ if LanguageModelStyle == LMStyle.LLaMa3:
253
+ chat_messages = [
254
+ {
255
+ "role": "system",
256
+ "content": PromptConstants.SYSTEM_MESSAGE_GENERIC,
257
+ },
258
+ ]
259
+ chat_messages += [
260
+ {
261
+ "role": "user",
262
+ "content": get_generic_question_template_answer(question),
263
+ },
264
+ ]
265
+ from transformers import AutoTokenizer
266
+
267
+ tokenizer = AutoTokenizer.from_pretrained(
268
+ "meta-llama/Meta-Llama-3-8B-Instruct", padding_side="left", use_fast=False
269
+ )
270
+ return tokenizer.apply_chat_template(
271
+ chat_messages,
272
+ tokenize=False,
273
+ add_generation_prompt=True,
274
+ truncation=False,
275
+ padding=False,
276
+ )
277
+
278
+ if LanguageModelStyle == LMStyle.Claude:
279
+ prompt = f"{HUMAN_PROMPT}\n"
280
+ prompt += f"{PromptConstants.SYSTEM_MESSAGE_GENERIC}\n\n"
281
+ prompt += f"{get_generic_question_template_answer(question).rstrip()}\n"
282
+ prompt += f"{AI_PROMPT}"
283
+ return prompt
284
+
285
+ if LanguageModelStyle in [LMStyle.Claude3, LMStyle.Claude3Thinking]:
286
+ system = PromptConstants.SYSTEM_MESSAGE_GENERIC
287
+ prompt = [
288
+ {
289
+ "role": "user",
290
+ "content": get_generic_question_template_answer(question).rstrip(),
291
+ }
292
+ ]
293
+ return system, prompt
294
+
295
+ if LanguageModelStyle == LMStyle.Gemini:
296
+ prompt = f"{PromptConstants.SYSTEM_MESSAGE_GEMINI}\n"
297
+ prompt += f"{get_generic_question_template_answer(question)}"
298
+ return prompt
299
+
300
+ if LanguageModelStyle == LMStyle.GeminiThinking:
301
+ prompt = f"{PromptConstants.SYSTEM_MESSAGE_GEMINITHINK}\n"
302
+ prompt += f"{get_geminithinking_question_template_answer(question)}"
303
+ return prompt
304
+
305
+ if LanguageModelStyle == LMStyle.MistralWeb:
306
+ chat_messages = [
307
+ {
308
+ "role": "system",
309
+ "content": PromptConstants.SYSTEM_MESSAGE_GENERIC,
310
+ },
311
+ {
312
+ "role": "user",
313
+ "content": get_generic_question_template_answer(question),
314
+ },
315
+ ]
316
+ return chat_messages
317
+
318
+ if LanguageModelStyle == LMStyle.DeepSeekCodeInstruct:
319
+ prompt = f"{PromptConstants.SYSTEM_MESSAGE_DEEPSEEK}\n\n"
320
+ prompt += f"{get_deepseekcode_question_template_answer(question)}"
321
+ return prompt
322
+
323
+ if LanguageModelStyle == LMStyle.CodeQwenInstruct:
324
+ prompt = f"{PromptConstants.SYSTEM_MESSAGE_CODEQWEN}\n\n"
325
+ prompt += f"{get_codeqwen_question_template_answer(question)}"
326
+ return prompt
327
+
328
+ if LanguageModelStyle == LMStyle.QwQ:
329
+ prompt = f"{PromptConstants.SYSTEM_MESSAGE_QWEN_QWQ}\n\n"
330
+ prompt += f"{get_qwen_qwq_question_template_answer(question)}"
331
+ return prompt
332
+
333
+ if LanguageModelStyle == LMStyle.DeepSeekR1:
334
+ prompt = f"{PromptConstants.SYSTEM_MESSAGE_DEEPSEEK_R1}"
335
+ prompt += f"{get_deepseek_r1_question_template_answer(question)}"
336
+ return prompt
337
+
338
+ if LanguageModelStyle == LMStyle.GenericBase:
339
+ prompt = get_base_model_question_template_answer(question)
340
+ return prompt
341
+
342
+ raise NotImplementedError(
343
+ f"LanguageModelStyle {LanguageModelStyle} not implemented"
344
+ )
345
+
346
+
347
+ def test():
348
+ import pathlib
349
+
350
+ base_dir = "logs/example_prompts/generation"
351
+ pathlib.Path(base_dir).mkdir(parents=True, exist_ok=True)
352
+
353
+ for lmstyle in LMStyle:
354
+ generation_problem = CodeGenerationProblem(
355
+ "title",
356
+ "question-content",
357
+ "leetcode",
358
+ "question_id",
359
+ "contest_id",
360
+ "contest_date",
361
+ "",
362
+ "easy",
363
+ "[]",
364
+ "[]",
365
+ "{}",
366
+ )
367
+ prompt1 = format_prompt_generation(generation_problem, lmstyle)
368
+ with open(f"{base_dir}/{lmstyle}_1.txt", "w") as f:
369
+ try:
370
+ f.write(prompt1)
371
+ except TypeError:
372
+ f.write(json.dumps(prompt1))
373
+
374
+ generation_problem.starter_code = "starter code"
375
+ prompt2 = format_prompt_generation(generation_problem, lmstyle)
376
+ with open(f"{base_dir}/{lmstyle}_2.txt", "w") as f:
377
+ try:
378
+ f.write(prompt2)
379
+ except TypeError:
380
+ f.write(json.dumps(prompt2))
381
+
382
+
383
+ if __name__ == "__main__":
384
+ test()
lcb_runner/prompts/few_shot_examples/generation/func.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "question": "You are given a 0-indexed array of positive integers nums. Find the number of triplets (i, j, k) that meet the following conditions:\n\n0 <= i < j < k < nums.length\nnums[i], nums[j], and nums[k] are pairwise distinct.\n\t\nIn other words, nums[i] != nums[j], nums[i] != nums[k], and nums[j] != nums[k].\n\n\n\nReturn the number of triplets that meet the conditions.\n \nExample 1:\n\nInput: nums = [4,4,2,4,3]\nOutput: 3\nExplanation: The following triplets meet the conditions:\n- (0, 2, 4) because 4 != 2 != 3\n- (1, 2, 4) because 4 != 2 != 3\n- (2, 3, 4) because 2 != 4 != 3\nSince there are 3 triplets, we return 3.\nNote that (2, 0, 4) is not a valid triplet because 2 > 0.\n\nExample 2:\n\nInput: nums = [1,1,1,1,1]\nOutput: 0\nExplanation: No triplets meet the conditions so we return 0.\n\n \nConstraints:\n\n3 <= nums.length <= 100\n1 <= nums[i] <= 1000\n\n",
4
+ "sample_code": "class Solution:\n def unequalTriplets(self, nums: List[int]) -> int:\n ",
5
+ "answer": "class Solution:\n def unequalTriplets(self, a: List[int]) -> int:\n ans = 0\n n = len(a)\n for i in range(n):\n for j in range(i + 1, n):\n for k in range(j + 1, n):\n ans += len({a[i], a[j], a[k]}) == 3\n return ans"
6
+ },
7
+ {
8
+ "question": "You are given two strings s and t consisting of only lowercase English letters.\nReturn the minimum number of characters that need to be appended to the end of s so that t becomes a subsequence of s.\nA subsequence is a string that can be derived from another string by deleting some or no characters without changing the order of the remaining characters.\n \nExample 1:\n\nInput: s = \"coaching\", t = \"coding\"\nOutput: 4\nExplanation: Append the characters \"ding\" to the end of s so that s = \"coachingding\".\nNow, t is a subsequence of s (\"coachingding\").\nIt can be shown that appending any 3 characters to the end of s will never make t a subsequence.\n\nExample 2:\n\nInput: s = \"abcde\", t = \"a\"\nOutput: 0\nExplanation: t is already a subsequence of s (\"abcde\").\n\nExample 3:\n\nInput: s = \"z\", t = \"abcde\"\nOutput: 5\nExplanation: Append the characters \"abcde\" to the end of s so that s = \"zabcde\".\nNow, t is a subsequence of s (\"zabcde\").\nIt can be shown that appending any 4 characters to the end of s will never make t a subsequence.\n\n \nConstraints:\n\n1 <= s.length, t.length <= 10^5\ns and t consist only of lowercase English letters.\n\n",
9
+ "sample_code": "class Solution:\n def appendCharacters(self, s: str, t: str) -> int:\n ",
10
+ "answer": "class Solution:\n def appendCharacters(self, s: str, t: str) -> int:\n i = 0\n for char in s:\n if i < len(t) and char == t[i]:\n i += 1\n return len(t) - i"
11
+ }
12
+ ]
lcb_runner/prompts/few_shot_examples/generation/stdin.json ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "question": "You have $n$ gifts and you want to give all of them to children. Of course, you don't want to offend anyone, so all gifts should be equal between each other. The $i$-th gift consists of $a_i$ candies and $b_i$ oranges.\n\nDuring one move, you can choose some gift $1 \\le i \\le n$ and do one of the following operations:\n\n eat exactly one candy from this gift (decrease $a_i$ by one); eat exactly one orange from this gift (decrease $b_i$ by one); eat exactly one candy and exactly one orange from this gift (decrease both $a_i$ and $b_i$ by one). \n\nOf course, you can not eat a candy or orange if it's not present in the gift (so neither $a_i$ nor $b_i$ can become less than zero).\n\nAs said above, all gifts should be equal. This means that after some sequence of moves the following two conditions should be satisfied: $a_1 = a_2 = \\dots = a_n$ and $b_1 = b_2 = \\dots = b_n$ (and $a_i$ equals $b_i$ is not necessary).\n\nYour task is to find the minimum number of moves required to equalize all the given gifts.\n\nYou have to answer $t$ independent test cases.\n\n\n-----Input-----\n\nThe first line of the input contains one integer $t$ ($1 \\le t \\le 1000$) \u2014 the number of test cases. Then $t$ test cases follow.\n\nThe first line of the test case contains one integer $n$ ($1 \\le n \\le 50$) \u2014 the number of gifts. The second line of the test case contains $n$ integers $a_1, a_2, \\dots, a_n$ ($1 \\le a_i \\le 10^9$), where $a_i$ is the number of candies in the $i$-th gift. 
The third line of the test case contains $n$ integers $b_1, b_2, \\dots, b_n$ ($1 \\le b_i \\le 10^9$), where $b_i$ is the number of oranges in the $i$-th gift.\n\n\n-----Output-----\n\nFor each test case, print one integer: the minimum number of moves required to equalize all the given gifts.\n\n\n-----Example-----\nInput\n5\n3\n3 5 6\n3 2 3\n5\n1 2 3 4 5\n5 4 3 2 1\n3\n1 1 1\n2 2 2\n6\n1 1000000000 1000000000 1000000000 1000000000 1000000000\n1 1 1 1 1 1\n3\n10 12 8\n7 5 4\n\nOutput\n6\n16\n0\n4999999995\n7\n\n\n\n-----Note-----\n\nIn the first test case of the example, we can perform the following sequence of moves:\n\n choose the first gift and eat one orange from it, so $a = [3, 5, 6]$ and $b = [2, 2, 3]$; choose the second gift and eat one candy from it, so $a = [3, 4, 6]$ and $b = [2, 2, 3]$; choose the second gift and eat one candy from it, so $a = [3, 3, 6]$ and $b = [2, 2, 3]$; choose the third gift and eat one candy and one orange from it, so $a = [3, 3, 5]$ and $b = [2, 2, 2]$; choose the third gift and eat one candy from it, so $a = [3, 3, 4]$ and $b = [2, 2, 2]$; choose the third gift and eat one candy from it, so $a = [3, 3, 3]$ and $b = [2, 2, 2]$.",
4
+ "answer": "def minimum_moves(t, test_cases):\n for _ in range(t):\n n = test_cases[_][0]\n candies = test_cases[_][1]\n oranges = test_cases[_][2]\n min_candies = min(candies)\n min_oranges = min(oranges)\n ans = 0\n for i in range(n):\n ans += max(candies[i] - min_candies, oranges[i] - min_oranges)\n print(ans)\n\n\ndef main():\n t = int(input())\n test_cases = []\n for _ in range(t):\n n = int(input())\n candies = list(map(int, input().split()))\n oranges = list(map(int, input().split()))\n test_cases.append((n, candies, oranges))\n minimum_moves(t, test_cases)\n\n\nmain()\n"
5
+ },
6
+ {
7
+ "question": "Let's call a string a phone number if it has length 11 and fits the pattern \"8xxxxxxxxxx\", where each \"x\" is replaced by a digit.\n\nFor example, \"80123456789\" and \"80000000000\" are phone numbers, while \"8012345678\" and \"79000000000\" are not.\n\nYou have n cards with digits, and you want to use them to make as many phone numbers as possible. Each card must be used in at most one phone number, and you don't have to use all cards. The phone numbers do not necessarily have to be distinct.\n\nInput\n\nThe first line contains an integer n \u2014 the number of cards with digits that you have (1 \u2264 n \u2264 100).\n\nThe second line contains a string of n digits (characters \"0\", \"1\", ..., \"9\") s_1, s_2, \u2026, s_n. The string will not contain any other characters, such as leading or trailing spaces.\n\nOutput\n\nIf at least one phone number can be made from these cards, output the maximum number of phone numbers that can be made. Otherwise, output 0.\n\nExamples\n\nInput\n\n11\n00000000008\n\n\nOutput\n\n1\n\n\nInput\n\n22\n0011223344556677889988\n\n\nOutput\n\n2\n\n\nInput\n\n11\n31415926535\n\n\nOutput\n\n0\n\nNote\n\nIn the first example, one phone number, \"8000000000\", can be made from these cards.\n\nIn the second example, you can make two phone numbers from the cards, for example, \"80123456789\" and \"80123456789\".\n\nIn the third example you can't make any phone number from the given cards.",
8
+ "answer": "def count_phone_numbers(num_cards, card_digits):\n count_eights = card_digits.count(\"8\")\n max_phone_numbers = num_cards // 11\n max_possible = min(count_eights, max_phone_numbers)\n return max_possible\n\ndef main():\n num_cards = int(input())\n card_digits = input().strip()\n max_possible = count_phone_numbers(num_cards, card_digits)\n print(max_possible)\n\nmain()"
9
+ }
10
+ ]
lcb_runner/prompts/self_repair.py ADDED
@@ -0,0 +1,352 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+
3
+ from anthropic import HUMAN_PROMPT, AI_PROMPT
4
+
5
+ from lcb_runner.lm_styles import LMStyle
6
+
7
+
8
+ class PromptConstants:
9
+ SYSTEM_MESSAGE_GENERIC = f"You are a helpful programming assistant and an expert Python programmer. You are helping a user write a program to solve a problem. The user has written some code, but it has some errors and is not passing the tests. You will help the user by first giving a concise (at most 2-3 sentences) textual explanation of what is wrong with the code. After you have pointed out what is wrong with the code, you will then generate a fixed version of the program. You must put the entired fixed program within code delimiters only for once."
10
+
11
+ SYSTEM_MESSAGE_DEEPSEEK = f"You are an AI programming assistant, utilizing the DeepSeek Coder model, developed by DeepSeek Company, and you are helping a user correct a error program for code competition. The user has written some code, but it has some errors and is not passing the tests. You will help the user by first giving a concise (at most 2-3 sentences) textual explanation of what is wrong with the code. After you have pointed out what is wrong with the code, you will then generate a fixed version of the entire executable program. You must put the entire fixed executable program within code delimiters."
12
+
13
+ SYSTEM_MESSAGE_MAGIC = f"You are an exceptionally intelligent coding assistant that consistently delivers accurate and reliable responses to user instructions.\n\n@@ Instruction\n"
14
+
15
+ SYSTEM_MESSAGE_WIZARD = "Below is an instruction that describes a task. Write a response that appropriately completes the request."
16
+
17
+ SYSTEM_MESSAGE_PHIND = f"""You are an expert Python programmer. You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program. You must put the entired fixed program within code delimiters only for once., for example:
18
+ ```python
19
+ # YOUR CODE HERE
20
+ ```"""
21
+
22
+ FORMATTING_REPEAT = f"First reason about the code providing a textual explanation of what is wrong with the code and then generate a fixed of the program enclosed code delimiters."
23
+
24
+ FORMATTING_MESSAGE = "You will use the following starter code to write the solution to the problem and enclose your code within delimiters."
25
+
26
+ FORMATTING_WITHOUT_STARTER_CODE = "Read the inputs from stdin solve the problem and write the answer to stdout (do not directly test on the sample inputs). Enclose your code within delimiters as follows."
27
+
28
+
29
+ # def truncate_io(io):
30
+ # if len(str(io)) > 200:
31
+ # io = str(io)[:200] + "...."
32
+ # return io
33
+
34
+
35
+ def get_check_prompt(question: str, result, metadata):
36
+ ## assumes i/o examples are already truncated!
37
+ ## less pressure on storing 10 MB json because on a single large input-output pair
38
+ # result_by_test_case = result
39
+ # assert len(metadata) == 1, f"metadata = {metadata}"
40
+ # metadata = metadata[0]
41
+ metadata = json.loads(metadata)
42
+ if "error_code" not in metadata:
43
+ return ""
44
+ if metadata["error_code"] == -1:
45
+ # time limit exceeded
46
+ message = f"The above code is incorrect and got the following compilation error.\n{metadata['error']}"
47
+ elif metadata["error_code"] == -2:
48
+ # wrong answer
49
+ message = f"The above code is incorrect and got a wrong answer.\nInput: {metadata['inputs']}\nGenerated Output: {metadata['output']}\nExpected: {metadata['expected']}"
50
+ elif metadata["error_code"] == -3:
51
+ # time limit exceeded
52
+ message = f"The above code is incorrect and got time limit exceeded.\n{metadata['error']}\nInput: {metadata['inputs']}\nExpected: {metadata['expected']}"
53
+ pass
54
+ elif metadata["error_code"] == -4:
55
+ # runtime error
56
+ message = f"The above code is incorrect and got a runtime error.\nInput: {metadata['inputs']}\nExpected: {metadata['expected']}\n{metadata['error']}"
57
+ else:
58
+ raise NotImplementedError(
59
+ f"metadata['error_code'] = {metadata['error_code']} not implemented || {metadata=}"
60
+ )
61
+ return message
62
+
63
+
64
+ def get_generic_question_template_answer(question: str, code, result, metadata):
65
+ prompt = f"### Question:\n{question}\n\n"
66
+ prompt += f"### Answer:\n```python\n{code}\n```\n\n"
67
+ prompt += get_check_prompt(question, result, metadata) + "\n"
68
+ prompt += f"### Format: {PromptConstants.FORMATTING_WITHOUT_STARTER_CODE}\n"
69
+ prompt += "```python\n# YOUR CODE HERE\n```\n\n"
70
+ prompt += f"### Answer: (use the provided format with backticks)\n\n"
71
+ return prompt
72
+
73
+
74
+ def get_cllama_question_template_answer(question: str, code, result, metadata):
75
+ prompt = f"### Question\n{question}\n\n"
76
+ prompt += f"### Answer\n```python\n{code}\n```\n\n"
77
+ prompt += get_check_prompt(question, result, metadata)
78
+ prompt += f"### Format: {PromptConstants.FORMATTING_WITHOUT_STARTER_CODE}\n"
79
+ prompt += "```python\n# YOUR CODE HERE\n```\n\n"
80
+ prompt += f"### Answer: (use the provided format with backticks)\n\n"
81
+ return prompt
82
+
83
+
84
+ def get_deepseekcode_question_template_answer(question: str, code, result, metadata):
85
+ prompt = f"### Instruction: You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program.\n\n"
86
+ prompt += f"Question:\n{question}\n\n"
87
+ prompt += f"### Response:\n```python\n{code}\n```\n\n"
88
+ prompt += get_check_prompt(question, result, metadata)
89
+ prompt += f"### Format: {PromptConstants.FORMATTING_WITHOUT_STARTER_CODE}\n"
90
+ prompt += "```python\n# YOUR CODE HERE\n```\n\n"
91
+ prompt += f"### Answer: (use the provided format with backticks)\n\n"
92
+ return prompt
93
+
94
+
95
+ def get_magicoder_question_template_answer(question: str, code, result, metadata):
96
+ prompt = f"You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program.\n\n"
97
+ prompt += f"Question:\n{question}\n\n"
98
+ prompt += f"@@ Response \n```python\n{code}\n```\n\n"
99
+ prompt += get_check_prompt(question, result, metadata)
100
+ prompt += f"### Format: {PromptConstants.FORMATTING_WITHOUT_STARTER_CODE}\n"
101
+ prompt += "```python\n# YOUR CODE HERE\n```\n\n"
102
+ prompt += f"### Answer: (use the provided format with backticks)\n\n"
103
+ return prompt
104
+
105
+
106
def get_mixtral_question_template_answer(question: str, code, result, metadata):
    """Assemble the Mixtral self-repair prompt.

    Shows the question, the previous (incorrect) solution, the failure
    feedback from get_check_prompt, and the required answer format.

    Bug fix: the closing fence after the old code used two backticks
    instead of three, producing a malformed markdown code block in the
    prompt shown to the model.
    """
    prompt = f"Question:\n"
    prompt += f"{question}\n\n"
    prompt += f"Answer:\n\n"
    prompt += f"```python\n\n{code}\n```\n\n"  # was "\n``\n\n": broken fence
    prompt += get_check_prompt(question, result, metadata)
    prompt += f"### Format: {PromptConstants.FORMATTING_WITHOUT_STARTER_CODE}\n"
    prompt += "```python\n# YOUR CODE HERE\n```\n\n"
    prompt += f"### Answer: (use the provided format with backticks)\n\n"
    return prompt
116
+
117
+
118
def get_wizard_question_template_answer(question: str, code, result, metadata):
    """Assemble the WizardCoder self-repair prompt.

    A fixed instruction header (kept verbatim, including its original
    wording) is followed by the question, the failing code, the failure
    feedback, and the expected answer format.
    """
    header = f"""### Instruction: You are a helpful programming assistant and an expert Python programmer. You are helping a user write a program to solve a problem. The user has written some code, but it has some errors and is not passing the tests. You will help the user by first giving a concise (at most 2-3 sentences) textual explanation of what is wrong with the code. After you have pointed out what is wrong with the code, you will then generate a fixed version of the program. You must put the entired fixed program within code delimiters only for once., for example:
```python
# YOUR CODE HERE
```
"""
    tail = [
        f"{question}\n\n",
        f"### Response:```python\n\n{code}\n```\n\n",
        get_check_prompt(question, result, metadata),
        f"### Format: {PromptConstants.FORMATTING_WITHOUT_STARTER_CODE}\n",
        "```python\n# YOUR CODE HERE\n```\n\n",
        "### Answer: (use the provided format with backticks)\n\n",
    ]
    return header + "".join(tail)
131
+
132
+
133
def get_phind_question_template_answer(question: str, code, result, metadata):
    """Assemble the Phind self-repair prompt (uses the "### Assistant" framing)."""
    segments = [
        f"{question}\n\n",
        f"```python\n{code}\n``` \n\n",
        get_check_prompt(question, result, metadata),
        "\n\n### Assistant",
        f"### Format: {PromptConstants.FORMATTING_WITHOUT_STARTER_CODE}\n",
        "```python\n# YOUR CODE HERE\n```\n\n",
        "### Answer: (use the provided format with backticks)\n\n",
    ]
    return "".join(segments)
142
+
143
def _dracarys_qwen_repair_tokenizer():
    """Load the Dracarys-72B tokenizer once and cache it on the function.

    The original code called AutoTokenizer.from_pretrained on every prompt
    build, which is expensive (disk and possibly network) and pure overhead
    since the tokenizer never changes within a run.
    """
    cached = getattr(_dracarys_qwen_repair_tokenizer, "_cached", None)
    if cached is None:
        from transformers import AutoTokenizer

        cached = AutoTokenizer.from_pretrained(
            "abacusai/Dracarys-72B-Instruct", padding_side="left", use_fast=False
        )
        _dracarys_qwen_repair_tokenizer._cached = cached
    return cached


def get_qwen_question_template_answer(question: str, code, result, metadata):
    """Assemble the Dracarys-Qwen self-repair prompt and render it through
    the model's chat template.

    The textual prompt (instruction header, question, failing code, failure
    feedback, answer format) is wrapped as a single user message and passed
    to tokenizer.apply_chat_template so the model sees its native format.
    """
    tokenizer = _dracarys_qwen_repair_tokenizer()
    prompt = f"""### Instruction: You are a helpful programming assistant and an expert Python programmer. You are helping a user write a program to solve a problem. The user has written some code, but it has some errors and is not passing the tests. You will help the user by first giving a concise (at most 2-3 sentences) textual explanation of what is wrong with the code. After you have pointed out what is wrong with the code, you will then generate a fixed version of the program. You must put the entired fixed program within code delimiters only for once., for example:
```python
# YOUR CODE HERE
```\n\n
"""
    prompt += f"Question:\n{question}\n\n"
    prompt += f"```python\n{code}\n``` \n\n"
    prompt += get_check_prompt(question, result, metadata)
    prompt += f"\n\n### Assistant"
    prompt += f"### Format: {PromptConstants.FORMATTING_WITHOUT_STARTER_CODE}\n"
    prompt += "```python\n# YOUR CODE HERE\n```\n\n"
    prompt += f"### Answer: (use the provided format with backticks)\n\n"

    messages = [
        {"role": "user", "content": prompt},
    ]

    # Render through the chat template (string output, generation prompt
    # appended) rather than tokenizing here.
    prompt = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
        truncation=False,
        padding=False,
    )
    return prompt
174
+
175
def format_prompt_self_repair(
    question: str, LanguageModelStyle: LMStyle, code, result, metadata
) -> str:
    """Build a self-repair prompt for the given model family.

    Returns "" when `result` is truthy (the previous attempt passed, so no
    repair is needed). NOTE: despite the declared `-> str`, the return shape
    varies by style — a plain string, a list of chat messages, or a
    (system, messages) tuple for Claude3 — callers must handle all three.
    """
    if result:
        # The code is accepted, no need to change anything.
        return ""
    # Chat-message styles: return a message list for the client to send.
    if LanguageModelStyle == LMStyle.OpenAIChat:
        chat_messages = [
            {"role": "system", "content": PromptConstants.SYSTEM_MESSAGE_GENERIC},
        ]
        chat_messages += [
            {
                "role": "user",
                "content": get_generic_question_template_answer(
                    question, code, result, metadata
                )
                + "\n\n"
                + PromptConstants.FORMATTING_REPEAT,
            },
        ]
        return chat_messages
    if LanguageModelStyle == LMStyle.LLaMa3:
        chat_messages = [
            {"role": "system", "content": PromptConstants.SYSTEM_MESSAGE_GENERIC},
        ]
        chat_messages += [
            {
                "role": "user",
                "content": get_generic_question_template_answer(
                    question, code, result, metadata
                ),
            },
        ]

        # Render through the HF chat template so local inference sees the
        # model's native special tokens. NOTE(review): tokenizer is reloaded
        # on every call — expensive if many prompts are built.
        from transformers import AutoTokenizer

        tokenizer = AutoTokenizer.from_pretrained(
            "meta-llama/Meta-Llama-3-8B-Instruct", padding_side="left", use_fast=False
        )
        return tokenizer.apply_chat_template(
            chat_messages,
            tokenize=False,
            add_generation_prompt=True,
            truncation=False,
            padding=False,
        )
    elif LanguageModelStyle == LMStyle.Claude:
        # Legacy Claude completions format (HUMAN_PROMPT / AI_PROMPT markers).
        prompt = f"{HUMAN_PROMPT}\n{PromptConstants.SYSTEM_MESSAGE_GENERIC}\n\n{get_generic_question_template_answer(question, code, result, metadata).rstrip()}\n{AI_PROMPT}"
        return prompt
    elif LanguageModelStyle == LMStyle.Claude3:
        # Claude3 Messages API takes the system prompt separately.
        system = PromptConstants.SYSTEM_MESSAGE_GENERIC
        prompt = [
            {
                "role": "user",
                "content": get_generic_question_template_answer(
                    question, code, result, metadata
                ).rstrip(),
            }
        ]
        return system, prompt
    elif LanguageModelStyle == LMStyle.MistralWeb:
        chat_messages = [
            {
                "role": "system",
                "content": PromptConstants.SYSTEM_MESSAGE_GENERIC,
            },
        ]
        chat_messages += [
            {
                "role": "user",
                "content": get_generic_question_template_answer(question, code, result, metadata),
            },
        ]
        return chat_messages
    # Plain-string styles: system message prepended to the generic template.
    elif LanguageModelStyle == LMStyle.Gemini:
        prompt = f"{PromptConstants.SYSTEM_MESSAGE_GENERIC}\n{get_generic_question_template_answer(question, code, result,metadata)}"
        return prompt
    elif LanguageModelStyle == LMStyle.StarCoderInstruct:
        prompt = f"{PromptConstants.SYSTEM_MESSAGE_GENERIC}\n{get_generic_question_template_answer(question, code, result,metadata)}"
        return prompt

    elif LanguageModelStyle == LMStyle.DeepSeekCodeInstruct:
        prompt = f"{PromptConstants.SYSTEM_MESSAGE_DEEPSEEK}\n\n{get_deepseekcode_question_template_answer(question, code, result,metadata)}"
        return prompt
    elif LanguageModelStyle == LMStyle.CodeLLaMaInstruct:
        prompt = f"[INST] <<SYS>>\n{PromptConstants.SYSTEM_MESSAGE_GENERIC}\n<</SYS>>\n\n{get_cllama_question_template_answer(question, code, result,metadata)}\n[/INST]"
        return prompt
    elif LanguageModelStyle == LMStyle.MagiCoder:
        prompt = f"{PromptConstants.SYSTEM_MESSAGE_MAGIC}\n{get_magicoder_question_template_answer(question, code, result,metadata)}"
        return prompt
    elif LanguageModelStyle == LMStyle.WizardCoder:
        prompt = f"{PromptConstants.SYSTEM_MESSAGE_WIZARD}\n\n{get_wizard_question_template_answer(question, code, result,metadata)}"
        return prompt
    elif LanguageModelStyle == LMStyle.Phind:
        prompt = f"### System Prompt\n\n{PromptConstants.SYSTEM_MESSAGE_PHIND}\n\n### User Message\n\n{get_phind_question_template_answer(question, code, result,metadata)}"
        return prompt
    elif LanguageModelStyle == LMStyle.DracarysQwen:
        # Qwen template already renders its own chat format.
        prompt = f"{get_qwen_question_template_answer(question, code, result,metadata)}"
        return prompt
    elif LanguageModelStyle == LMStyle.DracarysLlama:
        chat_messages = [
            {"role": "system", "content": PromptConstants.SYSTEM_MESSAGE_GENERIC},
        ]
        chat_messages += [
            {
                "role": "user",
                "content": get_generic_question_template_answer(
                    question, code, result, metadata
                ),
            },
        ]

        from transformers import AutoTokenizer

        tokenizer = AutoTokenizer.from_pretrained(
            "abacusai/Dracarys-Llama-3.1-70B-Instruct", padding_side="right", use_fast=False
        )
        return tokenizer.apply_chat_template(
            chat_messages,
            tokenize=False,
            add_generation_prompt=True,
            truncation=False,
            padding=False,
        )
    # NOTE(review): `if` here (not `elif`) — harmless because every branch
    # above returns, but inconsistent with the rest of the chain.
    if LanguageModelStyle == LMStyle.Eurusx:
        prompt = "[INST] Write Python code to solve the task:\n"
        prompt += f"{get_wizard_question_template_answer(question, code, result,metadata)}"
        prompt += "[/INST]"
        return prompt
    else:
        raise NotImplementedError(
            f"LanguageModelStyle {LanguageModelStyle} not implemented"
        )
308
+
309
+
310
def extract_code(model_output: str, lmstyle: LMStyle):
    """Extract the first delimited code block from a model response.

    CodeLLaMa wraps code in [PYTHON]...[/PYTHON] tags; every other style is
    expected to emit markdown backtick fences. Returns "" when fewer than
    two delimiter lines are present.
    """
    marker = "PYTHON]" if lmstyle == LMStyle.CodeLLaMa else "```"
    lines = model_output.split("\n")
    fence_positions = [idx for idx, text in enumerate(lines) if marker in text]
    if len(fence_positions) < 2:
        return ""
    start, end = fence_positions[0], fence_positions[1]
    return "\n".join(lines[start + 1 : end])
319
+
320
+
321
def test():
    """Smoke test: build a self-repair prompt from a saved GPT-3.5 evaluation
    file and dump it to /tmp for manual inspection.

    NOTE(review): depends on a local ``output/`` directory produced by a
    previous codegeneration run — not runnable in isolation.
    """

    def write_str_or_json(prompt):
        # Chat-style prompts are message lists -> serialize as JSON;
        # plain string prompts are written verbatim. `fp` is late-bound
        # from the enclosing `with` block below.
        if isinstance(prompt, str):
            fp.write(prompt)
        else:
            fp.write(json.dumps(prompt))
        return

    for lm_style in [LMStyle.OpenAIChat]:
        with open(
            "output/GPT-3.5-Turbo-0125/Scenario.codegeneration_10_0.2_eval_all.json"
        ) as f:
            check_metadata = json.load(f)[0]
        # Use the first problem's first sample as the repair candidate.
        checked_base_question_cotent = check_metadata["question_content"]
        checked_base_codes = check_metadata["code_list"][0]
        checked_base_results = check_metadata["graded_list"][0]
        checked_base_metadata = check_metadata["metadata"][0]
        leetcode_prompt = format_prompt_self_repair(
            checked_base_question_cotent,
            lm_style,
            checked_base_codes,
            checked_base_results,
            checked_base_metadata,
        )

        with open(f"/tmp/leetcode_{lm_style}.txt", "w") as fp:
            write_str_or_json(leetcode_prompt)
    return


if __name__ == "__main__":
    test()
lcb_runner/prompts/test_output_prediction.py ADDED
@@ -0,0 +1,311 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+
3
+ from anthropic import HUMAN_PROMPT, AI_PROMPT
4
+
5
+ from lcb_runner.lm_styles import LMStyle
6
+ from lcb_runner.benchmarks import TestOutputPredictionProblem
7
+
8
+
9
class PromptConstants:
    """Prompt fragments shared by the test-output-prediction templates."""

    # Generic system message for chat-style APIs: the model must complete
    # the assertion with the computed output inside a markdown code block.
    SYSTEM_MESSAGE_CHAT_GENERIC = f"You are a helpful programming assistant and an expert Python programmer.\
 You are helping a user to write a test case to help to check the correctness of the function.\
 The user has written a input for the testcase.\
 You will calculate the output of the testcase and\
 write the whole assertion statement in the markdown code block with the correct output."

    # Shorter variant for plain-completion models (no output-format rules).
    SYSTEM_MESSAGE_COMPLETION_GENERIC = f"You are a helpful programming assistant and an expert Python programmer.\
 You are helping a user to write a test case to help to check the correctness of the function."

    # CodeLLaMa-Instruct variant: answer goes between [PYTHON]/[/PYTHON] tags.
    SYSTEM_MESSAGE_INST_CLLAMA = f"You are a helpful programming assistant and an expert Python programmer.\
 You are helping a user to write a test case to help to check the correctness of the function.\
 The user has written a input for the testcase.\
 You will calculate the output of the testcase and \
 write out the complete assertion statement between [PYTHON] and [/PYTHON] tags."

    # WizardCoder's standard Alpaca-style preamble.
    SYSTEM_MESSAGE_WIZARD = "Below is an instruction that describes a task. Write a response that appropriately completes the request."

    # Phind system prompt (kept verbatim from the code-generation scenario).
    SYSTEM_MESSAGE_PHIND = f"""You are an expert Python programmer. You will be given a question (problem specification) and will generate a correct Python program that matches the specification and passes all tests. You will NOT return anything except for the program. You must put the entired fixed program within code delimiters only for once., for example:
```python
# YOUR CODE HERE
```"""

    # Instructions for problems that provide starter code.
    FORMATTING_MESSAGE = "You will use the following starter code to write the solution to the problem and enclose your code within delimiters."

    # Instructions for stdin/stdout problems (no starter code).
    FORMATTING_WITHOUT_STARTER_MESSAGE = "Read the inputs from stdin solve the problem and write the answer to stdout (do not directly test on the sample inputs). Enclose your code within delimiters as follows."
35
+
36
+
37
def truncate_io(io, max_len: int = 1000):
    """Truncate an oversized test-case input/output for prompt display.

    Args:
        io: The test-case input or output. Short values are returned
            unchanged (and keep their original type); oversized values are
            stringified and truncated.
        max_len: Maximum number of characters kept before the ellipsis
            marker. Defaults to the previously hard-coded 1000, so existing
            callers are unaffected.

    Returns:
        The original object when it is short enough, otherwise a truncated
        string ending in "....".
    """
    if len(str(io)) > max_len:
        io = str(io)[:max_len] + "...."
        # Log the truncated value so oversized cases are visible in run logs.
        print(io)
    return io
42
+
43
+
44
def format_testcase_func_name_input(function_name, testcase):
    """Render a newline-separated testcase as an unfinished assert statement.

    Each line of *testcase* becomes one positional argument, producing
    ``assert func_name(arg1, arg2) == # TODO`` for the model to complete.
    """
    # TODO should there be a space after the == ?
    arguments = testcase.split("\n")
    call = f"{function_name}({', '.join(arguments)})"
    return f"assert {call} == # TODO"
51
+
52
+
53
def parse_function_name_from_starter_code(starter_code):
    """Return the name of the single function defined in *starter_code*.

    Parses the snippet with ``ast`` and walks the whole tree. Mirrors the
    original sanity check: an AssertionError is raised when more than one
    ``FunctionDef`` is found, and ``None`` is returned when there is none.
    """
    import ast

    matches = [
        node.name
        for node in ast.walk(ast.parse(starter_code))
        if isinstance(node, ast.FunctionDef)
    ]
    assert len(matches) <= 1
    return matches[0] if matches else None
66
+
67
+
68
def get_generic_question_template_test_completion(
    question: TestOutputPredictionProblem, testcase_input: str
):
    """Build the generic test-output-prediction prompt.

    Shows the problem statement and the starter code, then asks the model
    to complete an assert statement built from the testcase input.
    """
    # Derive the function name from the starter code to shape the assert.
    func_name = parse_function_name_from_starter_code(question.starter_code)
    assertion_stub = format_testcase_func_name_input(func_name, testcase_input)
    parts = [
        f"Problem:\n{question.question_content}",
        f"Function:\n```\n{question.starter_code}\n```\n",
        "Please complete the following test case:\n\n",
        f"```\n{assertion_stub}\n```\n",
    ]
    return "".join(parts)
82
+
83
+
84
def get_cllama_question_template_answer(
    question: TestOutputPredictionProblem, testcase_input: str
):
    """Wrap the generic test-completion prompt in CodeLLaMa's Q/A framing."""
    body = get_generic_question_template_test_completion(question, testcase_input)
    return f"### Question\n{body}### Answer\n"
91
+
92
+
93
def get_deepseekcode_question_template_answer(
    question: TestOutputPredictionProblem, testcase_input: str
):
    """Wrap the generic test-completion prompt in DeepSeek's instruct framing."""
    body = get_generic_question_template_test_completion(question, testcase_input)
    return (
        f"### Instruction: {PromptConstants.SYSTEM_MESSAGE_CHAT_GENERIC}\n\n"
        + body
        + "### Response:\n\n"
    )
100
+
101
+
102
def get_magicoder_question_template_answer(
    question: TestOutputPredictionProblem, testcase_input: str
):
    """Wrap the generic test-completion prompt in MagiCoder's @@ Response framing."""
    body = get_generic_question_template_test_completion(question, testcase_input)
    return f"Question:\n{body}@@ Response \n"
110
+
111
+
112
def get_mixtral_question_template_answer(
    question: TestOutputPredictionProblem, testcase_input: str
):
    """Mixtral uses the generic test-completion prompt verbatim."""
    return get_generic_question_template_test_completion(question, testcase_input)
117
+
118
+
119
def get_wizard_question_template_answer(
    question: TestOutputPredictionProblem, testcase_input: str
):
    """Wrap the generic test-completion prompt in WizardCoder's instruct framing."""
    body = get_generic_question_template_test_completion(question, testcase_input)
    return (
        f"### Instruction: {PromptConstants.SYSTEM_MESSAGE_CHAT_GENERIC}\n"
        + body
        + "### Response:\n"
    )
126
+
127
+
128
def get_phind_question_template_answer(
    question: TestOutputPredictionProblem, testcase_input: str
):
    """Append Phind's "### Assistant" marker to the generic prompt."""
    body = get_generic_question_template_test_completion(question, testcase_input)
    return body + "\n\n### Assistant"
134
+
135
def _dracarys_qwen_top_tokenizer():
    """Load the Dracarys-72B tokenizer once and cache it on the function.

    The original code re-ran AutoTokenizer.from_pretrained for every
    benchmark problem, which is expensive (disk/possibly network) and
    needless since the tokenizer is identical across calls.
    """
    cached = getattr(_dracarys_qwen_top_tokenizer, "_cached", None)
    if cached is None:
        from transformers import AutoTokenizer

        cached = AutoTokenizer.from_pretrained(
            "abacusai/Dracarys-72B-Instruct", padding_side="left", use_fast=False
        )
        _dracarys_qwen_top_tokenizer._cached = cached
    return cached


def get_qwen_question_template_answer(question: TestOutputPredictionProblem, testcase_input: str):
    """Build the Dracarys-Qwen test-output prompt rendered through the
    model's chat template.

    Wraps the generic instruct-style prompt as a single user message and
    applies the tokenizer's chat template (string output, generation prompt
    appended).
    """
    tokenizer = _dracarys_qwen_top_tokenizer()

    prompt = f"""### Instruction: {PromptConstants.SYSTEM_MESSAGE_CHAT_GENERIC}\n"""
    prompt += get_generic_question_template_test_completion(question, testcase_input)
    prompt += f"### Response:\n"

    messages = [
        {"role": "user", "content": prompt},
    ]

    prompt = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
        truncation=False,
        padding=False,
    )
    return prompt
158
+
159
def format_prompt_test_output(
    question: TestOutputPredictionProblem, LanguageModelStyle: LMStyle
) -> str:
    """Build a test-output-prediction prompt for the given model family.

    Only the FIRST test of the problem is used. NOTE: despite the declared
    `-> str`, the return shape varies by style — a plain string, a list of
    chat messages, or a (system, messages) tuple for Claude3.
    """
    testcase_input = question.test[0].input
    # Chat-message styles: return a message list for the client to send.
    if LanguageModelStyle == LMStyle.OpenAIChat:
        chat_messages = [
            {
                "role": "system",
                "content": PromptConstants.SYSTEM_MESSAGE_CHAT_GENERIC,
            },
        ]
        chat_messages += [
            {
                "role": "user",
                "content": get_generic_question_template_test_completion(
                    question, testcase_input
                ),
            },
        ]
        return chat_messages
    if LanguageModelStyle == LMStyle.LLaMa3:
        chat_messages = [
            {
                "role": "system",
                "content": PromptConstants.SYSTEM_MESSAGE_CHAT_GENERIC,
            },
        ]
        chat_messages += [
            {
                "role": "user",
                "content": get_generic_question_template_test_completion(
                    question, testcase_input
                ),
            },
        ]
        # Render through the HF chat template for local inference.
        # NOTE(review): tokenizer is reloaded on every call.
        from transformers import AutoTokenizer

        tokenizer = AutoTokenizer.from_pretrained(
            "meta-llama/Meta-Llama-3-8B-Instruct", padding_side="left", use_fast=False
        )
        return tokenizer.apply_chat_template(
            chat_messages,
            tokenize=False,
            add_generation_prompt=True,
            truncation=False,
            padding=False,
        )
    elif LanguageModelStyle == LMStyle.Claude:
        # Legacy Claude completions format (HUMAN_PROMPT / AI_PROMPT markers).
        prompt = f"{HUMAN_PROMPT}\n{PromptConstants.SYSTEM_MESSAGE_CHAT_GENERIC}\n\n"
        prompt += f"{get_generic_question_template_test_completion(question, testcase_input).rstrip()}\n{AI_PROMPT}"
        return prompt
    elif LanguageModelStyle == LMStyle.Claude3:
        # Claude3 Messages API takes the system prompt separately.
        system = PromptConstants.SYSTEM_MESSAGE_CHAT_GENERIC
        prompt = [
            {
                "role": "user",
                "content": get_generic_question_template_test_completion(
                    question, testcase_input
                ).rstrip(),
            }
        ]
        return system, prompt
    # Plain-string styles: system message prepended to the generic template.
    elif LanguageModelStyle == LMStyle.Gemini:
        prompt = f"{PromptConstants.SYSTEM_MESSAGE_CHAT_GENERIC}\n"
        prompt += (
            f"{get_generic_question_template_test_completion(question, testcase_input)}"
        )
        return prompt

    elif LanguageModelStyle == LMStyle.StarCoderInstruct:
        prompt = f"{PromptConstants.SYSTEM_MESSAGE_CHAT_GENERIC}\n"
        prompt += (
            f"{get_generic_question_template_test_completion(question, testcase_input)}"
        )
        return prompt

    elif LanguageModelStyle == LMStyle.DeepSeekCodeInstruct:
        prompt = (
            f"{get_deepseekcode_question_template_answer(question, testcase_input)}"
        )
        return prompt
    elif LanguageModelStyle == LMStyle.CodeLLaMaInstruct:
        prompt = f"[INST] <<SYS>>\n{PromptConstants.SYSTEM_MESSAGE_INST_CLLAMA}\n<</SYS>>\n\n"
        prompt += (
            f"{get_cllama_question_template_answer(question, testcase_input)}\n[/INST]"
        )
        return prompt
    elif LanguageModelStyle == LMStyle.MagiCoder:
        prompt = f"{PromptConstants.SYSTEM_MESSAGE_CHAT_GENERIC}\n"
        prompt += f"{get_magicoder_question_template_answer(question, testcase_input)}"
        return prompt
    elif LanguageModelStyle == LMStyle.WizardCoder:
        prompt = f"{PromptConstants.SYSTEM_MESSAGE_WIZARD}\n\n{get_wizard_question_template_answer(question, testcase_input)}"
        return prompt
    elif LanguageModelStyle == LMStyle.Phind:
        prompt = f"### System Prompt\n\n{PromptConstants.SYSTEM_MESSAGE_CHAT_GENERIC}\n\n### User Message\n\n{get_phind_question_template_answer(question, testcase_input)}"
        return prompt
    elif LanguageModelStyle == LMStyle.OC:
        prompt = f"{PromptConstants.SYSTEM_MESSAGE_CHAT_GENERIC}\n"
        prompt += (
            f"{get_generic_question_template_test_completion(question, testcase_input)}"
        )
        return prompt
    elif LanguageModelStyle == LMStyle.MistralWeb:
        chat_messages = [
            {
                "role": "system",
                "content": PromptConstants.SYSTEM_MESSAGE_CHAT_GENERIC,
            },
            {
                "role": "user",
                "content": get_generic_question_template_test_completion(
                    question, testcase_input
                ),
            },
        ]
        return chat_messages
    elif (
        LanguageModelStyle == LMStyle.DracarysQwen
    ):
        # Qwen template already renders its own chat format.
        prompt = f"{get_qwen_question_template_answer(question, testcase_input)}"
        return prompt
    elif LanguageModelStyle == LMStyle.DracarysLlama:
        chat_messages = [
            {
                "role": "system",
                "content": PromptConstants.SYSTEM_MESSAGE_CHAT_GENERIC,
            },
        ]
        chat_messages += [
            {
                "role": "user",
                "content": get_generic_question_template_test_completion(
                    question, testcase_input
                ),
            },
        ]
        from transformers import AutoTokenizer

        tokenizer = AutoTokenizer.from_pretrained(
            "abacusai/Dracarys-Llama-3.1-70B-Instruct", padding_side="right", use_fast=False
        )
        return tokenizer.apply_chat_template(
            chat_messages,
            tokenize=False,
            add_generation_prompt=True,
            truncation=False,
            padding=False,
        )
    else:
        raise NotImplementedError(
            f"LanguageModelStyle {LanguageModelStyle} not implemented"
        )
lcb_runner/runner/base_runner.py ADDED
@@ -0,0 +1,181 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ from abc import ABC, abstractmethod
4
+
5
+ from tqdm import tqdm
6
+
7
+ from lcb_runner.lm_styles import LanguageModel
8
+ from lcb_runner.utils.path_utils import get_cache_path
9
+ from lcb_runner.utils.multiprocess import run_tasks_in_parallel
10
+ from lcb_runner.runner.scenario_router import Scenario
11
+
12
+
13
class BaseRunner(ABC):
    """Base class for model runners: batching, caching, and multiprocessing.

    Subclasses implement ``_run_single`` for their API/client and inherit
    prompt batching, a JSON prompt->outputs cache, and the self-repair
    driver (``run_main_repair``).
    """

    def __init__(self, args, model: LanguageModel):
        self.args = args
        self.model = model
        # Per-subclass API call kwargs; populated by subclass __init__.
        self.client_kwargs: dict[str, object] = {}

        if self.args.use_cache:
            # Cache maps a serialized prompt to the list of generations.
            self.cache_path = get_cache_path(model.model_repr, args)
            if os.path.exists(self.cache_path):
                with open(self.cache_path) as f:
                    self.cache: dict = json.load(f)
            else:
                self.cache = {}
        else:
            self.cache_path = None
            self.cache = None

    def save_cache(self):
        """Persist the prompt cache to disk (no-op when caching is off)."""
        if self.args.use_cache:
            with open(self.cache_path, "w") as f:
                json.dump(self.cache, f, indent=4)

    # @abstractmethod
    def _run_single(self, prompt: str | list[dict[str, str]]) -> list[str]:
        # Subclass hook: run the model once and return args.n generations.
        pass

    @staticmethod
    def run_single(combined_args) -> list[str]:
        """
        Run the model for a single prompt and return the output
        Static method to be used in multiprocessing
        Calls the _run_single method with the combined arguments
        """
        prompt: str | list[dict[str, str]]
        cache: dict[str, str]
        call_method: callable
        prompt, cache, args, call_method = combined_args

        # Serialize the prompt into a hashable cache key: chat messages ->
        # JSON; (system, messages) tuples -> system + JSON(messages).
        if isinstance(prompt, list):
            prompt_cache = json.dumps(prompt)
        elif isinstance(prompt, tuple):
            prompt_cache = prompt[0] + json.dumps(prompt[1])
        else:
            prompt_cache = prompt

        # Cache hit only counts when it holds exactly args.n generations.
        if cache is not None and prompt_cache in cache:
            if len(cache[prompt_cache]) == args.n:
                return cache[prompt_cache]

        result = call_method(prompt)
        assert len(result) == args.n

        return result

    def run_batch(self, prompts: list[str | list[dict[str, str]]]) -> list[list[str]]:
        """Run all prompts (optionally in parallel) and refresh the cache."""
        outputs = []
        arguments = [
            (
                prompt,
                self.cache,  ## pass the cache as argument for cache check
                self.args,  ## pass the args as argument for cache check
                self._run_single,  ## pass the _run_single method as argument because of multiprocessing
            )
            for prompt in prompts
        ]
        if self.args.multiprocess > 1:
            parallel_outputs = run_tasks_in_parallel(
                self.run_single,
                arguments,
                self.args.multiprocess,
                use_progress_bar=True,
            )
            for output in parallel_outputs:
                if output.is_success():
                    outputs.append(output.result)
                else:
                    # Keep output alignment by padding with empty strings.
                    print("Failed to run the model for some prompts")
                    print(output.status)
                    print(output.exception_tb)
                    outputs.extend([""] * self.args.n)
        else:
            outputs = [self.run_single(argument) for argument in tqdm(arguments)]

        if self.args.use_cache:
            # Re-derive the same cache keys used in run_single and store
            # the fresh outputs.
            for prompt, output in zip(prompts, outputs):
                if isinstance(prompt, list):
                    prompt_cache = json.dumps(prompt)
                elif isinstance(prompt, tuple):
                    prompt_cache = prompt[0] + json.dumps(prompt[1])
                else:
                    prompt_cache = prompt
                self.cache[prompt_cache] = output  ## save the output to cache

        return outputs

    def prompts_to_outputs(
        self, prompts: list[str | list[dict[str, str]]]
    ) -> list[list[str]]:
        """Run prompts in cache-checkpointed batches (or one shot if no cache)."""
        if self.args.use_cache:
            outputs = []
            batch_size = self.args.cache_batch_size
            for i in range(0, len(prompts), batch_size):
                batch = prompts[i : i + batch_size]
                batch_outputs = self.run_batch(batch)
                outputs.extend(batch_outputs)
                # Checkpoint after every batch so a crash loses little work.
                self.save_cache()
        else:
            outputs = self.run_batch(prompts)
        return outputs

    def run_main_repair(self, benchmark: list, format_prompt: callable) -> list[list[str]]:
        """Self-repair driver: re-prompt only previously failing samples.

        Loads the saved codegeneration evaluation, builds a repair prompt per
        (problem, sample); samples that already passed (format_prompt returns
        "") keep their original output.
        """
        assert self.args.n == 1
        with open(
            f"output/{self.model.model_repr}/{Scenario.codegeneration}_{self.args.codegen_n}_{self.args.temperature}_eval_all.json"
        ) as f:
            check_metadata_list = json.load(f)

        # outputs[problem][sample]; filled either from the old run (passed)
        # or from the repair generations below.
        outputs = [
            [None for _ in range(self.args.codegen_n)]
            for _ in range(len(benchmark))
        ]
        prompts = []
        prompt_index_to_question_idx = {}
        prompt_index_to_code_idx = {}
        count = 0

        for problem_idx, problem in enumerate(benchmark):
            for check_metadata_idx, check_metadata in enumerate(check_metadata_list):
                if problem.question_id == check_metadata['question_id']:
                    count += 1
                    question_content = check_metadata["question_content"]
                    code_list = check_metadata["code_list"]
                    output_list = check_metadata["output_list"]
                    graded_list = check_metadata["graded_list"]
                    metadata = check_metadata["metadata"]
                    for code_idx in range(len(code_list)):
                        prompt = format_prompt(
                            question_content,
                            self.model.model_style,
                            code_list[code_idx],
                            graded_list[code_idx],
                            metadata[code_idx],
                        )
                        if prompt == "":
                            # Sample already passed: keep its old output.
                            outputs[problem_idx][code_idx] = output_list[code_idx]
                            continue
                        prompts.append(prompt)
                        prompt_index_to_question_idx[len(prompts) - 1] = problem_idx
                        prompt_index_to_code_idx[len(prompts) - 1] = code_idx

        # Every benchmark problem must have been found in the saved run.
        assert len(benchmark)==count, f"{len(benchmark)=}!={count=}"

        prompt_outputs = self.prompts_to_outputs(prompts)
        for prompt_idx, output in enumerate(prompt_outputs):
            question_idx = prompt_index_to_question_idx[prompt_idx]
            code_idx = prompt_index_to_code_idx[prompt_idx]
            outputs[question_idx][code_idx] = output

        return outputs

    def run_main(self, benchmark: list, format_prompt: callable) -> list[list[str]]:
        """Entry point: format one prompt per problem and run the batch."""
        if self.args.scenario == Scenario.selfrepair:
            return self.run_main_repair(benchmark, format_prompt)

        prompts = [
            format_prompt(problem, self.model.model_style) for problem in benchmark
        ]
        outputs = self.prompts_to_outputs(prompts)
        return outputs
lcb_runner/runner/claude3_runner.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from time import sleep
3
+
4
+ try:
5
+ from anthropic import Anthropic
6
+ except ImportError as e:
7
+ pass
8
+
9
+ from lcb_runner.runner.base_runner import BaseRunner
10
+
11
+
12
class Claude3Runner(BaseRunner):
    """Runner for Claude 3 family models via the Anthropic Messages API.

    Prompts are (system, messages) tuples as produced by the prompt
    formatters. Retries each request up to 10 times with increasing
    backoff before giving up.
    """

    # Shared client; API key from ANTHROPIC_KEY, generous 20-min timeout.
    client = Anthropic(api_key=os.getenv("ANTHROPIC_KEY"), timeout=1200)

    def __init__(self, args, model):
        super().__init__(args, model)
        if "Thinking" in model.model_style.value:
            # Extended-thinking mode: fixed token budgets; temperature/top_p
            # are not set for thinking requests.
            self.client_kwargs: dict[str, object] = {
                "model": args.model,
                "max_tokens": 32000,
                "thinking": {"type": "enabled", "budget_tokens": 24000},
                "stream": False,
            }
        else:
            self.client_kwargs: dict[str, object] = {
                "model": args.model,
                "temperature": args.temperature,
                "max_tokens": args.max_tokens,
                "top_p": args.top_p,
            }

    def _run_single(self, prompt: tuple[str, str]) -> list[str]:
        # prompt[0] is the system message, prompt[1] the messages list.

        def __run_single(counter):
            # Recursive retry: waits 20*(11-counter)s after each failure,
            # so delays grow as retries are consumed; re-raises after 10.
            try:
                response = self.client.messages.create(
                    system=prompt[0],
                    messages=prompt[1],
                    **self.client_kwargs,
                )
                # Join all content blocks; thinking blocks fall back to
                # their `thinking` text, otherwise a REDACTED placeholder.
                content = "\n".join(
                    [
                        getattr(x, "text", getattr(x, "thinking", "\nREDACTED\n"))
                        for x in response.content
                    ]
                )
                return content
            except Exception as e:
                print("Exception: ", repr(e), "Sleeping for 20 seconds...")
                sleep(20 * (11 - counter))
                counter = counter - 1
                if counter == 0:
                    print(f"Failed to run model for {prompt}!")
                    print("Exception: ", repr(e))
                    raise e
                return __run_single(counter)

        outputs = []
        try:
            # One sequential API call per requested sample.
            for _ in range(self.args.n):
                outputs.append(__run_single(10))
        except Exception as e:
            raise e

        return outputs
lcb_runner/runner/claude_runner.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from time import sleep
3
+
4
+ try:
5
+ from anthropic import Anthropic
6
+ except ImportError as e:
7
+ pass
8
+
9
+ from lcb_runner.runner.base_runner import BaseRunner
10
+
11
+
12
class ClaudeRunner(BaseRunner):
    """Runner for legacy Claude models via the Anthropic Completions API.

    Prompts are plain strings already containing the HUMAN/AI markers.
    Retries each request up to 10 times with increasing backoff.
    """

    # Shared client; API key from ANTHROPIC_KEY.
    client = Anthropic(api_key=os.getenv("ANTHROPIC_KEY"))

    def __init__(self, args, model):
        super().__init__(args, model)
        # Note: the legacy completions API uses `max_tokens_to_sample`.
        self.client_kwargs: dict[str, object] = {
            "model": args.model,
            "temperature": args.temperature,
            "max_tokens_to_sample": args.max_tokens,
            "top_p": args.top_p,
        }

    def _run_single(self, prompt: str) -> list[str]:

        def __run_single(counter):
            # Recursive retry with growing sleep; re-raises after 10 failures.
            try:
                response = self.client.completions.create(
                    prompt=prompt,
                    **self.client_kwargs,
                )
                content = response.completion
                return content
            except Exception as e:
                print("Exception: ", repr(e), "Sleeping for 20 seconds...")
                sleep(20 * (11 - counter))
                counter = counter - 1
                if counter == 0:
                    print(f"Failed to run model for {prompt}!")
                    print("Exception: ", repr(e))
                    raise e
                return __run_single(counter)

        outputs = []
        try:
            # One sequential API call per requested sample.
            for _ in range(self.args.n):
                outputs.append(__run_single(10))
        except Exception as e:
            raise e

        return outputs
lcb_runner/runner/cohere_runner.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from time import sleep
3
+
4
+ try:
5
+ import cohere
6
+ except ImportError as e:
7
+ pass
8
+
9
+ from lcb_runner.runner.base_runner import BaseRunner
10
+
11
+
12
class CohereRunner(BaseRunner):
    """Runner for Cohere chat models via the v2 client."""

    # Shared across instances; requires the COHERE_API_KEY environment variable.
    client = cohere.ClientV2(os.getenv("COHERE_API_KEY"))

    def __init__(self, args, model):
        super().__init__(args, model)
        # Keyword arguments forwarded verbatim to client.chat.
        self.client_kwargs: dict[str, str] = {
            "model": args.model,
            "temperature": args.temperature,
            "max_tokens": args.max_tokens,
            "p": args.top_p,  # Cohere names the nucleus-sampling parameter `p`
        }

    def _run_single(self, prompt: tuple[dict[str, str], str]) -> list[str]:
        """Generate `self.args.n` chat completions for `prompt`.

        Each completion is attempted up to 10 times with a linearly growing
        backoff; the last exception is re-raised once the budget is exhausted.
        """

        def __run_single(counter):
            try:
                response = self.client.chat(
                    messages=prompt,
                    **self.client_kwargs,
                )
                # v2 responses carry a list of content parts; take the first text.
                content = response.message.content[0].text
                return content
            except Exception as e:
                print("Exception: ", repr(e), "Sleeping for 20 seconds...")
                sleep(20 * (11 - counter))
                counter = counter - 1
                if counter == 0:
                    print(f"Failed to run model for {prompt}!")
                    print("Exception: ", repr(e))
                    raise e
                return __run_single(counter)

        outputs = []
        try:
            for _ in range(self.args.n):
                outputs.append(__run_single(10))
        except Exception as e:
            raise e

        return outputs
lcb_runner/runner/custom_evaluator.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+
4
+ from lcb_runner.runner.parser import get_args
5
+ from lcb_runner.utils.scenarios import Scenario
6
+ from lcb_runner.utils.path_utils import get_output_path
7
+ from lcb_runner.evaluation import extract_instance_results
8
+ from lcb_runner.runner.scenario_router import (
9
+ build_prompt_benchmark,
10
+ sort_and_extract_save_results,
11
+ get_metrics,
12
+ )
13
+
14
+
15
def main():
    """Grade externally-generated model outputs against a LiveCodeBench scenario.

    Loads outputs from --custom_output_file, which must be either:
      * list[list[str]] — extracted outputs per question, already sorted in
        benchmark order, or
      * list[dict] — per-question records carrying the scenario's identifier
        keys (see inline comments), which are sorted here.
    Grades them with the scenario's metric and writes `<stem>_<scenario>_output.json`
    plus `_eval` / `_eval_all` files (or under --custom_output_save_name).
    """
    args = get_args()

    benchmark, _ = build_prompt_benchmark(args)

    with open(args.custom_output_file, "r") as f:
        custom_outputs = json.load(f)
    assert isinstance(custom_outputs, list)
    assert len(custom_outputs) == len(benchmark), f"{len(custom_outputs)} != {len(benchmark)}"
    if isinstance(custom_outputs[0], list):
        ## custom outputs must list[list[str]]
        ## list of extracted outputs per question
        ## sorted by the benchmark question_id, test_id, id depending on the scenario

        assert all(
            isinstance(custom_output, list) for custom_output in custom_outputs
        )
    elif isinstance(custom_outputs[0], dict):
        ## custom outputs must list[dict[str, Any]]
        ## list of extracted outputs per question
        ## for codegeneration and selfrepair scenario -- `code_list` and `question_id` are required
        ## for testoutputprediction -- `pred_list`, `question_id`, `test_id` are required
        ## for codeexecution -- `pred_list`, `id` are required
        ## code_list/pred_list is a list of extracted answers (code or assertions) for a question

        assert all(
            isinstance(custom_output, dict) for custom_output in custom_outputs
        )
        if args.scenario in [Scenario.codegeneration, Scenario.selfrepair]:
            custom_outputs = [
                custom_output["code_list"]
                for custom_output in sorted(
                    custom_outputs, key=lambda x: str(x["question_id"])
                )
            ]
        elif args.scenario == Scenario.testoutputprediction:
            custom_outputs = [
                custom_output["pred_list"]
                for custom_output in sorted(
                    custom_outputs, key=lambda x: (str(x["question_id"]), str(x["test_id"]))
                )
            ]
        elif args.scenario == Scenario.codeexecution:
            custom_outputs = [
                custom_output["pred_list"]
                for custom_output in sorted(
                    ## BUG FIX: entries are dicts, so the key must use
                    ## subscription; `x.id` raised AttributeError.
                    custom_outputs, key=lambda x: int(x["id"].split("_")[1])
                )
            ]

    ## The provided outputs serve as both the raw and the extracted outputs.
    save_results = [
        instance.insert_output(custom_output, custom_output)
        for instance, custom_output in zip(benchmark, custom_outputs)
    ]

    save_results, combined_results = sort_and_extract_save_results(
        args.scenario, save_results
    )

    metrics = get_metrics(args.scenario, args, benchmark, combined_results)
    graded = extract_instance_results(metrics[1])

    if args.scenario == Scenario.codegeneration:
        # Code generation additionally returns per-sample grading metadata.
        metadatas = metrics[2]
        save_eval_results = [
            instance.insert_output_evaluation(
                outputs_list, extracted_list, graded_list, metadata=meta
            )
            for instance, (outputs_list, extracted_list), graded_list, meta in zip(
                benchmark, combined_results, graded, metadatas
            )
        ]
    else:
        save_eval_results = [
            instance.insert_output_evaluation(
                outputs_list, extracted_list, graded_list
            )
            for instance, (outputs_list, extracted_list), graded_list in zip(
                benchmark, combined_results, graded
            )
        ]

    if args.custom_output_save_name is None:
        # Strip the ".json" suffix and tag the output with the scenario name.
        output_path = args.custom_output_file[:-5] + f"_{args.scenario.value}_output.json"
    else:
        output_path = get_output_path(args.custom_output_save_name, args)

    with open(output_path, "w") as f:
        json.dump(save_results, f, indent=4)

    with open(output_path.replace(".json", "_eval.json"), "w") as f:
        json.dump(metrics, f, indent=4)

    with open(output_path.replace(".json", "_eval_all.json"), "w") as f:
        json.dump(save_eval_results, f, indent=4)


if __name__ == "__main__":
    main()
lcb_runner/runner/deepseek_runner.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from time import sleep
3
+
4
+ try:
5
+ import openai
6
+ from openai import OpenAI
7
+ except ImportError as e:
8
+ pass
9
+
10
+ from lcb_runner.runner.base_runner import BaseRunner
11
+
12
+
13
class DeepSeekRunner(BaseRunner):
    """Runner for DeepSeek chat models via their OpenAI-compatible API."""

    # Shared across instances; requires the DEEPSEEK_API environment variable.
    client = OpenAI(
        api_key=os.getenv("DEEPSEEK_API"),
        base_url="https://api.deepseek.com",
    )

    def __init__(self, args, model):
        super().__init__(args, model)
        # Keyword arguments forwarded verbatim to chat.completions.create.
        self.client_kwargs: dict[str, str] = {
            "model": args.model,
            "temperature": args.temperature,
            "max_tokens": args.max_tokens,
            "top_p": args.top_p,
            "frequency_penalty": 0,
            "presence_penalty": 0,
            "n": 1,
            "timeout": args.openai_timeout,
            # "stop": args.stop, --> stop is only used for base models currently
        }

    def _run_single(self, prompt: list[dict[str, str]]) -> list[str]:
        """Generate `self.args.n` completions for a chat-formatted prompt.

        Transient API errors are retried with a 30s sleep, up to 10 attempts
        per completion; the last exception is re-raised once the budget is
        exhausted. Non-API errors are raised immediately.
        """
        assert isinstance(prompt, list)

        def __run_single(counter):
            try:
                response = self.client.chat.completions.create(
                    messages=prompt,
                    **self.client_kwargs,
                )
                return response.choices[0].message.content
            except (
                openai.APIError,
                openai.RateLimitError,
                openai.OpenAIError,
                openai.APIStatusError,
                openai.APITimeoutError,
                openai.InternalServerError,
                openai.APIConnectionError,
            ) as e:
                print("Exception: ", repr(e))
                print("Sleeping for 30 seconds...")
                print("Consider reducing the number of parallel processes.")
                sleep(30)
                # BUG FIX: previously retried via DeepSeekRunner._run_single(prompt),
                # which passed `prompt` as `self` and never consumed the retry
                # budget. Retry the helper with one fewer attempt instead.
                if counter <= 0:
                    print(f"Failed to run the model for {prompt}!")
                    raise e
                return __run_single(counter - 1)
            except Exception as e:
                print(f"Failed to run the model for {prompt}!")
                print("Exception: ", repr(e))
                raise e

        outputs = []
        for _ in range(self.args.n):
            outputs.append(__run_single(10))
        return outputs
lcb_runner/runner/fireworks_runner.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from time import sleep
3
+
4
+ try:
5
+ import openai
6
+ from openai import OpenAI
7
+ except ImportError as e:
8
+ pass
9
+
10
+ from lcb_runner.runner.base_runner import BaseRunner
11
+
12
+
13
class FireWorksRunner(BaseRunner):
    """Runner for models served through the Fireworks OpenAI-compatible API."""

    # Shared across instances; requires the FIREWORKS_API environment variable.
    client = OpenAI(
        api_key=os.getenv("FIREWORKS_API"),
        base_url="https://api.fireworks.ai/inference/v1",
    )

    def __init__(self, args, model):
        super().__init__(args, model)
        # Keyword arguments forwarded verbatim to chat.completions.create.
        self.client_kwargs: dict[str, str] = {
            "model": args.model,
            "temperature": args.temperature,
            "max_tokens": args.max_tokens,
            "top_p": args.top_p,
            "frequency_penalty": 0,
            "presence_penalty": 0,
            "n": 1,
            "timeout": args.openai_timeout,
            # "stop": args.stop, --> stop is only used for base models currently
        }

    def _run_single(self, prompt: list[dict[str, str]]) -> list[str]:
        """Generate `self.args.n` completions for `prompt`.

        A bare string prompt is wrapped as a single user chat message.
        Transient API errors are retried with a 30s sleep, up to 10 attempts
        per completion; the last exception is re-raised once the budget is
        exhausted. Non-API errors are raised immediately.
        """
        if not isinstance(prompt, list):
            prompt = [{"role": "user", "content": prompt}]

        def __run_single(counter):
            try:
                response = self.client.chat.completions.create(
                    messages=prompt,
                    **self.client_kwargs,
                )
                return response.choices[0].message.content
            except (
                openai.APIError,
                openai.RateLimitError,
                openai.OpenAIError,
                openai.APIStatusError,
                openai.APITimeoutError,
                openai.InternalServerError,
                openai.APIConnectionError,
            ) as e:
                print("Exception: ", repr(e))
                print("Sleeping for 30 seconds...")
                print("Consider reducing the number of parallel processes.")
                sleep(30)
                # BUG FIX: previously retried via FireWorksRunner._run_single(prompt),
                # which passed `prompt` as `self` and never consumed the retry
                # budget. Retry the helper with one fewer attempt instead.
                if counter <= 0:
                    print(f"Failed to run the model for {prompt}!")
                    raise e
                return __run_single(counter - 1)
            except Exception as e:
                print(f"Failed to run the model for {prompt}!")
                print("Exception: ", repr(e))
                raise e

        outputs = []
        for _ in range(self.args.n):
            outputs.append(__run_single(10))
        return outputs
lcb_runner/runner/gemini_runner.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from time import sleep
3
+
4
+ try:
5
+ from google import genai
6
+ from google.genai.types import GenerateContentConfigDict, ThinkingConfig
7
+ except ImportError as e:
8
+ pass
9
+
10
+ from lcb_runner.runner.base_runner import BaseRunner
11
+ from lcb_runner.lm_styles import LMStyle
12
+
13
+
14
class GeminiRunner(BaseRunner):
    """Runner for Google Gemini models via the google-genai Vertex AI client."""

    # Shared client; requires VERTEX_GEMINI_PROJECT / VERTEX_GEMINI_LOCATION.
    client = genai.Client(
        # api_key=os.getenv("GOOGLE_API_KEY"), http_options={"api_version": "v1alpha"}
        vertexai=True,
        project=os.getenv("VERTEX_GEMINI_PROJECT"),
        location=os.getenv("VERTEX_GEMINI_LOCATION"),
    )
    # Disable all safety filtering so benchmark prompts are never blocked.
    safety_settings = [
        {
            "category": "HARM_CATEGORY_HARASSMENT",
            "threshold": "BLOCK_NONE",
        },
        {
            "category": "HARM_CATEGORY_HATE_SPEECH",
            "threshold": "BLOCK_NONE",
        },
        {
            "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
            "threshold": "BLOCK_NONE",
        },
        {
            "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
            "threshold": "BLOCK_NONE",
        },
    ]

    def __init__(self, args, model):
        super().__init__(args, model)
        self.args = args
        self.model = model
        if self.model.model_style == LMStyle.GeminiThinking:
            # Thinking models: leave sampling to server-side defaults and only
            # request that thought parts be included in the response.
            self.generation_config = GenerateContentConfigDict(
                # candidate_count=args.n,
                # temperature=0.7,
                # top_p=0.95,
                # top_k=64,
                # max_output_tokens=65536,
                safety_settings=GeminiRunner.safety_settings,
                thinking_config=ThinkingConfig(include_thoughts=True),
            )
            print("GeminiThinking model")
        else:
            self.generation_config = GenerateContentConfigDict(
                max_output_tokens=args.max_tokens,
                temperature=args.temperature,
                top_p=args.top_p,
                safety_settings=GeminiRunner.safety_settings,
                candidate_count=args.n,
            )

    def _run_single(self, prompt: str) -> list[str]:
        """Generate candidates for `prompt` and flatten each into one string.

        Each candidate's parts (thoughts + answer) are joined with
        "## Part i" headers. Returns empty strings for missing candidates or
        unreadable parts.
        """

        try:
            outputs = self.client.models.generate_content(
                model=self.model.model_name,
                contents=prompt,
                config=self.generation_config,
            ).candidates

            if outputs is None:
                # API returned no candidates; pad with empty strings.
                print("No outputs from Gemini")
                return ["" for _ in range(self.args.n)]
        except Exception as e:
            # NOTE(review): unbounded retry — a persistently failing request
            # loops forever; consider adding a retry budget.
            print("Exception: ", repr(e))
            print("Sleeping for 30 seconds...")
            print("Consider reducing the number of parallel processes.")
            sleep(30)
            return self._run_single(prompt)

        new_outputs = []
        for output in outputs:
            try:
                texts = [part.text for part in output.content.parts]
                texts = [
                    "## Part " + str(i) + "\n" + text for i, text in enumerate(texts)
                ]
                text = "\n\n\n".join(texts)
                if text == "":
                    print("Empty text for output")
                    print(output.__dict__)
                new_outputs.append(text)
            except Exception as e:
                # A candidate without readable text parts degrades to "".
                print("Cannot extract text exception: ", repr(e))
                print(output.__dict__)
                new_outputs.append("")
        outputs = new_outputs

        return outputs
lcb_runner/runner/grok_runner.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from time import sleep
3
+
4
+ try:
5
+ import openai
6
+ from openai import OpenAI
7
+ except ImportError as e:
8
+ pass
9
+
10
+ from lcb_runner.runner.base_runner import BaseRunner
11
+
12
+
13
class GrokRunner(BaseRunner):
    """Runner for xAI Grok models via their OpenAI-compatible endpoint."""

    # Shared across instances; requires the GROK_API_KEY environment variable.
    client = OpenAI(
        api_key=os.getenv("GROK_API_KEY"),
        base_url="https://api.x.ai/v1",
    )

    def __init__(self, args, model):
        super().__init__(args, model)
        # Model names may embed a reasoning effort as "<model>_<effort>".
        model_name = args.model.split("_")[0]
        self.client_kwargs: dict[str, str] = {
            "model": model_name,
            "temperature": args.temperature,
            "max_tokens": args.max_tokens,
            "top_p": args.top_p,
            "n": 1,
            # "timeout": args.openai_timeout,
            # "stop": args.stop, --> stop is only used for base models currently
        }
        if "_" in args.model:
            self.client_kwargs["reasoning_effort"] = args.model.split("_")[1]

    def _run_single(self, prompt: list[dict[str, str]]) -> list[str]:
        """Generate `self.args.n` completions for a chat-formatted prompt.

        Transient API errors are retried with a 30s sleep, up to 10 attempts
        per completion; the last exception is re-raised once the budget is
        exhausted. Non-API errors are raised immediately.
        """
        assert isinstance(prompt, list)

        def __run_single(counter):
            try:
                response = self.client.chat.completions.create(
                    messages=prompt,
                    **self.client_kwargs,
                )
                return response.choices[0].message.content
            except (
                openai.APIError,
                openai.RateLimitError,
                openai.OpenAIError,
                openai.APIStatusError,
                openai.APITimeoutError,
                openai.InternalServerError,
                openai.APIConnectionError,
            ) as e:
                print("Exception: ", repr(e))
                print(prompt[0]["content"])
                print("Sleeping for 30 seconds...")
                print("Consider reducing the number of parallel processes.")
                sleep(30)
                # BUG FIX: previously retried via GrokRunner._run_single(prompt),
                # which passed `prompt` as `self` and never consumed the retry
                # budget. Retry the helper with one fewer attempt instead.
                if counter <= 0:
                    print(f"Failed to run the model for {prompt}!")
                    raise e
                return __run_single(counter - 1)
            except Exception as e:
                print(f"Failed to run the model for {prompt}!")
                print("Exception: ", repr(e))
                raise e

        outputs = []
        for _ in range(self.args.n):
            outputs.append(__run_single(10))
        return outputs
lcb_runner/runner/main.py ADDED
@@ -0,0 +1,240 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+
4
+ from lcb_runner.runner.parser import get_args
5
+ from lcb_runner.utils.scenarios import Scenario
6
+ from lcb_runner.lm_styles import LanguageModelStore, LanguageModel, LMStyle
7
+ from lcb_runner.runner.runner_utils import build_runner
8
+ from lcb_runner.utils.path_utils import get_output_path
9
+ from lcb_runner.evaluation import extract_instance_results
10
+ from lcb_runner.runner.scenario_router import (
11
+ build_prompt_benchmark,
12
+ combine_results,
13
+ sort_and_extract_save_results,
14
+ get_metrics,
15
+ )
16
+
17
+
18
def main():
    """End-to-end generation (and optional evaluation) entry point.

    Resolves the model, builds the benchmark/prompt pair for the scenario,
    generates outputs for instances not already covered by a previous run
    (--continue_existing*), saves them, and — with --evaluate — grades them
    and merges the metrics with any previous evaluation files.
    """
    args = get_args()

    if args.model in LanguageModelStore:
        model = LanguageModelStore[args.model]
    else:
        # Unknown model: fall back to an ad-hoc LanguageModel built from
        # --nickname / --model_style.
        if args.nickname is None:
            raise ValueError(
                f"Model '{args.model}' not found in lm_styles.py. "
                "Provide --nickname to use it as a custom model."
            )
        model = LanguageModel(
            model_name=args.model,
            model_repr=args.nickname,
            model_style=LMStyle[args.model_style],
            release_date=None,
        )
    benchmark, format_prompt = build_prompt_benchmark(args)
    if args.debug:
        print(f"Running with {len(benchmark)} instances in debug mode")
        benchmark = benchmark[:15]

    output_path = get_output_path(model.model_repr, args)
    eval_file = output_path.replace(".json", "_eval.json")
    eval_all_file = output_path.replace(".json", "_eval_all.json")

    if args.continue_existing or args.continue_existing_with_eval:
        # Resume: prefer the raw generations file, then the eval_all file.
        if os.path.exists(output_path):
            with open(output_path, "r") as f:
                old_save_results = json.load(f)
        elif os.path.exists(eval_all_file):
            with open(eval_all_file, "r") as f:
                old_save_results = json.load(f)
        else:
            print(
                f"File {output_path} does not exist in --continue_existing, starting from scratch"
            )
            old_save_results = []

        # Keep only instances that have at least one non-empty output.
        old_save_results = [
            instance
            for instance in old_save_results
            if instance["output_list"] and [x for x in instance["output_list"] if x]
        ]
        old_save_results_question_ids = [
            instance["question_id"] for instance in old_save_results
        ]
        remaining_benchmark = [
            instance
            for instance in benchmark
            if instance.question_id not in old_save_results_question_ids
        ]
        print(
            f"Found {len(old_save_results)} existing generations, continuing with {len(remaining_benchmark)} remaining"
        )
    else:
        old_save_results = []
        remaining_benchmark = benchmark

    if len(remaining_benchmark) > 0:
        runner = build_runner(args, model)
        results: list[list[str]] = runner.run_main(remaining_benchmark, format_prompt)
    else:
        results = []

    # Pair each raw output list with its extracted (e.g. code-block) form.
    combined_results = combine_results(
        args.scenario, results, model, args.cot_code_execution
    )

    save_results = [
        instance.insert_output(outputs_list, extracted_list)
        for instance, (outputs_list, extracted_list) in zip(
            remaining_benchmark, combined_results
        )
    ]

    if args.continue_existing or args.continue_existing_with_eval:
        save_results += old_save_results

    save_results, combined_results = sort_and_extract_save_results(
        args.scenario, save_results
    )

    with open(output_path, "w") as f:
        json.dump(save_results, f, indent=4)

    # for i in range(len(combined_results)):
    #     for j in range(len(combined_results[i][1])):
    #         if "def solve()" in combined_results[i][1][j]:
    #             from lcb_runner.utils.extraction_utils import extract_code, LMStyle

    #             combined_results[i][1][j] = extract_code(
    #                 combined_results[i][0][j], LMStyle.Gemini
    #             )
    #             if "\nsolve()" not in combined_results[i][1][j]:
    #                 combined_results[i][1][j] += "\n\nsolve()"

    #             # combined_results[i][1][j] += "\n\nsolve()"
    #             print(combined_results[i][1][j])

    if args.evaluate:
        if args.continue_existing_with_eval and os.path.exists(eval_all_file):
            # Incremental evaluation: only grade questions missing from the
            # previous eval_all file, then merge metrics with the old ones.
            with open(eval_all_file) as fp:
                old_eval_all_results = json.load(fp)

            if os.path.exists(eval_file):
                with open(eval_file) as fp:
                    old_eval_results = json.load(fp)
            else:
                old_eval_results = None

            old_eval_results_question_ids = [
                instance["question_id"] for instance in old_eval_all_results
            ]
            remaining_indices = [
                idx
                for idx in range(len(benchmark))
                if benchmark[idx].question_id not in old_eval_results_question_ids
            ]
            benchmark = [benchmark[idx] for idx in remaining_indices]
            combined_results = [combined_results[idx] for idx in remaining_indices]

            old_eval_size = len(old_eval_results_question_ids)
            new_eval_size = len(benchmark)

            if new_eval_size == 0:
                # Nothing new to grade; existing eval files remain valid.
                return

            print(f"Found {old_eval_size}, running evals for {new_eval_size} problems")

            metrics = get_metrics(args.scenario, args, benchmark, combined_results)
            graded = extract_instance_results(metrics[1])

            if old_eval_results:
                # Merge scalar metrics as a size-weighted average; merge the
                # per-question "detail" dicts key-by-key.
                for key in metrics[0]:
                    if key in old_eval_results[0]:
                        if key != "detail":
                            metrics[0][key] = (
                                old_eval_size * old_eval_results[0][key]
                                + new_eval_size * metrics[0][key]
                            )
                            metrics[0][key] /= old_eval_size + new_eval_size

                for key in metrics[0]["detail"]:
                    if key in old_eval_results[0]["detail"]:
                        metrics[0]["detail"][key] = {
                            **metrics[0]["detail"][key],
                            **old_eval_results[0]["detail"][key],
                        }
                metrics[1] = {**metrics[1], **old_eval_results[1]}
            else:
                print("Old eval file not present, cannot update eval file")
                metrics = {}

        else:
            metrics = get_metrics(args.scenario, args, benchmark, combined_results)
            graded = extract_instance_results(metrics[1])
            old_eval_all_results = []
            old_eval_results = []

    if args.scenario == Scenario.codegeneration:
        if metrics:
            metadatas = metrics[2]
        else:
            metadatas = [[] for _ in benchmark]
        save_eval_results = [
            instance.insert_output_evaluation(
                outputs_list, extracted_list, graded_list, metadata=meta
            )
            for instance, (outputs_list, extracted_list), graded_list, meta in zip(
                benchmark, combined_results, graded, metadatas
            )
        ]
        if metrics and old_eval_results:
            # NOTE(review): the bare `old_eval_results` expression below is a
            # no-op statement (likely a leftover); only the next line merges.
            old_eval_results
            metrics[2] = old_eval_results[2] + metrics[2]
    elif args.scenario == Scenario.selfrepair:
        metadatas = metrics[2]
        # Self-repair needs the original generations to pair with repairs.
        with open(
            f"output/{model.model_repr}/{Scenario.codegeneration}_{args.codegen_n}_{args.temperature}_eval_all.json"
        ) as f:
            code_gen_evals = json.load(f)
        original_code_lists = [
            code_gen_eval["code_list"] for code_gen_eval in code_gen_evals
        ]

        save_eval_results = [
            instance.insert_output_evaluation(
                outputs_list,
                extracted_list,
                graded_list,
                metadata=meta,
                original_code_list=original_code_list,
            )
            for instance, (
                outputs_list,
                extracted_list,
            ), graded_list, meta, original_code_list in zip(
                benchmark, combined_results, graded, metadatas, original_code_lists
            )
        ]

    else:
        save_eval_results = [
            instance.insert_output_evaluation(
                outputs_list, extracted_list, graded_list
            )
            for instance, (outputs_list, extracted_list), graded_list in zip(
                benchmark, combined_results, graded
            )
        ]

    save_eval_results = old_eval_all_results + save_eval_results

    with open(eval_file, "w") as f:
        json.dump(metrics, f, indent=4)

    with open(eval_all_file, "w") as f:
        json.dump(save_eval_results, f, indent=4)


if __name__ == "__main__":
    main()
lcb_runner/runner/mistral_runner.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from time import sleep
3
+
4
+ try:
5
+ from mistralai.client import MistralClient
6
+ except ImportError as e:
7
+ pass
8
+
9
+ from lcb_runner.runner.base_runner import BaseRunner
10
+
11
+
12
class MistralRunner(BaseRunner):
    """Runner for Mistral models via the MistralClient chat API."""

    # Shared client; raises KeyError at import time if MISTRAL_API_KEY is unset.
    client = MistralClient(
        api_key=os.environ["MISTRAL_API_KEY"],
    )

    def __init__(self, args, model):
        super().__init__(args, model)
        # Keyword arguments forwarded verbatim to client.chat.
        self.client_kwargs: dict[str, str] = {
            "model": args.model,
            "temperature": args.temperature,
            "max_tokens": args.max_tokens,
            "top_p": args.top_p,
        }

    def _run_single(self, prompt: list[dict[str, str]]) -> list[str]:
        """Generate `self.args.n` chat completions for `prompt`.

        Each completion is attempted up to 10 times with a linearly growing
        backoff (20s, 40s, ...); the last exception is re-raised once the
        budget is exhausted.
        """

        def __run_single(counter):
            try:
                response = self.client.chat(
                    messages=prompt,
                    **self.client_kwargs,
                )
                content = response.choices[0].message.content
                return content
            except Exception as e:
                print("Exception: ", repr(e), "Sleeping for 20 seconds...")
                sleep(20 * (11 - counter))
                counter = counter - 1
                if counter == 0:
                    print(f"Failed to run model for {prompt}!")
                    print("Exception: ", repr(e))
                    raise e
                return __run_single(counter)

        outputs = []
        try:
            for _ in range(self.args.n):
                outputs.append(__run_single(10))
        except Exception as e:
            raise e

        return outputs
lcb_runner/runner/oai_runner.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from time import sleep
3
+
4
+ try:
5
+ import openai
6
+ from openai import OpenAI
7
+ except ImportError as e:
8
+ pass
9
+
10
+ from lcb_runner.lm_styles import LMStyle
11
+ from lcb_runner.runner.base_runner import BaseRunner
12
+
13
+
14
class OpenAIRunner(BaseRunner):
    """Runner for OpenAI chat, reasoning-preview, and reasoning models."""

    # Shared across instances; requires the OPENAI_KEY environment variable.
    client = OpenAI(
        api_key=os.getenv("OPENAI_KEY"),
    )

    def __init__(self, args, model):
        super().__init__(args, model)
        if model.model_style == LMStyle.OpenAIReasonPreview:
            # o1-preview style models only accept a completion-token cap.
            self.client_kwargs: dict[str, str] = {
                "model": args.model,
                "max_completion_tokens": 25000,
            }
        elif model.model_style == LMStyle.OpenAIReason:
            # Reasoning models encode the effort in the name: "<model>__<effort>".
            assert (
                "__" in args.model
            ), f"Model {args.model} is not a valid OpenAI Reasoning model as we require reasoning effort in model name."
            model, reasoning_effort = args.model.split("__")
            self.client_kwargs: dict[str, str] = {
                "model": model,
                "reasoning_effort": reasoning_effort,
            }
        else:
            self.client_kwargs: dict[str, str] = {
                "model": args.model,
                "temperature": args.temperature,
                "max_tokens": args.max_tokens,
                "top_p": args.top_p,
                "frequency_penalty": 0,
                "presence_penalty": 0,
                "n": args.n,
                "timeout": args.openai_timeout,
                # "stop": args.stop, --> stop is only used for base models currently
            }

    def _run_single(self, prompt: list[dict[str, str]], n: int = 10) -> list[str]:
        """Request completions for a chat prompt, retrying transient API errors.

        `n` is the remaining retry budget (the sample count is carried in
        `client_kwargs["n"]`). Returns an empty list once the budget is
        exhausted; non-API errors are raised immediately.
        """
        assert isinstance(prompt, list)

        if n == 0:
            print("Max retries reached. Returning empty response.")
            return []

        try:
            response = OpenAIRunner.client.chat.completions.create(
                messages=prompt,
                **self.client_kwargs,
            )
        except (
            # (Fixed: openai.InternalServerError was previously listed twice.)
            openai.APIError,
            openai.RateLimitError,
            openai.InternalServerError,
            openai.OpenAIError,
            openai.APIStatusError,
            openai.APITimeoutError,
            openai.APIConnectionError,
        ) as e:
            print("Exception: ", repr(e))
            print("Sleeping for 30 seconds...")
            print("Consider reducing the number of parallel processes.")
            sleep(30)
            return self._run_single(prompt, n=n - 1)
        except Exception as e:
            print(f"Failed to run the model for {prompt}!")
            print("Exception: ", repr(e))
            raise e
        return [c.message.content for c in response.choices]
lcb_runner/runner/parser.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import torch
3
+ import argparse
4
+
5
+ from lcb_runner.utils.scenarios import Scenario
6
+
7
+
8
def get_args():
    """Build and parse the command-line arguments shared by all runners.

    Returns the parsed namespace with `stop` split into a list, and
    `tensor_parallel_size` / `multiprocess` resolved from the hardware when
    set to their sentinel values (-1).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model",
        type=str,
        default="gpt-3.5-turbo-0301",
        help="Name of the model to use matching `lm_styles.py`, or a path if not in the store",
    )
    parser.add_argument(
        "--nickname",
        type=str,
        default=None,
        help="Short name used as model_repr when --model is not present in lm_styles.py",
    )
    parser.add_argument(
        "--model_style",
        type=str,
        default="CodeQwenInstruct",
        help="LMStyle to use when --model is not present in lm_styles.py (default: CodeQwenInstruct)",
    )
    parser.add_argument(
        "--local_model_path",
        type=str,
        default=None,
        help="If you have a local model, specify it here in conjunction with --model",
    )
    parser.add_argument(
        "--trust_remote_code",
        action="store_true",
        help="trust_remote_code option used in huggingface models",
    )
    parser.add_argument(
        "--scenario",
        type=Scenario,
        default=Scenario.codegeneration,
        help="Type of scenario to run",
    )
    parser.add_argument(
        "--not_fast",
        action="store_true",
        help="whether to use full set of tests (slower and more memory intensive evaluation)",
    )
    parser.add_argument(
        "--release_version",
        type=str,
        default="release_latest",
        # BUG FIX: help text was a copy-paste of --not_fast's description.
        help="LiveCodeBench benchmark release version to load (e.g. release_latest)",
    )
    parser.add_argument(
        "--cot_code_execution",
        action="store_true",
        help="whether to use CoT in code execution scenario",
    )
    parser.add_argument(
        "--n", type=int, default=10, help="Number of samples to generate"
    )
    parser.add_argument(
        "--codegen_n",
        type=int,
        default=10,
        help="Number of samples for which code generation was run (used to map the code generation file during self-repair)",
    )
    parser.add_argument(
        "--temperature", type=float, default=0.2, help="Temperature for sampling"
    )
    parser.add_argument("--top_p", type=float, default=0.95, help="Top p for sampling")
    parser.add_argument(
        "--max_tokens", type=int, default=2000, help="Max tokens for sampling"
    )
    parser.add_argument(
        "--multiprocess",
        default=0,
        type=int,
        help="Number of processes to use for generation (vllm runs do not use this)",
    )
    parser.add_argument(
        "--stop",
        default="###",
        type=str,
        help="Stop token (use `,` to separate multiple tokens)",
    )
    parser.add_argument("--continue_existing", action="store_true")
    parser.add_argument("--continue_existing_with_eval", action="store_true")
    parser.add_argument(
        "--use_cache", action="store_true", help="Use cache for generation"
    )
    parser.add_argument(
        "--cache_batch_size", type=int, default=100, help="Batch size for caching"
    )
    parser.add_argument("--debug", action="store_true", help="Debug mode")
    parser.add_argument("--evaluate", action="store_true", help="Evaluate the results")
    parser.add_argument(
        "--num_process_evaluate",
        type=int,
        default=12,
        help="Number of processes to use for evaluation",
    )
    parser.add_argument("--timeout", type=int, default=6, help="Timeout for evaluation")
    parser.add_argument(
        "--openai_timeout", type=int, default=90, help="Timeout for requests to OpenAI"
    )
    parser.add_argument(
        "--tensor_parallel_size",
        type=int,
        default=-1,
        help="Tensor parallel size for vllm",
    )
    parser.add_argument(
        "--enable_prefix_caching",
        action="store_true",
        help="Enable prefix caching for vllm",
    )
    parser.add_argument(
        "--custom_output_file",
        type=str,
        default=None,
        help="Path to the custom output file used in `custom_evaluator.py`",
    )
    parser.add_argument(
        "--custom_output_save_name",
        type=str,
        default=None,
        help="Folder name to save the custom output results (output file folder modified if None)",
    )
    parser.add_argument("--dtype", type=str, default="bfloat16", help="Dtype for vllm")
    # Added to avoid running extra generations (it's slow for reasoning models)
    parser.add_argument(
        "--start_date",
        type=str,
        default=None,
        help="Start date for the contest to filter the evaluation file (format - YYYY-MM-DD)",
    )
    parser.add_argument(
        "--end_date",
        type=str,
        default=None,
        help="End date for the contest to filter the evaluation file (format - YYYY-MM-DD)",
    )

    args = parser.parse_args()

    # Split comma-separated stop tokens into a list.
    args.stop = args.stop.split(",")

    # -1 sentinels: derive parallelism from the available hardware.
    if args.tensor_parallel_size == -1:
        args.tensor_parallel_size = torch.cuda.device_count()

    if args.multiprocess == -1:
        args.multiprocess = os.cpu_count()

    return args
158
+
159
+
160
def test():
    """Smoke-test the argument parser by printing the parsed namespace."""
    print(get_args())


if __name__ == "__main__":
    test()
lcb_runner/runner/runner_utils.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from lcb_runner.lm_styles import LMStyle, LanguageModel
2
+
3
+
4
def build_runner(args, model: LanguageModel):
    """Instantiate the runner matching *model*'s API style.

    Runner modules are imported lazily so only the SDK for the selected
    backend has to be installed.  Any style without a dedicated API client
    falls through to the local vLLM runner.

    Args:
        args: Parsed CLI namespace forwarded to the runner constructor.
        model: Model descriptor whose ``model_style`` selects the backend.

    Returns:
        A runner instance (subclass of BaseRunner) for ``model``.
    """
    # The three OpenAI styles all share the same runner, so they are merged
    # into a single branch (the original had two identical branches).
    if model.model_style in (
        LMStyle.OpenAIChat,
        LMStyle.OpenAIReason,
        LMStyle.OpenAIReasonPreview,
    ):
        from lcb_runner.runner.oai_runner import OpenAIRunner

        return OpenAIRunner(args, model)
    if model.model_style in (LMStyle.Gemini, LMStyle.GeminiThinking):
        from lcb_runner.runner.gemini_runner import GeminiRunner

        return GeminiRunner(args, model)
    if model.model_style in (LMStyle.Claude3, LMStyle.Claude3Thinking):
        from lcb_runner.runner.claude3_runner import Claude3Runner

        return Claude3Runner(args, model)
    if model.model_style == LMStyle.Claude:
        from lcb_runner.runner.claude_runner import ClaudeRunner

        return ClaudeRunner(args, model)
    if model.model_style == LMStyle.MistralWeb:
        from lcb_runner.runner.mistral_runner import MistralRunner

        return MistralRunner(args, model)
    if model.model_style == LMStyle.CohereCommand:
        from lcb_runner.runner.cohere_runner import CohereRunner

        return CohereRunner(args, model)
    # Note: a second, identical DeepSeekAPI branch was removed (dead code).
    if model.model_style == LMStyle.DeepSeekAPI:
        from lcb_runner.runner.deepseek_runner import DeepSeekRunner

        return DeepSeekRunner(args, model)
    if model.model_style == LMStyle.Grok:
        from lcb_runner.runner.grok_runner import GrokRunner

        return GrokRunner(args, model)
    if model.model_style == LMStyle.TogetherAI:
        from lcb_runner.runner.together_runner import TogetherAIRunner

        return TogetherAIRunner(args, model)
    # Fireworks-hosted models are identified by their name, not their style.
    if "/fireworks/" in model.model_name:
        from lcb_runner.runner.fireworks_runner import FireWorksRunner

        return FireWorksRunner(args, model)
    # Removed the unreachable `elif model.model_style in []` branch from the
    # original; everything else is served locally through vLLM.
    from lcb_runner.runner.vllm_runner import VLLMRunner

    return VLLMRunner(args, model)
lcb_runner/runner/scenario_router.py ADDED
@@ -0,0 +1,217 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Union
2
+
3
+ from lcb_runner.utils.scenarios import Scenario
4
+ from lcb_runner.lm_styles import LanguageModel
5
+ from lcb_runner.evaluation import (
6
+ codegen_metrics,
7
+ test_output_metrics,
8
+ code_execution_metrics,
9
+ )
10
+
11
+ from lcb_runner.prompts import (
12
+ format_prompt_generation,
13
+ format_prompt_test_output,
14
+ format_prompt_execution,
15
+ format_prompt_execution_cot,
16
+ format_prompt_self_repair,
17
+ )
18
+ from lcb_runner.utils.extraction_utils import (
19
+ extract_code,
20
+ extract_test_output_code,
21
+ extract_execution_code,
22
+ )
23
+
24
+ from lcb_runner.benchmarks import (
25
+ CodeGenerationProblem,
26
+ TestOutputPredictionProblem,
27
+ CodeExecutionProblem,
28
+ load_code_generation_dataset,
29
+ load_code_generation_dataset_not_fast,
30
+ load_test_prediction_dataset,
31
+ load_code_execution_dataset,
32
+ )
33
+
34
+ # BenchMarkType = list[CodeGenerationProblem | TestOutputPredictionProblem]
35
+ BenchMarkType = list[
36
+ Union[CodeGenerationProblem, CodeExecutionProblem, TestOutputPredictionProblem]
37
+ ]
38
+
39
+
40
def build_prompt_benchmark(
    args,
) -> tuple[
    list[CodeExecutionProblem]
    | list[CodeGenerationProblem]
    | list[TestOutputPredictionProblem],
    callable,
]:
    """Load, sort, and pair the benchmark for ``args.scenario``.

    Returns:
        (benchmark, format_prompt): the sorted problem list and the
        prompt-formatting function appropriate for the scenario.

    Raises:
        ValueError: if ``args.scenario`` has no implementation here.
    """
    scenario: Scenario = args.scenario

    if scenario == Scenario.codegeneration:
        # `not_fast` selects the slower/full dataset loader.
        if args.not_fast:
            problems = load_code_generation_dataset_not_fast(args.release_version)
        else:
            problems = load_code_generation_dataset(
                args.release_version,
                start_date=args.start_date,
                end_date=args.end_date,
            )
        return sorted(problems, key=lambda p: p.question_id), format_prompt_generation

    if scenario == Scenario.testoutputprediction:
        problems = load_test_prediction_dataset(args.release_version)
        ordered = sorted(problems, key=lambda p: (p.question_id, p.test_id))
        return ordered, format_prompt_test_output

    if scenario == Scenario.selfrepair:
        # Self-repair reuses the code-generation problems with its own prompt.
        problems = load_code_generation_dataset(args.release_version)
        return sorted(problems, key=lambda p: p.question_id), format_prompt_self_repair

    if scenario == Scenario.codeexecution:
        problems = load_code_execution_dataset(args.release_version)
        # Execution problem ids look like "<prefix>_<number>"; sort numerically.
        ordered = sorted(problems, key=lambda p: int(p.id.split("_")[1]))
        prompt_fn = (
            format_prompt_execution_cot
            if args.cot_code_execution
            else format_prompt_execution
        )
        return ordered, prompt_fn

    raise ValueError(f"Scenario {scenario} not implemented")
81
+
82
+
83
def combine_results(
    scenario: Scenario,
    results: list[list[str]],
    model: LanguageModel,
    cot_code_execution: bool = False,
):
    """Pair each raw model output with its scenario-specific extraction.

    Args:
        scenario: Task whose extraction routine should be applied.
        results: Per-problem lists of raw model outputs.
        model: Used for its ``model_style`` when extracting answers.
        cot_code_execution: Forwarded to the execution extractor.

    Returns:
        List of (raw_outputs, extracted) tuples, one per problem.

    Raises:
        ValueError: if the scenario has no extraction implemented.
    """
    style = model.model_style

    if scenario == Scenario.selfrepair:
        # Self-repair outputs may be nested one level deep; unwrap the first
        # element before extracting code.
        paired = []
        for outputs in results:
            flattened = [o[0] if type(o) is list else o for o in outputs]
            paired.append(
                (flattened, [extract_code(o, style) for o in flattened])
            )
        return paired

    if scenario == Scenario.codegeneration:
        def extractor(output):
            return extract_code(output, style)
    elif scenario == Scenario.testoutputprediction:
        def extractor(output):
            return extract_test_output_code(output, style)
    elif scenario == Scenario.codeexecution:
        def extractor(output):
            return extract_execution_code(output, style, cot=cot_code_execution)
    else:
        raise ValueError(f"Scenario {scenario} not implemented")

    return [
        (outputs, [extractor(output) for output in outputs])
        for outputs in results
    ]
143
+
144
+
145
def sort_and_extract_save_results(scenario: Scenario, save_results: list[dict]):
    """Sort saved result dicts into benchmark order and pull out the pairs.

    Args:
        scenario: Task that determines the sort key and the extracted field
            ("code_list" for generation/repair, "pred_list" otherwise).
        save_results: Result dicts as written by the runner.

    Returns:
        (sorted_results, combined_results) where combined_results is a list
        of (output_list, extracted_list) tuples aligned with the sort order.

    Raises:
        ValueError: if the scenario has no implementation here.
    """
    if scenario in (Scenario.codegeneration, Scenario.selfrepair):
        sort_key = lambda r: r["question_id"]
        extracted_field = "code_list"
    elif scenario == Scenario.testoutputprediction:
        sort_key = lambda r: (r["question_id"], r["test_id"])
        extracted_field = "pred_list"
    elif scenario == Scenario.codeexecution:
        # Execution ids look like "<prefix>_<number>"; order numerically.
        sort_key = lambda r: int(r["id"].split("_")[1])
        extracted_field = "pred_list"
    else:
        raise ValueError(f"Scenario {scenario} not implemented")

    ordered = sorted(save_results, key=sort_key)
    combined = [(r["output_list"], r[extracted_field]) for r in ordered]
    return ordered, combined
178
+
179
+
180
def get_metrics(
    scenario: Scenario,
    args,
    benchmark: list[
        CodeGenerationProblem | CodeExecutionProblem | TestOutputPredictionProblem
    ],
    combined_results,
):
    """Evaluate extracted generations against the benchmark for *scenario*.

    Args:
        scenario: Task being evaluated; selects the metric function.
        args: Parsed CLI namespace (uses num_process_evaluate and timeout).
        benchmark: Problem instances, aligned index-wise with combined_results.
        combined_results: (raw_outputs, extracted) pairs per problem.

    Returns:
        The metrics structure from the scenario-specific evaluator;
        ``metrics[0]`` holds the aggregate scores (e.g. "pass@1").

    Raises:
        ValueError: if the scenario has no metric implemented.
    """
    eval_samples = [instance.get_evaluation_sample() for instance in benchmark]
    generations = [extracted for _, extracted in combined_results]

    # Fix: branch on the explicit `scenario` parameter throughout; the
    # original mixed `scenario` and `args.scenario`, which would misroute a
    # caller that passes a scenario differing from the CLI namespace.
    if scenario in (Scenario.codegeneration, Scenario.selfrepair):
        metrics = codegen_metrics(
            eval_samples,
            generations,
            num_process_evaluate=args.num_process_evaluate,
            timeout=args.timeout,
        )
    elif scenario == Scenario.testoutputprediction:
        metrics = test_output_metrics(
            eval_samples,
            generations,
            k_list=[1, 5],
        )
    elif scenario == Scenario.codeexecution:
        metrics = code_execution_metrics(
            eval_samples,
            generations,
        )
    else:
        raise ValueError(f"Scenario {scenario} not implemented")

    print(metrics[0]["pass@1"])

    return metrics
lcb_runner/runner/together_runner.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from time import sleep
3
+
4
+ try:
5
+ from together import Together
6
+ except ImportError as e:
7
+ pass
8
+
9
+ from lcb_runner.runner.base_runner import BaseRunner
10
+
11
class TogetherAIRunner(BaseRunner):
    """Runner that queries chat models hosted on the Together AI API."""

    # Shared client for all instances; reads the key from the environment.
    client = Together(api_key=os.getenv("TOGETHER_API_KEY"))

    def __init__(self, args, model):
        super().__init__(args, model)
        # Fixed annotation: the original `dict[str | str]` is not a valid
        # mapping type, and the values include ints/floats, not just strings.
        # These kwargs are forwarded verbatim to chat.completions.create.
        self.client_kwargs: dict[str, str | int | float] = {
            "model": args.model,
            "temperature": args.temperature,
            "max_tokens": args.max_tokens,
            "top_p": args.top_p,
            "frequency_penalty": 0,
            "presence_penalty": 0,
            "n": 1,
        }

    def _run_single(self, prompt: tuple[dict[str, str], str]) -> list[str]:
        """Generate ``args.n`` completions for *prompt*, retrying API errors.

        Each completion is requested individually (API n=1); a failed call
        is retried up to 10 times with a growing linear backoff before the
        last exception is re-raised.
        """

        def attempt(retries_left: int) -> str:
            try:
                response = self.client.chat.completions.create(
                    messages=prompt,
                    **self.client_kwargs,
                )
                return response.choices[0].message.content
            except Exception as e:
                # Backoff grows as retries are exhausted: 20s, 40s, 60s, ...
                print("Exception: ", repr(e), "Sleeping for 20 seconds...")
                sleep(20 * (11 - retries_left))
                retries_left = retries_left - 1
                if retries_left == 0:
                    print(f"Failed to run model for {prompt}!")
                    print("Exception: ", repr(e))
                    raise e
                return attempt(retries_left)

        # Removed the original's redundant try/except that only re-raised.
        return [attempt(10) for _ in range(self.args.n)]
lcb_runner/runner/vllm_runner.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ try:
2
+ from transformers import AutoTokenizer
3
+ from vllm import LLM, SamplingParams
4
+ except ImportError as e:
5
+ # print("Cannot import vllm")
6
+ pass
7
+
8
+ from lcb_runner.runner.base_runner import BaseRunner
9
+
10
+
11
class VLLMRunner(BaseRunner):
    """Runner that generates completions locally through a vLLM engine."""

    def __init__(self, args, model):
        super().__init__(args, model)
        # A local checkpoint path, when given, overrides the hub model name
        # for both weights and tokenizer.
        weights_path = (
            args.local_model_path
            if args.local_model_path is not None
            else model.model_name
        )
        self.llm = LLM(
            model=weights_path,
            tokenizer=weights_path,
            tensor_parallel_size=args.tensor_parallel_size,
            dtype=args.dtype,
            enforce_eager=True,
            disable_custom_all_reduce=True,
            enable_prefix_caching=args.enable_prefix_caching,
            trust_remote_code=args.trust_remote_code,
        )
        self.sampling_params = SamplingParams(
            n=self.args.n,
            max_tokens=self.args.max_tokens,
            temperature=self.args.temperature,
            top_p=self.args.top_p,
            frequency_penalty=0,
            presence_penalty=0,
            stop=self.args.stop,
        )

    def _run_single(self, prompt: str) -> list[str]:
        # Unused: generation goes through run_batch for this runner.
        pass

    def run_batch(self, prompts: list[str]) -> list[list[str]]:
        """Return ``args.n`` completions per prompt, serving cache hits.

        A cache entry is reused only when it holds exactly ``args.n``
        samples; otherwise the prompt is regenerated (and, when caching is
        on, the entry is refreshed).
        """
        results: list[list[str] | None] = [None] * len(prompts)
        pending: list[tuple[int, str]] = []

        for idx, prompt in enumerate(prompts):
            if self.args.use_cache and prompt in self.cache:
                hit = self.cache[prompt]
                if len(hit) == self.args.n:
                    results[idx] = hit
                    continue
            pending.append((idx, prompt))

        if pending:
            pending_prompts = [p for _, p in pending]
            generations = self.llm.generate(pending_prompts, self.sampling_params)
            if self.args.use_cache:
                assert len(pending_prompts) == len(generations)
            for (idx, prompt), generation in zip(pending, generations):
                texts = [candidate.text for candidate in generation.outputs]
                if self.args.use_cache:
                    # Store an independent copy, as the original did.
                    self.cache[prompt] = list(texts)
                results[idx] = texts

        return results