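# Batch driver: extract problem inputs from the 2025 problem files via an LLM
# and merge them with the previously extracted problems into one JSON output.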
from llm.llm import LLM
from input.problem import problem_input
# from input.test_middle_result import problem_str, problem_analysis, selected_models, modeling_solution, modeling_solution, task_descriptions
from agent.problem_analysis import ProblemAnalysis
from agent.problem_modeling import ProblemModeling
from agent.task_decompse import TaskDecompose
from agent.task import Task
from agent.create_charts import Chart
from utils.utils import read_json_file, write_json_file, write_text_file, json_to_markdown
# from utils.convert_format import markdown_to_latex
import os
from prompt.template import PROBLEM_EXTRACT_PROMPT
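
# Pipeline settings: iteration rounds for problem analysis, modeling, and task
# formulas; number of subtasks and charts; and the LLM model name.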
config = {
    'problem_analysis_round': 1,
    'problem_modeling_round': 1,
    'task_formulas_round': 1,
    'tasknum': 4,
    'chart_num': 3,
    'model_name': 'chatgpt-4o-latest'
}


def run_batch(problem_path, config):
    # Initialize LLM
    llm = LLM(config['model_name'])
    # Get problem input
    problem_str, problem = problem_input(problem_path, llm)
    # Derive the problem name and its type (the file-name suffix) from the path
    problem_name = os.path.splitext(os.path.basename(problem_path))[0]
    problem_type = problem_name.split('_')[-1]
    return {problem_name: problem}
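
# Run extraction over every 2025 problem file and merge the results with the
# problems already extracted for 2024.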
if __name__ == "__main__":
    import glob

    # files = glob.glob('/Users/ann/Downloads/methmatical_paper_extraction/parse/2025_*/content/*.md')
    files = glob.glob('../data/actor_data/input/problem/2025_*')
    problems = read_json_file('../data/actor_data/output/problem_24.json')
    for file in files:
        problems.update(run_batch(file, config))
        # Save after each file so partial progress is kept if a later run fails
        write_json_file('../data/actor_data/output/problem_25.json', problems)
    print(problems.keys())