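# Driver script: loads a problem from JSON, reuses cached intermediate results
# from input.test_middle_result, and runs the Task agent pipeline to build the
# per-task sections of the paper.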
from llm.llm import LLM
from input.problem import problem_input
from input.test_middle_result import problem_str, problem_analysis, selected_models, modeling_solution, task_descriptions
from agent.problem_analysis import ProblemAnalysis
from agent.problem_modeling import ProblemModeling
from agent.task_decompse import TaskDecompose
from agent.task import Task
from utils.utils import read_json_file, write_json_file, write_markdown_file, json_to_markdown


if __name__ == "__main__":
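    # Select the backbone LLM; the alternative model names are left commented out for quick switching.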
    # llm = LLM('deepseek-chat')
    llm = LLM('deepseek-reasoner')
    # llm = LLM('gpt-4o')
    paper = {'tasks': []}
    
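    # Load the problem JSON and parse it into a plain-text statement plus a structured dict.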
    problem_path = 'data/actor_data/input/problem/2024_C.json'
    problem_str, problem = problem_input(problem_path, llm)
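    # Derive the problem letter from the file name and fix the number of sub-tasks to decompose into.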
    problem_type = problem_path.split('/')[-1].split('_')[-1].split('.')[0] # 'C'
    tasknum = 4

    print(problem_str)
    print('---')
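    # Seed the paper draft with the problem background and requirement.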
    paper['problem_background'] = problem['background']
    paper['problem_requirement'] = problem['problem_requirement']

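    # The three upstream stages (problem analysis, modeling, task decomposition) are commented out;
    # their cached outputs are imported from input.test_middle_result above.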
    # pa = ProblemAnalysis(llm)
    # problem_analysis = pa.analysis(problem_str, round=1)
    # print(problem_analysis)
    # print('---')

    # pm = ProblemModeling(llm)
    # modeling_solution = pm.modeling(problem_str, problem_analysis, round=1)
    # print(modeling_solution)
    # print('---')

    # td = TaskDecompose(llm)
    # task_descriptions = td.decompose_and_refine(problem_str, problem_analysis, modeling_solution, problem_type, tasknum)
    # print(task_descriptions)
    # print('---')

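    # Run the Task agent over the first task description only:
    # analysis -> formulas -> modeling -> result -> answer.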
    task = Task(llm)
    for task_description in task_descriptions[:1]:
        task_analysis = task.analysis(task_description)
        task_formulas = task.formulas(problem['data_description'], task_description, task_analysis)
        task_modeling = task.modeling(problem['data_description'], task_description, task_analysis, task_formulas)
        task_result = task.result(task_description, task_analysis, task_formulas, task_modeling)
        task_answer = task.answer(task_description, task_analysis, task_formulas, task_modeling, task_result)
        paper['tasks'].append({
            'task_description': task_description,
            'task_analysis': task_analysis,
            'mathematical_formulas': task_formulas,
            'mathematical_modeling_process': task_modeling,
            'result': task_result,
            'answer': task_answer
        })
    print(paper['tasks'])

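    # Report cumulative token usage for the whole run.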
    print(llm.get_total_usage())