from llm.llm import LLM
from input.problem import problem_input
# Precomputed intermediate results (likely kept for skipping steps while debugging);
# all four names are recomputed below, so this import is effectively unused in a full run.
from input.test_middle_result import problem_str, selected_models, modeling_solution, task_descriptions
from agent.model_selection import ModelSelection
from agent.modeling import Modeling
from agent.task_decompse import TaskDecompose
from agent.task import Task
from utils.utils import write_json_file, write_markdown_file, json_to_markdown


if __name__ == "__main__":
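    # End-to-end pipeline: load the problem, select candidate models, draft a
    # modeling solution, decompose it into tasks, then solve each task and write
    # the assembled paper to JSON and Markdown. One LLM client is shared by all
    # agents, and `paper` accumulates every section of the output.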
    llm = LLM('deepseek-chat')
    paper = {'tasks': []}
    
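    # Load the competition problem; problem_input returns a string form of the
    # problem plus a parsed dict (background, problem requirement, data summary).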
    problem_path = 'data/actor_data/input/problem/2024_C.json'
    problem_str, problem = problem_input(problem_path, llm)
    # print(problem_str)
    # print('---')
    paper['problem_background'] = problem['background']
    paper['problem_requirement'] = problem['problem_requirement']

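    # Select candidate mathematical models suited to the problem.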
    ms = ModelSelection(llm)
    selected_models = ms.select_models(problem_str)
    print(selected_models)
    print('---')

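    # Build an overall modeling solution from the selected models.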
    mm = Modeling(llm)
    modeling_solution = mm.modeling(problem_str, selected_models)
    print(modeling_solution)
    print('---')

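    # Decompose the modeling solution into individual task descriptions.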
    td = TaskDecompose(llm)
    task_descriptions = td.decompose(problem_str, modeling_solution)
    print(task_descriptions)
    print('---')

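    # Solve every task in four stages (analysis, modeling, result, answer)
    # and append the outputs to the paper.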
    task = Task(llm)
    for task_description in task_descriptions:
        task_analysis = task.analysis(task_description)
        task_modeling = task.modeling(task_description, task_analysis, problem['data_summary'])
        task_result = task.result(task_description, task_analysis, task_modeling)
        task_answer = task.answer(task_description, task_analysis, task_modeling, task_result)
        paper['tasks'].append({
            'task_description': task_description,
            'task_analysis': task_analysis,
            'mathematical_modeling_process': task_modeling,
            'result': task_result,
            'answer': task_answer
        })
    print(paper)

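    # Report the total LLM usage accumulated over the run.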
    print(llm.get_total_usage())

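    # Persist the assembled paper as JSON and as a rendered Markdown report.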
    write_json_file('data/actor_data/output/paper4.json', paper)
    write_markdown_file('data/actor_data/output/paper4.md', json_to_markdown(paper))