from .base_agent import BaseAgent
from prompt.constants import modeling_methods
from prompt.template import (TASK_ANALYSIS_PROMPT, TASK_RESULT_PROMPT, TASK_ANSWER_PROMPT,
                             TASK_FORMULAS_PROMPT, TASK_FORMULAS_CRITIQUE_PROMPT, TASK_FORMULAS_IMPROVEMENT_PROMPT,
                             TASK_MODELING_PROMPT, TASK_MODELING_CRITIQUE_PROMPT, TASK_MODELING_IMPROVEMENT_PROMPT)


class Task(BaseAgent):
    """Agent that drives the modeling workflow for a single task: analysis,
    formula derivation (with an actor-critic refinement loop), modeling,
    result summarization, and final answer generation."""

    def __init__(self, llm):
        super().__init__(llm)

    def analysis(self, task_description: str, user_prompt: str = ''):
        """Analyze the task description and return the LLM's task analysis."""
        prompt = TASK_ANALYSIS_PROMPT.format(task_description=task_description, user_prompt=user_prompt).strip()
        return self.llm.generate(prompt)

    def formulas_actor(self, data_summary: str, task_description: str, task_analysis: str, user_prompt: str = ''):
        """Draft candidate modeling formulas from the data summary and task analysis."""
        prompt = TASK_FORMULAS_PROMPT.format(data_summary=data_summary, task_description=task_description, task_analysis=task_analysis, modeling_methods=modeling_methods, user_prompt=user_prompt).strip()
        return self.llm.generate(prompt)

    def formulas_critic(self, data_summary: str, task_description: str, task_analysis: str, modeling_formulas: str):
        """Critique the drafted formulas against the data summary and task analysis."""
        prompt = TASK_FORMULAS_CRITIQUE_PROMPT.format(data_summary=data_summary, task_description=task_description, task_analysis=task_analysis, modeling_formulas=modeling_formulas).strip()
        return self.llm.generate(prompt)

    def formulas_improvement(self, data_summary: str, task_description: str, task_analysis: str, modeling_formulas: str, modeling_formulas_critique: str, user_prompt: str = ''):
        """Revise the formulas according to the critique."""
        prompt = TASK_FORMULAS_IMPROVEMENT_PROMPT.format(data_summary=data_summary, task_description=task_description, task_analysis=task_analysis, modeling_formulas=modeling_formulas, modeling_formulas_critique=modeling_formulas_critique, user_prompt=user_prompt).strip()
        return self.llm.generate(prompt)

    def formulas(self, data_summary: str, task_description: str, task_analysis: str, round: int = 1, user_prompt: str = ''):
        """Run the actor-critic-improvement loop for the modeling formulas.

        Note: `round` (the number of critique/improvement iterations) shadows the
        built-in of the same name; the name is kept to preserve the existing keyword API.
        """
        formulas = self.formulas_actor(data_summary, task_description, task_analysis, user_prompt)
        for i in range(round):
            print(f'FORMULAS Round {i + 1}')
            formulas_critique = self.formulas_critic(data_summary, task_description, task_analysis, formulas)
            formulas = self.formulas_improvement(data_summary, task_description, task_analysis, formulas, formulas_critique, user_prompt)
        return formulas

    def modeling_actor(self, data_summary: str, task_description: str, task_analysis: str, formulas: str, user_prompt: str = ''):
        """Produce the modeling process from the data summary, analysis, and formulas."""
        prompt = TASK_MODELING_PROMPT.format(data_summary=data_summary, task_description=task_description, task_analysis=task_analysis, modeling_formulas=formulas, user_prompt=user_prompt).strip()
        return self.llm.generate(prompt)

    # Disabled critique/improvement loop for the modeling step; `modeling` below
    # currently runs only the actor pass.
    # def modeling_critic(self, task_description: str, task_analysis: str, data_summary: str, formulas: str, modeling_process: str):
    #     prompt = TASK_MODELING_CRITIQUE_PROMPT.format(task_description=task_description, task_analysis=task_analysis, data_summary=data_summary, modeling_formulas=formulas, modeling_process=modeling_process).strip()
    #     return self.llm.generate(prompt)

    # def modeling_improvement(self, task_description: str, task_analysis: str, data_summary: str, formulas: str, modeling_process: str, modeling_process_critique: str):
    #     prompt = TASK_MODELING_IMPROVEMENT_PROMPT.format(task_description=task_description, task_analysis=task_analysis, data_summary=data_summary, modeling_formulas=formulas, modeling_process=modeling_process, modeling_process_critique=modeling_process_critique).strip()
    #     return self.llm.generate(prompt)

    # def modeling(self, task_description: str, task_analysis: str, data_summary: str, formulas: str, round: int = 1):
    #     process = self.modeling_actor(task_description, task_analysis, data_summary, formulas)
    #     for i in range(round):
    #         print(f'MODELING Round {i+1}')
    #         process_critique = self.modeling_critic(task_description, task_analysis, data_summary, formulas, process)
    #         process = self.modeling_improvement(task_description, task_analysis, data_summary, formulas, process, process_critique)
    #     return process

    def modeling(self, data_summary: str, task_description: str, task_analysis: str, formulas: str, round: int = 1, user_prompt: str = ''):
        """Generate the modeling process. `round` is accepted for interface
        compatibility but unused while the critique/improvement loop above is disabled."""
        return self.modeling_actor(data_summary, task_description, task_analysis, formulas, user_prompt)

    def result(self, task_description: str, task_analysis: str, task_formulas: str, task_modeling: str, user_prompt: str = ''):
        """Derive the task results from the analysis, formulas, and modeling process."""
        prompt = TASK_RESULT_PROMPT.format(task_description=task_description, task_analysis=task_analysis, task_formulas=task_formulas, task_modeling=task_modeling, user_prompt=user_prompt).strip()
        return self.llm.generate(prompt)

    def answer(self, task_description: str, task_analysis: str, task_formulas: str, task_modeling: str, task_result: str, user_prompt: str = ''):
        """Produce the final answer from the analysis, formulas, modeling process, and results."""
        prompt = TASK_ANSWER_PROMPT.format(task_description=task_description, task_analysis=task_analysis, task_formulas=task_formulas, task_modeling=task_modeling, task_result=task_result, user_prompt=user_prompt).strip()
        return self.llm.generate(prompt)
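
# Minimal usage sketch (illustration only, not part of the agent). It assumes an
# LLM wrapper exposing `generate(prompt: str) -> str`, as this class expects, and
# placeholder inputs `data_summary` / `task_description`. The typical pipeline is
# analysis -> formulas -> modeling -> result -> answer:
#
#     task_agent = Task(llm)  # `llm` is any object with a `generate` method
#     task_analysis = task_agent.analysis(task_description)
#     task_formulas = task_agent.formulas(data_summary, task_description, task_analysis, round=1)
#     task_modeling = task_agent.modeling(data_summary, task_description, task_analysis, task_formulas)
#     task_result = task_agent.result(task_description, task_analysis, task_formulas, task_modeling)
#     final_answer = task_agent.answer(task_description, task_analysis, task_formulas, task_modeling, task_result)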