Spaces:
Running
Running
from huggingface_hub import InferenceClient
from transformers import ReactCodeAgent
from transformers import tool as function_to_tool

from toolformers.base import Conversation, Toolformer
class HuggingFaceConversation(Conversation):
    """A conversation backed by a ReactCodeAgent.

    The agent itself is stateless, so the conversation keeps a running
    transcript of (role, message) pairs and replays it as a textual context
    prefix on every turn.
    """

    def __init__(self, agent: ReactCodeAgent, prompt, category=None):
        """
        Args:
            agent: The ReactCodeAgent used to generate replies.
            prompt: System prompt that seeds the transcript.
            category: Optional tag for the conversation (stored, not used here).
        """
        self.agent = agent
        self.messages = [('system', prompt)]
        self.category = category

    def chat(self, message, role='user', print_output=True) -> str:
        """Send a message to the agent and return its reply.

        Args:
            message: Text of the new message.
            role: Role label recorded for the new message (default 'user').
            print_output: When True, echo the agent's reply to stdout.

        Returns:
            The agent's reply as a string.
        """
        self.messages.append((role, message))

        final_prompt = 'For context, here are the previous messages in the conversation:\n\n'
        # Distinct loop names so we don't shadow the `role`/`message` parameters.
        for msg_role, msg_text in self.messages:
            final_prompt += f'{msg_role.capitalize()}: {msg_text}\n'
        final_prompt += "Don't worry, you don't need to use the same format to reply. Stick with the Task:/Action:/etc. format.\n\n"

        response = self.agent.run(final_prompt)

        # Record the reply so later turns replay the full two-sided history
        # (the original dropped the assistant side entirely).
        self.messages.append(('assistant', response))

        # Bug fix: the original printed unconditionally, ignoring print_output.
        if print_output:
            print(response)

        return response
class HuggingFaceToolformer(Toolformer):
    """Toolformer implementation backed by the Hugging Face Inference API."""

    def __init__(self, model_name, max_tokens=2000):
        """
        Args:
            model_name: Hugging Face model identifier passed to InferenceClient.
            max_tokens: Maximum number of tokens to generate per completion.
        """
        self.model = InferenceClient(model=model_name)
        self.max_tokens = max_tokens

    def llm_engine(self, messages, stop_sequences=None) -> str:
        """Run a chat completion and return the assistant's text.

        Args:
            messages: Chat messages in the format expected by chat_completion.
            stop_sequences: Sequences at which generation stops; defaults to
                ["Task"] (same behavior as before — the mutable default
                argument was replaced with a None sentinel).

        Returns:
            The content of the first completion choice.
        """
        if stop_sequences is None:
            stop_sequences = ["Task"]
        response = self.model.chat_completion(
            messages, stop=stop_sequences, max_tokens=self.max_tokens
        )
        return response.choices[0].message.content

    def new_conversation(self, prompt, tools, category=None):
        """Create a new conversation driven by a fresh ReactCodeAgent.

        Args:
            prompt: System prompt for the conversation.
            tools: Project tool objects; each must expose as_annotated_function().
            category: Optional tag forwarded to the conversation.

        Returns:
            A HuggingFaceConversation wrapping the new agent.
        """
        agent = ReactCodeAgent(
            tools=[function_to_tool(tool.as_annotated_function()) for tool in tools],
            llm_engine=self.llm_engine,
        )
        return HuggingFaceConversation(agent, prompt, category=category)