|
|
import os |
|
|
import sys |
|
|
import gradio as gr |
|
|
from langchain_openai import ChatOpenAI |
|
|
from langgraph.prebuilt import create_react_agent |
|
|
from mcp import ClientSession, StdioServerParameters |
|
|
from mcp.client.stdio import stdio_client |
|
|
from langchain_mcp_adapters.tools import load_mcp_tools |
|
|
from langchain_core.messages import HumanMessage |
|
|
|
|
|
|
|
|
# Nebius AI Studio configuration. The API key is read once at import
# time; restart the process after changing the environment variable.
NEBIUS_API_KEY = os.getenv("NEBIUS_API_KEY")


# OpenAI-compatible REST endpoint for Nebius AI Studio.
NEBIUS_BASE_URL = "https://api.studio.nebius.ai/v1/"


# Model identifier as exposed by the Nebius endpoint.
MODEL_NAME = "meta-llama/Meta-Llama-3.1-70B-Instruct"
|
|
|
|
|
|
|
|
# System prompt injected into every agent run (see run_tutor). The tool
# names mentioned in the text ('write_file', 'run_python_script') are
# assumed to match the tools exported by server.py — TODO confirm.
SYSTEM_PROMPT = """You are a 'Vibe Coding' Python Tutor.


Your goal is not just to talk, but to DO.


1. When a user asks to learn a concept, create a python file illustrating it.


2. RUN the file to show them the output.


3. If there is an error, debug it by reading the file and fixing it.


4. Always explain your reasoning briefly before executing tools.




You have access to a local filesystem. Use 'write_file' to create examples and 'run_python_script' to execute them.


"""
|
|
|
|
|
async def run_tutor(user_message, chat_history):
    """Run one full agent turn against the local MCP tool server.

    Spawns ``server.py`` as a stdio MCP subprocess, loads its tools,
    builds a fresh ReAct agent, and returns the agent's final reply.
    A new connection is opened per request so every turn starts with a
    fresh tool/session context.

    Args:
        user_message: The user's latest chat message.
        chat_history: Accepted for interface compatibility but currently
            unused — each call is stateless by design.

    Returns:
        str: The content of the agent's final message.

    Raises:
        RuntimeError: If the NEBIUS_API_KEY environment variable is not
            set, so misconfiguration fails fast with a clear message
            instead of an opaque auth error from the model endpoint.
    """
    if not NEBIUS_API_KEY:
        raise RuntimeError(
            "NEBIUS_API_KEY environment variable is not set; "
            "export it before starting the app."
        )

    # Launch the MCP tool server as a child process speaking stdio.
    # sys.executable guarantees the same interpreter/venv as this app;
    # the environment is forwarded so the child sees the same config.
    server_params = StdioServerParameters(
        command=sys.executable,
        args=["server.py"],
        env=os.environ.copy(),
    )

    async with stdio_client(server_params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()

            # Adapt the MCP server's tools into LangChain tool objects.
            tools = await load_mcp_tools(session)

            llm = ChatOpenAI(
                api_key=NEBIUS_API_KEY,
                base_url=NEBIUS_BASE_URL,
                model=MODEL_NAME,
                temperature=0.7,
            )

            # NOTE(review): `state_modifier` is the pre-1.0 langgraph
            # keyword; newer releases call this parameter `prompt` —
            # confirm against the pinned langgraph version before renaming.
            agent_executor = create_react_agent(llm, tools, state_modifier=SYSTEM_PROMPT)

            inputs = {"messages": [HumanMessage(content=user_message)]}
            response = await agent_executor.ainvoke(inputs)

            # The last message in the returned state is the final answer.
            return response["messages"][-1].content
|
|
|
|
|
|
|
|
# Gradio front-end: a chat window wired to the MCP-backed tutor agent.
with gr.Blocks(title="AI Python Tutor (MCP Powered)") as demo:
    gr.Markdown("# 🐍 Vibe Coding Tutor")
    gr.Markdown("Powered by **Nebius** (Llama 3.1) & **MCP** (Local Filesystem Access)")

    chatbot = gr.Chatbot(height=600)
    msg = gr.Textbox(placeholder="E.g., Teach me how to use Python decorators with a working example.")

    async def _queue_user_message(text, pairs):
        # Clear the textbox and append the user's message with an empty
        # slot for the assistant's reply to be filled in next.
        updated = [*pairs, [text, None]]
        return "", updated

    async def _fill_bot_reply(pairs):
        # Run the agent on the newest user message and write its answer
        # into the pending slot of the last [user, bot] pair.
        question = pairs[-1][0]
        answer = await run_tutor(question, [])
        pairs[-1][1] = answer
        return pairs

    msg.submit(
        _queue_user_message, [msg, chatbot], [msg, chatbot]
    ).then(_fill_bot_reply, [chatbot], [chatbot])
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # queue() enables request queuing so long-running agent turns
    # don't block other users' events.
    app = demo.queue()
    app.launch()