# AI-Python-Tutor / app.py
import os
import sys

import gradio as gr
from langchain_core.messages import AIMessage, HumanMessage
from langchain_mcp_adapters.tools import load_mcp_tools
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import create_react_agent
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
# --- Configuration ---
# Nebius AI Studio serves an OpenAI-compatible API; only the key is secret.
NEBIUS_API_KEY = os.getenv("NEBIUS_API_KEY")  # may be None if the env var is unset
NEBIUS_BASE_URL = "https://api.studio.nebius.ai/v1/"
MODEL_NAME = "meta-llama/Meta-Llama-3.1-70B-Instruct"

# --- Agent System Prompt ---
# Steers the ReAct agent toward actually using its tools ('write_file',
# 'run_python_script' — presumably defined in server.py; verify there)
# instead of answering purely conversationally.
SYSTEM_PROMPT = """You are a 'Vibe Coding' Python Tutor.
Your goal is not just to talk, but to DO.
1. When a user asks to learn a concept, create a python file illustrating it.
2. RUN the file to show them the output.
3. If there is an error, debug it by reading the file and fixing it.
4. Always explain your reasoning briefly before executing tools.
You have access to a local filesystem. Use 'write_file' to create examples and 'run_python_script' to execute them.
"""
async def run_tutor(user_message, chat_history):
    """Run one agent turn against the local MCP tool server.

    A fresh MCP stdio connection is opened per request so each turn gets a
    clean tool session; conversational continuity comes from replaying
    ``chat_history`` into the message list (previously this parameter was
    accepted but silently ignored).

    Args:
        user_message: The user's latest message text.
        chat_history: Prior turns as ``[user_text, bot_text]`` pairs
            (Gradio tuple format). May be empty or None.

    Returns:
        The agent's final reply text.

    Raises:
        RuntimeError: If NEBIUS_API_KEY is not configured.
    """
    # Fail fast with a clear message instead of an opaque HTTP auth error
    # deep inside the agent run.
    if not NEBIUS_API_KEY:
        raise RuntimeError("NEBIUS_API_KEY environment variable is not set.")

    # 1. Spawn server.py as a child of this exact interpreter so the tool
    #    server runs in the same environment as the app.
    server_params = StdioServerParameters(
        command=sys.executable,
        args=["server.py"],
        env=os.environ.copy(),
    )

    # 2. Connect to the MCP server and expose its tools to LangChain.
    async with stdio_client(server_params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            tools = await load_mcp_tools(session)

            # 3. Nebius is OpenAI-compatible, so ChatOpenAI needs only a
            #    base_url override.
            llm = ChatOpenAI(
                api_key=NEBIUS_API_KEY,
                base_url=NEBIUS_BASE_URL,
                model=MODEL_NAME,
                temperature=0.7,
            )

            # 4. Create the ReAct agent.
            # NOTE(review): newer langgraph releases renamed `state_modifier`
            # to `prompt` — keep in sync with the pinned langgraph version.
            agent_executor = create_react_agent(
                llm, tools, state_modifier=SYSTEM_PROMPT
            )

            # 5. Replay earlier turns so the model keeps conversational
            #    context, then append the new user message.
            messages = []
            for user_text, bot_text in chat_history or []:
                if user_text:
                    messages.append(HumanMessage(content=user_text))
                if bot_text:
                    messages.append(AIMessage(content=bot_text))
            messages.append(HumanMessage(content=user_message))

            # 6. Run the agent and return the text of its final message.
            response = await agent_executor.ainvoke({"messages": messages})
            return response["messages"][-1].content
# --- Gradio UI (Universal Fix) ---
with gr.Blocks(title="AI Python Tutor (MCP Powered)") as demo:
    gr.Markdown("# 🐍 Vibe Coding Tutor")
    gr.Markdown("Powered by **Nebius** (Llama 3.1) & **MCP** (Local Filesystem Access)")
    # Tuple-format chatbot (no type="messages") keeps compatibility across
    # Gradio 3, 4, and 5.
    chatbot = gr.Chatbot(height=600)
    msg = gr.Textbox(placeholder="E.g., Teach me how to use Python decorators with a working example.")

    async def user_turn(user_message, history):
        """Append the user's message as a [user, None] pair and clear the box."""
        return "", history + [[user_message, None]]

    async def bot_turn(history):
        """Fill in the bot half of the last [user, None] pair."""
        # The pending user message is the first element of the last pair.
        last_message = history[-1][0]
        try:
            # Pass the earlier turns so the agent can use conversational
            # context (previously an empty history was always sent).
            response_text = await run_tutor(last_message, history[:-1])
        except Exception as e:
            # Surface failures in the chat instead of leaving the UI stuck
            # on a [user, None] row with no reply.
            response_text = f"⚠️ Error: {e}"
        history[-1][1] = response_text
        return history

    msg.submit(user_turn, [msg, chatbot], [msg, chatbot]).then(
        bot_turn, [chatbot], [chatbot]
    )
# --- Launch ---
if __name__ == "__main__":
    # queue() enables async event handlers and request queuing (required for
    # the async user_turn/bot_turn callbacks on Spaces).
    demo.queue().launch()