ABO4SAMRA committed on
Commit
62bdcc8
·
verified ·
1 Parent(s): dc90612

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -18
app.py CHANGED
@@ -30,8 +30,7 @@ async def run_tutor(user_message, chat_history):
30
  It connects to the local MCP server for every request to ensure fresh context.
31
  """
32
 
33
- # 1. Define Server Parameters (Point to our local server.py)
34
- # We use the current python executable to run the server script
35
  server_params = StdioServerParameters(
36
  command=sys.executable,
37
  args=["server.py"],
@@ -54,46 +53,41 @@ async def run_tutor(user_message, chat_history):
54
  temperature=0.7
55
  )
56
 
57
- # 4. Create Agent (Using LangGraph prebuilt agent)
58
  agent_executor = create_react_agent(llm, tools, state_modifier=SYSTEM_PROMPT)
59
 
60
  # 5. Execute
61
- # LangGraph expects a dictionary with a "messages" key
62
  inputs = {"messages": [HumanMessage(content=user_message)]}
63
-
64
- # We use ainvoke to run the agent asynchronously
65
  response = await agent_executor.ainvoke(inputs)
66
 
67
  # 6. Extract the final response text
68
- # The last message in the list is the AI's final answer
69
  return response["messages"][-1].content
70
 
71
- # --- Gradio UI ---
72
- # REMOVED: theme=gr.themes.Soft() to prevent the TypeError
73
  with gr.Blocks(title="AI Python Tutor (MCP Powered)") as demo:
74
  gr.Markdown("# 🐍 Vibe Coding Tutor")
75
  gr.Markdown("Powered by **Nebius** (Llama 3.1) & **MCP** (Local Filesystem Access)")
76
 
77
- # Type="messages" is critical for Gradio 5 chat interface style
78
- chatbot = gr.Chatbot(height=600, type="messages")
79
  msg = gr.Textbox(placeholder="E.g., Teach me how to use Python decorators with a working example.")
80
 
81
  async def user_turn(user_message, history):
82
- history.append({"role": "user", "content": user_message})
83
- return "", history
 
84
 
85
  async def bot_turn(history):
86
- # Get the last message content
87
- last_message = history[-1]["content"]
88
 
89
  # Run the agent
90
  response_text = await run_tutor(last_message, [])
91
 
92
- # Append response
93
- history.append({"role": "assistant", "content": response_text})
94
  return history
95
 
96
- # Event wiring
97
  msg.submit(user_turn, [msg, chatbot], [msg, chatbot]).then(
98
  bot_turn, [chatbot], [chatbot]
99
  )
 
30
  It connects to the local MCP server for every request to ensure fresh context.
31
  """
32
 
33
+ # 1. Define Server Parameters
 
34
  server_params = StdioServerParameters(
35
  command=sys.executable,
36
  args=["server.py"],
 
53
  temperature=0.7
54
  )
55
 
56
+ # 4. Create Agent
57
  agent_executor = create_react_agent(llm, tools, state_modifier=SYSTEM_PROMPT)
58
 
59
  # 5. Execute
 
60
  inputs = {"messages": [HumanMessage(content=user_message)]}
 
 
61
  response = await agent_executor.ainvoke(inputs)
62
 
63
  # 6. Extract the final response text
 
64
  return response["messages"][-1].content
65
 
66
+ # --- Gradio UI (Universal Fix) ---
 
67
  with gr.Blocks(title="AI Python Tutor (MCP Powered)") as demo:
68
  gr.Markdown("# 🐍 Vibe Coding Tutor")
69
  gr.Markdown("Powered by **Nebius** (Llama 3.1) & **MCP** (Local Filesystem Access)")
70
 
71
+ # REMOVED: type="messages" (This fixes the TypeError)
72
+ chatbot = gr.Chatbot(height=600)
73
  msg = gr.Textbox(placeholder="E.g., Teach me how to use Python decorators with a working example.")
74
 
75
async def user_turn(user_message, history):
    """Queue the user's message and clear the input textbox.

    Appends a ``[user_msg, None]`` pair to the chat history — the
    tuple-style chat format accepted by Gradio 3, 4, and 5. The ``None``
    slot is filled in later by ``bot_turn``.
    """
    updated_history = history + [[user_message, None]]
    return "", updated_history
79
 
80
async def bot_turn(history):
    """Fill in the assistant reply for the newest chat entry.

    The last history entry is a ``[user_msg, None]`` pair (a list, as
    appended by ``user_turn``); its first element is the pending question
    and its second slot receives the agent's answer.
    """
    pending_question = history[-1][0]

    # Ask the agent; an empty list is passed so each request starts a
    # fresh MCP session with no prior chat context.
    answer = await run_tutor(pending_question, [])

    history[-1][1] = answer
    return history
90
 
 
91
  msg.submit(user_turn, [msg, chatbot], [msg, chatbot]).then(
92
  bot_turn, [chatbot], [chatbot]
93
  )