# AI-Python-Tutor / app.py
import os
import sys
import re
import json
import gradio as gr
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import create_react_agent
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
from langchain_mcp_adapters.tools import load_mcp_tools
from langchain_core.messages import HumanMessage, SystemMessage
# --- Configuration ---
# Ensure your API key is set in your environment variables
NEBIUS_API_KEY = os.getenv("NEBIUS_API_KEY")
NEBIUS_BASE_URL = "https://api.studio.nebius.ai/v1/"
# Model Options
AVAILABLE_MODELS = [
"openai/gpt-oss-20b",
"openai/gpt-oss-120b"
]
# --- System Prompt ---
SYSTEM_PROMPT = """You are a 'Vibe Coding' Python Tutor.
Your goal is to teach by DOING and then providing resources.
BEHAVIOR GUIDELINES:
1. **Greetings & Small Talk**: If the user says "hello", "hi", or asks non-coding questions, respond conversationally and politely. Ask them what they want to learn today.
- DO NOT generate the lesson structure, files, or resources for simple greetings.
2. **Teaching Mode**: ONLY when the user asks a coding question or requests a topic (e.g., "dictionaries", "how do loops work"):
- **The Lesson**: Explain the concept clearly.
- **The Code**: ALWAYS create a Python file, run it, and show the output using tools ('write_file', 'run_python_script').
- **The Context**: Use 'list_directory' to see the student's workspace.
CRITICAL FORMATTING INSTRUCTIONS:
When in "Teaching Mode", you MUST end your response by strictly following this format.
Do not add extra text between the sections.
(End of your main lesson text)
---SECTION: SUMMARY---
(Provide 3-4 concise bullet points summarizing the key syntax, functions, or concepts learned in this lesson.)
---SECTION: VIDEOS---
(List 2-3 YouTube search queries or URLs relevant to the topic)
---SECTION: ARTICLES---
(List 2-3 documentation links or course names, e.g., RealPython, FreeCodeCamp)
---SECTION: QUIZ---
Provide a valid JSON list of exactly 3 objects. Do not use Markdown code blocks.
[
{"question": "Question text here?", "options": ["Option A", "Option B", "Option C"], "correct_answer": "Option A", "explanation": "Brief explanation why."}
]
"""
def parse_agent_response(full_text):
"""
Robust parsing using Regex to handle LLM formatting inconsistencies.
Splits the single LLM response into UI components.
Returns: chat_content, summary, videos, articles, quiz_data (list of dicts)
"""
chat_content = full_text
# Default values
summary = "### 📝 Key Takeaways\n*Ask a coding question to get a cheat sheet!*"
videos = "### 📺 Recommended Videos\n*Ask a coding question to get recommendations!*"
articles = "### 📚 Articles & Courses\n*Ask a coding question to get resources!*"
quiz_data = [] # Empty list for interactive quiz
# Regex patterns
summary_pattern = r"---SECTION:\s*SUMMARY\s*---"
video_pattern = r"---SECTION:\s*VIDEOS\s*---"
article_pattern = r"---SECTION:\s*ARTICLES\s*---"
quiz_pattern = r"---SECTION:\s*QUIZ\s*---"
try:
# 1. Extract Chat vs Summary
split_summary = re.split(summary_pattern, full_text, flags=re.IGNORECASE, maxsplit=1)
if len(split_summary) > 1:
chat_content = split_summary[0].strip()
remaining_after_chat = split_summary[1]
# 2. Extract Summary vs Videos
split_video = re.split(video_pattern, remaining_after_chat, flags=re.IGNORECASE, maxsplit=1)
if len(split_video) > 0:
summary_content = split_video[0].strip()
if summary_content:
summary = f"### 📝 Key Takeaways\n{summary_content}"
if len(split_video) > 1:
remaining_after_video = split_video[1]
# 3. Extract Videos vs Articles
split_article = re.split(article_pattern, remaining_after_video, flags=re.IGNORECASE, maxsplit=1)
if len(split_article) > 0:
video_content = split_article[0].strip()
if video_content:
videos = f"### 📺 Recommended Videos\n{video_content}"
if len(split_article) > 1:
remaining_after_article = split_article[1]
# 4. Extract Articles vs Quiz
split_quiz = re.split(quiz_pattern, remaining_after_article, flags=re.IGNORECASE, maxsplit=1)
if len(split_quiz) > 0:
article_content = split_quiz[0].strip()
if article_content:
articles = f"### 📚 Articles & Courses\n{article_content}"
if len(split_quiz) > 1:
quiz_raw_json = split_quiz[1].strip()
# Attempt to parse JSON
try:
# Clean up potential markdown code blocks if the LLM ignored instructions
clean_json = quiz_raw_json.replace("```json", "").replace("```", "").strip()
quiz_data = json.loads(clean_json)
except json.JSONDecodeError as e:
print(f"Quiz JSON Error: {e}")
quiz_data = [] # Fallback
elif "---SECTION: VIDEOS---" in full_text:
# Fallback parsing
split_video_fallback = re.split(video_pattern, full_text, flags=re.IGNORECASE, maxsplit=1)
chat_content = split_video_fallback[0].strip()
except Exception as e:
print(f"Parsing error: {e}")
chat_content = full_text
return chat_content, summary, videos, articles, quiz_data
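# For a plain greeting with no section markers, the parser is effectively a no-op
# on the chat text (hypothetical call shown for illustration):
#
#   chat, summary, videos, articles, quiz = parse_agent_response("Hi! What shall we learn?")
#   # chat == "Hi! What shall we learn?", the three panels keep their placeholder
#   # Markdown, and quiz == [] so the quiz tab stays empty.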
async def run_tutor_dashboard(user_message, model_id):
server_params = StdioServerParameters(
command=sys.executable,
args=["server.py"],
env=os.environ.copy()
)
async with stdio_client(server_params) as (read, write):
async with ClientSession(read, write) as session:
await session.initialize()
tools = await load_mcp_tools(session)
llm = ChatOpenAI(
api_key=NEBIUS_API_KEY,
base_url=NEBIUS_BASE_URL,
model=model_id,
temperature=0.7
)
agent_executor = create_react_agent(llm, tools)
inputs = {
"messages": [
SystemMessage(content=SYSTEM_PROMPT),
HumanMessage(content=user_message)
]
}
response = await agent_executor.ainvoke(inputs)
final_text = response["messages"][-1].content
return parse_agent_response(final_text)
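# run_tutor_dashboard spins up server.py as an MCP stdio subprocess for each request,
# exposes its tools to a ReAct agent, and parses the agent's final message. A manual
# smoke test (assuming server.py and NEBIUS_API_KEY are available) could look like:
#
#   import asyncio
#   chat, summary, videos, articles, quiz = asyncio.run(
#       run_tutor_dashboard("How do for-loops work?", AVAILABLE_MODELS[0])
#   )
#   print(chat)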
def check_quiz(ans1, ans2, quiz_data):
"""Checks user answers against the quiz data."""
    if not quiz_data or len(quiz_data) < 2:
        # Return gr.update so the initially hidden result boxes actually become visible
        warning = gr.update(value="⚠️ Quiz not loaded.", visible=True)
        return warning, warning
# Check Q1
q1 = quiz_data[0]
if ans1 == q1["correct_answer"]:
res1 = f"✅ Correct! {q1['explanation']}"
else:
res1 = f"❌ Incorrect. The correct answer was **{q1['correct_answer']}**.\n\n{q1['explanation']}"
# Check Q2
q2 = quiz_data[1]
if ans2 == q2["correct_answer"]:
res2 = f"✅ Correct! {q2['explanation']}"
else:
res2 = f"❌ Incorrect. The correct answer was **{q2['correct_answer']}**.\n\n{q2['explanation']}"
return gr.update(value=res1, visible=True), gr.update(value=res2, visible=True)
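# check_quiz expects at least two entries shaped like the QUIZ JSON described in the
# system prompt, e.g. (hypothetical data):
#
#   quiz = [
#       {"question": "What does len([1, 2]) return?", "options": ["1", "2", "3"],
#        "correct_answer": "2", "explanation": "len counts the elements."},
#       {"question": "Which literal creates an empty dict?", "options": ["{}", "[]", "()"],
#        "correct_answer": "{}", "explanation": "Curly braces build a dict."},
#   ]
#   check_quiz("2", "[]", quiz)  # -> (gr.update ✅ correct, gr.update ❌ incorrect)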
# --- Gradio Dashboard UI ---
theme = gr.themes.Soft(
primary_hue="slate",
secondary_hue="indigo",
text_size="lg",
spacing_size="md",
font=[gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui"],
).set(
body_background_fill="*neutral_50",
block_background_fill="white",
block_border_width="1px",
block_title_text_weight="600"
)
# Custom CSS
custom_css = """
.tight-header {
margin-bottom: -40px !important;
padding-bottom: 0px !important;
}
.tight-content {
margin-top: 0px !important;
padding-top: 0px !important;
}
.scrollable-right-col {
max-height: 680px;
overflow-y: auto !important;
overflow-x: hidden !important;
padding-right: 10px;
}
"""
with gr.Blocks(title="AI Python Tutor", theme=theme, fill_height=True, css=custom_css) as demo:
# State to hold quiz data
quiz_state = gr.State([])
# --- Header with Model Selector ---
with gr.Row(variant="compact", elem_classes="header-row"):
with gr.Column(scale=1):
gr.Markdown("## 🐍 AI Python Tutor")
with gr.Column(scale=0, min_width=250):
model_selector = gr.Dropdown(
choices=AVAILABLE_MODELS,
value=AVAILABLE_MODELS[0],
label="Select Model",
show_label=False,
container=True,
scale=1
)
with gr.Row(equal_height=True):
# Left Column: Chat & Input
with gr.Column(scale=3, variant="panel"):
with gr.Row():
gr.Markdown("### 💬 Interactive Session")
fullscreen_btn = gr.Button("⛶ Focus Mode", size="sm", variant="secondary", scale=0, min_width=120)
chatbot = gr.Chatbot(
height=600,
show_label=False,
type="messages",
bubble_full_width=False,
show_copy_button=True,
avatar_images=(None, "https://api.dicebear.com/9.x/bottts-neutral/svg?seed=vibe")
)
with gr.Row(equal_height=True):
msg = gr.Textbox(
label="What's your goal?",
placeholder="Type 'Hello' to start, or ask: 'How do lists work?'",
lines=1,
scale=5,
container=False,
autofocus=True
)
submit_btn = gr.Button("🚀 Start", variant="primary", scale=1)
gr.Examples(
examples=[
"Hello! I'm new to Python.",
"How do for-loops work?",
"Explain dictionaries with an example.",
"Write a script to calculate Fibonacci numbers."
],
inputs=msg
)
# Right Column: Resources Dashboard (Side View)
with gr.Column(scale=2, elem_classes="scrollable-right-col") as right_col:
# Moved Key Takeaways INSIDE the tabs to prevent vertical overflow
with gr.Tabs():
with gr.TabItem("📝 Takeaways"):
summary_box_side = gr.Markdown(value="### 📝 Key Takeaways\n*Ask a topic to get a cheat sheet!*", elem_classes="tight-content")
with gr.TabItem("📺 Videos"):
video_box_side = gr.Markdown(value="### Recommended Videos\n*Ask a topic to see video suggestions!*")
with gr.TabItem("📚 Reading"):
article_box_side = gr.Markdown(value="### Articles & Docs\n*Ask a topic to see reading materials!*")
with gr.TabItem("🧠 Quiz"):
# Interactive Quiz UI (Side)
q1_text_side = gr.Markdown("*Quiz will appear here...*")
q1_radio_side = gr.Radio(choices=[], label="Select Answer")
q1_result_side = gr.Markdown(visible=False)
gr.Markdown("---")
q2_text_side = gr.Markdown("")
q2_radio_side = gr.Radio(choices=[], label="Select Answer")
q2_result_side = gr.Markdown(visible=False)
check_btn_side = gr.Button("✅ Check Answers", variant="secondary")
# Bottom Row: Resources Dashboard (Focus Mode View)
with gr.Row(visible=False) as bottom_dashboard:
with gr.Column():
gr.Markdown("### 🎒 Learning Dashboard")
# Moved Key Takeaways INSIDE the tabs for Bottom View too
with gr.Tabs():
with gr.TabItem("📝 Takeaways"):
summary_box_bottom = gr.Markdown(value="### 📝 Key Takeaways\n*Ask a topic to get a cheat sheet!*")
with gr.TabItem("📺 Videos"):
video_box_bottom = gr.Markdown(value="### Recommended Videos\n*Ask a topic to see video suggestions!*")
with gr.TabItem("📚 Reading"):
article_box_bottom = gr.Markdown(value="### Articles & Docs\n*Ask a topic to see reading materials!*")
with gr.TabItem("🧠 Quiz"):
# Interactive Quiz UI (Bottom) - Mirroring functionality
q1_text_bottom = gr.Markdown("*Quiz will appear here...*")
q1_radio_bottom = gr.Radio(choices=[], label="Select Answer")
q1_result_bottom = gr.Markdown(visible=False)
gr.Markdown("---")
q2_text_bottom = gr.Markdown("")
q2_radio_bottom = gr.Radio(choices=[], label="Select Answer")
q2_result_bottom = gr.Markdown(visible=False)
check_btn_bottom = gr.Button("✅ Check Answers", variant="secondary")
# --- Interaction Logic ---
async def respond(user_message, history, model_id):
if history is None: history = []
history.append({"role": "user", "content": user_message})
history.append({"role": "assistant", "content": f"Thinking (using {model_id})..."})
        # First yield: show the "Thinking..." placeholder right away.
        # Must supply one value per component in outputs_list (21 total):
        # chatbot, msg, side resource boxes (x3), quiz_state,
        # side quiz widgets (x6), bottom resource boxes (x3), bottom quiz widgets (x6).
        # gr.update() leaves the existing panels untouched while the agent works.
        yield history, "", gr.update(), gr.update(), gr.update(), [], \
            gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), \
            gr.update(), gr.update(), gr.update(), \
            gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update()
chat_text, summary_text, video_text, article_text, quiz_data = await run_tutor_dashboard(user_message, model_id)
history[-1]["content"] = chat_text
# Prepare Quiz UI updates
if quiz_data and len(quiz_data) >= 2:
q1 = quiz_data[0]
q2 = quiz_data[1]
# Common updates for both views
q1_t = f"### 1. {q1['question']}"
q1_c = q1['options']
q2_t = f"### 2. {q2['question']}"
q2_c = q2['options']
# Reset results and set values
q_upd = [
gr.update(value=q1_t), gr.update(choices=q1_c, value=None, interactive=True), gr.update(value="", visible=False),
gr.update(value=q2_t), gr.update(choices=q2_c, value=None, interactive=True), gr.update(value="", visible=False)
]
else:
# Empty updates if no quiz
q_upd = [gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update()]
# Yield final content
# Note: We duplicate the quiz updates for Side and Bottom views
yield history, "", summary_text, video_text, article_text, quiz_data, \
q_upd[0], q_upd[1], q_upd[2], q_upd[3], q_upd[4], q_upd[5], \
summary_text, video_text, article_text, \
q_upd[0], q_upd[1], q_upd[2], q_upd[3], q_upd[4], q_upd[5]
# --- Focus Mode Logic ---
is_fullscreen = gr.State(False)
def toggle_fullscreen(current_state):
new_state = not current_state
side_visible = not new_state
bottom_visible = new_state
btn_text = "↩ Exit Focus" if new_state else "⛶ Focus Mode"
return new_state, gr.Column(visible=side_visible), gr.Row(visible=bottom_visible), btn_text
fullscreen_btn.click(
toggle_fullscreen,
inputs=[is_fullscreen],
outputs=[is_fullscreen, right_col, bottom_dashboard, fullscreen_btn]
)
# --- Quiz Button Logic ---
# Side View Check
check_btn_side.click(
check_quiz,
inputs=[q1_radio_side, q2_radio_side, quiz_state],
outputs=[q1_result_side, q2_result_side]
)
# Bottom View Check
check_btn_bottom.click(
check_quiz,
inputs=[q1_radio_bottom, q2_radio_bottom, quiz_state],
outputs=[q1_result_bottom, q2_result_bottom]
)
outputs_list = [
chatbot, msg,
summary_box_side, video_box_side, article_box_side,
quiz_state,
q1_text_side, q1_radio_side, q1_result_side, q2_text_side, q2_radio_side, q2_result_side,
summary_box_bottom, video_box_bottom, article_box_bottom,
q1_text_bottom, q1_radio_bottom, q1_result_bottom, q2_text_bottom, q2_radio_bottom, q2_result_bottom
]
submit_btn.click(
respond,
[msg, chatbot, model_selector],
outputs_list
)
msg.submit(
respond,
[msg, chatbot, model_selector],
outputs_list
)
# --- Launch ---
if __name__ == "__main__":
demo.queue().launch()