# AI-Python-Tutor / app.py
import os
import sys
import re
import gradio as gr
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import create_react_agent
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
from langchain_mcp_adapters.tools import load_mcp_tools
from langchain_core.messages import HumanMessage
# --- Configuration ---
NEBIUS_API_KEY = os.getenv("NEBIUS_API_KEY")
NEBIUS_BASE_URL = "https://api.studio.nebius.ai/v1/"
MODEL_NAME = "meta-llama/Meta-Llama-3.1-70B-Instruct"
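# NOTE: the API key must be present in the environment (e.g., as a Hugging Face
# Spaces secret); if NEBIUS_API_KEY is unset, the ChatOpenAI client below cannot authenticate.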
# --- Advanced System Prompt with Structured Output ---
SYSTEM_PROMPT = """You are a 'Vibe Coding' Python Tutor.
Your goal is to teach by DOING and then providing resources.
STRUCTURE OF YOUR RESPONSE:
1. **The Lesson**: Explain the concept and run code using your tools ('write_file', 'run_python_script').
2. **Context**: Use 'list_directory' to see what the student is working on.
CRITICAL: You must end EVERY response with these exact separators to populate the student's dashboard:
---SECTION: VIDEOS---
(List 2-3 YouTube search queries or URLs relevant to the topic)
---SECTION: ARTICLES---
(List 2-3 documentation links or course names, e.g., RealPython, FreeCodeCamp)
---SECTION: QUIZ---
(Create 1 short multiple-choice question to test what you just taught)
"""
def parse_agent_response(full_text):
    """Splits the single LLM response into 4 UI components."""
    # Default content if sections are missing
    chat_content = full_text
    videos = "Ask a coding question to get video recommendations!"
    articles = "Ask a coding question to get reading resources!"
    quiz = "Ask a coding question to take a quiz!"
    # Robust parsing using split
    try:
        if "---SECTION: VIDEOS---" in full_text:
            parts = full_text.split("---SECTION: VIDEOS---")
            chat_content = parts[0].strip()
            remainder = parts[1]
            if "---SECTION: ARTICLES---" in remainder:
                v_parts = remainder.split("---SECTION: ARTICLES---")
                videos = v_parts[0].strip()
                remainder = v_parts[1]
                if "---SECTION: QUIZ---" in remainder:
                    a_parts = remainder.split("---SECTION: QUIZ---")
                    articles = a_parts[0].strip()
                    quiz = a_parts[1].strip()
                else:
                    articles = remainder.strip()
            else:
                videos = remainder.strip()
    except Exception as e:
        print(f"Parsing error: {e}")
    return chat_content, videos, articles, quiz
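
# Behaviour sketch (hypothetical input): if only the VIDEOS separator is present, the text
# before it becomes the chat message, the remainder becomes the video list, and the
# articles/quiz boxes keep their default placeholder strings:
#   parse_agent_response("Lesson...\n---SECTION: VIDEOS---\n- search: python lists")
#   -> ("Lesson...", "- search: python lists",
#       "Ask a coding question to get reading resources!", "Ask a coding question to take a quiz!")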

async def run_tutor_dashboard(user_message, chat_history):
    """
    Main function to run the agent loop and return 4 outputs.
    """
    server_params = StdioServerParameters(
        command=sys.executable,
        args=["server.py"],
        env=os.environ.copy()
    )
    async with stdio_client(server_params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            tools = await load_mcp_tools(session)
            llm = ChatOpenAI(
                api_key=NEBIUS_API_KEY,
                base_url=NEBIUS_BASE_URL,
                model=MODEL_NAME,
                temperature=0.7
            )
            agent_executor = create_react_agent(llm, tools, state_modifier=SYSTEM_PROMPT)
            # Run Agent
            inputs = {"messages": [HumanMessage(content=user_message)]}
            response = await agent_executor.ainvoke(inputs)
            final_text = response["messages"][-1].content
            return parse_agent_response(final_text)
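
# Note: each call above starts a fresh MCP server subprocess (server.py) over stdio and shuts it
# down when the context managers exit, so only files the tools write to disk persist between turns.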
# --- Gradio Dashboard UI ---
with gr.Blocks(title="AI Python Tutor (MCP Dashboard)", theme=gr.themes.Soft()) as demo:
gr.Markdown("# ๐Ÿš€ Vibe Coding Academy")
gr.Markdown("Powered by **Nebius** (Llama 3.1) & **MCP** (Local Filesystem Access)")
with gr.Row():
# Left Column: Chat & Input
with gr.Column(scale=2):
chatbot = gr.Chatbot(height=500, label="Tutor Chat", type="messages")
# BOX 1: Learning Request (Input)
msg = gr.Textbox(
label="1. What do you want to learn?",
placeholder="E.g., How do Python dictionaries work?",
lines=2
)
submit_btn = gr.Button("Start Learning", variant="primary")
# Right Column: Resources Dashboard
with gr.Column(scale=1):
# BOX 2: Videos
video_box = gr.Markdown(
value="### ๐Ÿ“บ Recommended Videos\n*Waiting for topic...*",
label="2. Video References"
)
# BOX 3: Articles/Courses
article_box = gr.Markdown(
value="### ๐Ÿ“š Articles & Courses\n*Waiting for topic...*",
label="3. Articles & Courses"
)
# BOX 4: Quiz
quiz_box = gr.Markdown(
value="### ๐Ÿง  Quick Quiz\n*Waiting for topic...*",
label="4. Knowledge Check"
)

    # --- Interaction Logic ---
    async def user_turn(user_message, history):
        return "", history + [{"role": "user", "content": user_message}]

    async def bot_turn(history):
        last_message = history[-1]["content"]
        # Get all 4 outputs from the agent
        chat_text, video_text, article_text, quiz_text = await run_tutor_dashboard(last_message, [])
        # Update Chatbot history
        history.append({"role": "assistant", "content": chat_text})
        # Return all 4 updates
        return history, video_text, article_text, quiz_text

    # Wire up the inputs and outputs
    # Notice we now output to [chatbot, video_box, article_box, quiz_box]
    submit_btn.click(
        user_turn, [msg, chatbot], [msg, chatbot]
    ).then(
        bot_turn, [chatbot], [chatbot, video_box, article_box, quiz_box]
    )
    msg.submit(
        user_turn, [msg, chatbot], [msg, chatbot]
    ).then(
        bot_turn, [chatbot], [chatbot, video_box, article_box, quiz_box]
    )

# --- Launch ---
if __name__ == "__main__":
    # Enable the request queue so long-running agent turns don't block the UI, then launch the app.
    demo.queue().launch()