"""Gradio dashboard for an MCP-backed "Vibe Coding" Python tutor.

Flow: user message -> LangGraph ReAct agent (Nebius-hosted LLM + MCP tools
served by ``server.py``) -> one text response -> parsed into four UI panes
(chat, videos, articles, quiz).
"""

import os
import re
import sys

import gradio as gr
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_mcp_adapters.tools import load_mcp_tools
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import create_react_agent
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client

# --- Configuration ---
# Ensure your API key is set in your environment variables.
NEBIUS_API_KEY = os.getenv("NEBIUS_API_KEY")
NEBIUS_BASE_URL = "https://api.studio.nebius.ai/v1/"

# Fail loudly at startup instead of with an opaque auth error mid-chat.
if not NEBIUS_API_KEY:
    print("WARNING: NEBIUS_API_KEY is not set; LLM calls will fail.", file=sys.stderr)

# Model options offered in the UI dropdown.
AVAILABLE_MODELS = [
    "openai/gpt-oss-20b",
    "openai/gpt-oss-120b",
]

# --- System Prompt ---
# NOTE(review): the "---SECTION: ...---" markers are load-bearing:
# parse_agent_response() splits the LLM output on them, so keep this prompt
# and the regexes below in sync.  The <details>/<summary> example was
# reconstructed (the tags were stripped in the mangled original) — confirm
# against the upstream prompt.
SYSTEM_PROMPT = """You are a 'Vibe Coding' Python Tutor. Your goal is to teach by DOING and then providing resources.

BEHAVIOR GUIDELINES:
1. **Greetings & Small Talk**: If the user says "hello", "hi", or asks non-coding questions, respond conversationally and politely. Ask them what they want to learn today.
   - DO NOT generate the lesson structure, files, or resources for simple greetings.

2. **Teaching Mode**: ONLY when the user asks a coding question or requests a topic (e.g., "dictionaries", "how do loops work"):
   - **The Lesson**: Explain the concept clearly.
   - **The Code**: ALWAYS create a Python file, run it, and show the output using tools ('write_file', 'run_python_script').
   - **The Context**: Use 'list_directory' to see the student's workspace.

CRITICAL FORMATTING INSTRUCTIONS:
When in "Teaching Mode", you MUST end your response by strictly following this format. Do not add extra text between the sections.

(End of your main lesson text)

---SECTION: VIDEOS---
(List 2-3 YouTube search queries or URLs relevant to the topic)

---SECTION: ARTICLES---
(List 2-3 documentation links or course names, e.g., RealPython, FreeCodeCamp)

---SECTION: QUIZ---
(Create 2 short multiple-choice questions. Use HTML <details> and <summary> tags to hide the answer. Example:
**Question**: ...
- A) ...
- B) ...
<details><summary>👀 Reveal Answers</summary>Correct is A because...</details>
)
"""


def parse_agent_response(full_text):
    """Split one LLM response into (chat, videos, articles, quiz) strings.

    The agent is instructed to append ``---SECTION: VIDEOS/ARTICLES/QUIZ---``
    markers; the regexes tolerate spacing and casing drift.  Any pane whose
    section is missing keeps a "waiting" placeholder, and a total parse
    failure falls back to showing the raw text in the chat pane so no data
    is lost.
    """
    chat_content = full_text

    # Default values for the "waiting" state.
    videos = "### 📺 Recommended Videos\n*Ask a coding question to get recommendations!*"
    articles = "### 📚 Articles & Courses\n*Ask a coding question to get resources!*"
    quiz = "### 🧠 Quick Quiz\n*Ask a coding question to take a quiz!*"

    # Tolerant patterns; examples caught: "---SECTION: VIDEOS---",
    # "---SECTION:VIDEOS ---", "--- section: videos ---" (via IGNORECASE).
    video_pattern = r"---SECTION:\s*VIDEOS\s*---"
    article_pattern = r"---SECTION:\s*ARTICLES\s*---"
    quiz_pattern = r"---SECTION:\s*QUIZ\s*---"

    try:
        # 1. Teaching Mode is signalled by the presence of the VIDEOS marker.
        #    re.split: index 0 is text before the marker, index 1 (if any) after.
        split_video = re.split(video_pattern, full_text, flags=re.IGNORECASE, maxsplit=1)
        if len(split_video) > 1:
            chat_content = split_video[0].strip()
            remaining_resources = split_video[1]

            # 2. Videos vs articles.  re.split always returns >= 1 element,
            #    so index 0 is safe to read unconditionally.
            split_article = re.split(
                article_pattern, remaining_resources, flags=re.IGNORECASE, maxsplit=1
            )
            video_content = split_article[0].strip()
            if video_content:
                videos = f"### 📺 Recommended Videos\n{video_content}"

            if len(split_article) > 1:
                # 3. Articles vs quiz.
                split_quiz = re.split(
                    quiz_pattern, split_article[1], flags=re.IGNORECASE, maxsplit=1
                )
                article_content = split_quiz[0].strip()
                if article_content:
                    articles = f"### 📚 Articles & Courses\n{article_content}"

                if len(split_quiz) > 1:
                    quiz_content = split_quiz[1].strip()
                    if quiz_content:
                        quiz = f"### 🧠 Quick Quiz\n{quiz_content}"
    except Exception as e:
        print(f"Parsing error: {e}")
        # Fallback: show everything in chat if parsing fails completely.
        chat_content = full_text

    return chat_content, videos, articles, quiz


async def run_tutor_dashboard(user_message, model_id):
    """Run one agent turn and return the four parsed UI strings.

    Spawns ``server.py`` as an MCP stdio subprocess for the duration of the
    call, loads its tools, and invokes a ReAct agent built on the selected
    Nebius-hosted model.
    """
    server_params = StdioServerParameters(
        command=sys.executable,  # same interpreter as this process
        args=["server.py"],
        env=os.environ.copy(),
    )

    async with stdio_client(server_params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            tools = await load_mcp_tools(session)

            # Use the model the user selected in the dropdown.
            llm = ChatOpenAI(
                api_key=NEBIUS_API_KEY,
                base_url=NEBIUS_BASE_URL,
                model=model_id,
                temperature=0.7,
            )

            agent_executor = create_react_agent(llm, tools)
            inputs = {
                "messages": [
                    SystemMessage(content=SYSTEM_PROMPT),
                    HumanMessage(content=user_message),
                ]
            }

            response = await agent_executor.ainvoke(inputs)
            final_text = response["messages"][-1].content
            return parse_agent_response(final_text)


# --- Gradio Dashboard UI ---
theme = gr.themes.Soft(
    primary_hue="slate",
    secondary_hue="indigo",
    text_size="lg",
    spacing_size="md",
    font=[gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui"],
).set(
    body_background_fill="*neutral_50",
    block_background_fill="white",
    block_border_width="1px",
    block_title_text_weight="600",
)

with gr.Blocks(title="AI Python Tutor", theme=theme, fill_height=True) as demo:
    # --- Header with model selector ---
    with gr.Row(variant="compact", elem_classes="header-row"):
        with gr.Column(scale=1):
            gr.Markdown("## 🐍 AI Python Tutor")
        with gr.Column(scale=0, min_width=250):
            model_selector = gr.Dropdown(
                choices=AVAILABLE_MODELS,
                value=AVAILABLE_MODELS[0],  # default to the 20b model
                label="Select Model",
                show_label=False,
                container=True,
                scale=1,
            )

    with gr.Row(equal_height=True):
        # Left column: chat & input.
        with gr.Column(scale=3, variant="panel"):
            with gr.Row():
                gr.Markdown("### 💬 Interactive Session")
                fullscreen_btn = gr.Button(
                    "⛶ Focus Mode", size="sm", variant="secondary", scale=0, min_width=120
                )

            chatbot = gr.Chatbot(
                height=600,
                show_label=False,
                type="messages",
                bubble_full_width=False,
                show_copy_button=True,
                avatar_images=(None, "https://api.dicebear.com/9.x/bottts-neutral/svg?seed=vibe"),
            )

            with gr.Row(equal_height=True):
                msg = gr.Textbox(
                    label="What's your goal?",
                    placeholder="Type 'Hello' to start, or ask: 'How do lists work?'",
                    lines=1,
                    scale=5,
                    container=False,
                    autofocus=True,
                )
                submit_btn = gr.Button("🚀 Start", variant="primary", scale=1)

            # Quick examples.
            gr.Examples(
                examples=[
                    "Hello! I'm new to Python.",
                    "How do for-loops work?",
                    "Explain dictionaries with an example.",
                    "Write a script to calculate Fibonacci numbers.",
                ],
                inputs=msg,
            )

        # Right column: resources dashboard (side view, visible by default).
        with gr.Column(scale=2) as right_col:
            gr.Markdown("### 🎒 Learning Dashboard")
            with gr.Tabs():
                with gr.TabItem("📺 Videos"):
                    video_box_side = gr.Markdown(
                        value="### Recommended Videos\n*Ask a topic to see video suggestions!*"
                    )
                with gr.TabItem("📚 Reading"):
                    article_box_side = gr.Markdown(
                        value="### Articles & Docs\n*Ask a topic to see reading materials!*"
                    )
                with gr.TabItem("🧠 Quiz"):
                    quiz_box_side = gr.Markdown(
                        value="### Knowledge Check\n*Ask a topic to unlock the quiz!*"
                    )

    # Bottom row: resources dashboard (focus-mode view, hidden by default).
    with gr.Row(visible=False) as bottom_dashboard:
        with gr.Column():
            gr.Markdown("### 🎒 Learning Dashboard")
            with gr.Tabs():
                with gr.TabItem("📺 Videos"):
                    video_box_bottom = gr.Markdown(
                        value="### Recommended Videos\n*Ask a topic to see video suggestions!*"
                    )
                with gr.TabItem("📚 Reading"):
                    article_box_bottom = gr.Markdown(
                        value="### Articles & Docs\n*Ask a topic to see reading materials!*"
                    )
                with gr.TabItem("🧠 Quiz"):
                    quiz_box_bottom = gr.Markdown(
                        value="### Knowledge Check\n*Ask a topic to unlock the quiz!*"
                    )

    # --- Interaction logic ---
    async def respond(user_message, history, model_id):
        """Generator handler: yields a 'thinking' frame, then the final one."""
        if history is None:
            history = []

        # Show the user's message and an assistant placeholder immediately.
        history.append({"role": "user", "content": user_message})
        history.append({"role": "assistant", "content": f"Thinking (using {model_id})..."})

        # First frame: update chat, clear the textbox, leave the six resource
        # panes untouched (gr.update() no-op) instead of blanking them.
        yield (
            history, "",
            gr.update(), gr.update(), gr.update(),
            gr.update(), gr.update(), gr.update(),
        )

        # Run the agent with the selected model.
        chat_text, video_text, article_text, quiz_text = await run_tutor_dashboard(
            user_message, model_id
        )

        # Replace the placeholder with the real answer and fill BOTH the side
        # and bottom dashboards so Focus Mode stays in sync.
        history[-1]["content"] = chat_text
        yield (
            history, "",
            video_text, article_text, quiz_text,
            video_text, article_text, quiz_text,
        )

    # --- Focus-mode logic ---
    is_fullscreen = gr.State(False)

    def toggle_fullscreen(current_state):
        """Swap the side dashboard for the bottom one (and back).

        Fullscreen: hide the side column, show the bottom row; normal mode is
        the inverse.  Also relabels the toggle button.
        """
        new_state = not current_state
        side_visible = not new_state
        bottom_visible = new_state
        btn_text = "↩ Exit Focus" if new_state else "⛶ Focus Mode"
        return (
            new_state,
            gr.Column(visible=side_visible),
            gr.Row(visible=bottom_visible),
            btn_text,
        )

    fullscreen_btn.click(
        toggle_fullscreen,
        inputs=[is_fullscreen],
        outputs=[is_fullscreen, right_col, bottom_dashboard, fullscreen_btn],
    )

    # Actions — outputs must cover BOTH side and bottom dashboard components.
    outputs_list = [
        chatbot, msg,
        video_box_side, article_box_side, quiz_box_side,
        video_box_bottom, article_box_bottom, quiz_box_bottom,
    ]
    submit_btn.click(respond, [msg, chatbot, model_selector], outputs_list)
    msg.submit(respond, [msg, chatbot, model_selector], outputs_list)


# --- Launch ---
if __name__ == "__main__":
    demo.queue().launch()