import os
import sys
import gradio as gr
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import create_react_agent
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
from langchain_mcp_adapters.tools import load_mcp_tools
from langchain_core.messages import HumanMessage
# --- Configuration ---
NEBIUS_API_KEY = os.getenv("NEBIUS_API_KEY")
NEBIUS_BASE_URL = "https://api.studio.nebius.ai/v1/"
MODEL_NAME = "meta-llama/Meta-Llama-3.1-70B-Instruct"
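
# Optional fail-fast check (a minimal sketch, not required by the app): without it,
# a missing key only surfaces later as an authentication error from ChatOpenAI.
if not NEBIUS_API_KEY:
    print("Warning: NEBIUS_API_KEY is not set; LLM calls will fail.", file=sys.stderr)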
# --- Advanced System Prompt with Structured Output ---
SYSTEM_PROMPT = """You are a 'Vibe Coding' Python Tutor.
Your goal is to teach by DOING and then providing resources.
STRUCTURE OF YOUR RESPONSE:
1. **The Lesson**: Explain the concept and run code using your tools ('write_file', 'run_python_script').
2. **Context**: Use 'list_directory' to see what the student is working on.
CRITICAL: You must end EVERY response with these exact separators to populate the student's dashboard:
---SECTION: VIDEOS---
(List 2-3 YouTube search queries or URLs relevant to the topic)
---SECTION: ARTICLES---
(List 2-3 documentation links or course names, e.g., RealPython, FreeCodeCamp)
---SECTION: QUIZ---
(Create 1 short multiple-choice question to test what you just taught)
"""

def parse_agent_response(full_text):
    """Splits the single LLM response into 4 UI components."""
    # Default content if sections are missing
    chat_content = full_text
    videos = "Ask a coding question to get video recommendations!"
    articles = "Ask a coding question to get reading resources!"
    quiz = "Ask a coding question to take a quiz!"
    # Robust parsing using split
    try:
        if "---SECTION: VIDEOS---" in full_text:
            parts = full_text.split("---SECTION: VIDEOS---")
            chat_content = parts[0].strip()
            remainder = parts[1]
            if "---SECTION: ARTICLES---" in remainder:
                v_parts = remainder.split("---SECTION: ARTICLES---")
                videos = v_parts[0].strip()
                remainder = v_parts[1]
                if "---SECTION: QUIZ---" in remainder:
                    a_parts = remainder.split("---SECTION: QUIZ---")
                    articles = a_parts[0].strip()
                    quiz = a_parts[1].strip()
                else:
                    articles = remainder.strip()
            else:
                videos = remainder.strip()
    except Exception as e:
        print(f"Parsing error: {e}")
    return chat_content, videos, articles, quiz
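
# Illustrative only (not called by the app): a quick way to sanity-check the parser
# against a response that follows the SYSTEM_PROMPT separators. The sample text is
# invented for demonstration; call _demo_parse() from a REPL if you want to try it.
def _demo_parse():
    sample = (
        "Dictionaries map keys to values. I wrote and ran dict_demo.py for you.\n"
        "---SECTION: VIDEOS---\n- 'Python dictionaries tutorial'\n"
        "---SECTION: ARTICLES---\n- RealPython: Dictionaries in Python\n"
        "---SECTION: QUIZ---\nWhich method returns a dict's keys? a) keys() b) values()"
    )
    chat, videos, articles, quiz = parse_agent_response(sample)
    print(chat, videos, articles, quiz, sep="\n---\n")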

async def run_tutor_dashboard(user_message, chat_history):
    """
    Main function to run the agent loop and return 4 outputs.
    """
    # Launch the local MCP tool server (server.py) as a subprocess over stdio
    server_params = StdioServerParameters(
        command=sys.executable,
        args=["server.py"],
        env=os.environ.copy()
    )
    async with stdio_client(server_params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            tools = await load_mcp_tools(session)
            llm = ChatOpenAI(
                api_key=NEBIUS_API_KEY,
                base_url=NEBIUS_BASE_URL,
                model=MODEL_NAME,
                temperature=0.7
            )
            # NOTE: newer langgraph releases rename state_modifier to prompt
            agent_executor = create_react_agent(llm, tools, state_modifier=SYSTEM_PROMPT)
            # Run Agent
            inputs = {"messages": [HumanMessage(content=user_message)]}
            response = await agent_executor.ainvoke(inputs)
            final_text = response["messages"][-1].content
            return parse_agent_response(final_text)
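
# Illustrative only: the coroutine can be exercised outside Gradio for debugging,
# assuming server.py sits next to this file and NEBIUS_API_KEY is set. This helper
# is hypothetical convenience code, not part of the original flow.
def _debug_run(question="How do Python dictionaries work?"):
    import asyncio
    return asyncio.run(run_tutor_dashboard(question, []))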
# --- Gradio Dashboard UI ---
with gr.Blocks(title="AI Python Tutor (MCP Dashboard)", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🐍 Vibe Coding Academy")
    gr.Markdown("Powered by **Nebius** (Llama 3.1) & **MCP** (Local Filesystem Access)")
    with gr.Row():
        # Left Column: Chat & Input
        with gr.Column(scale=2):
            chatbot = gr.Chatbot(height=500, label="Tutor Chat", type="messages")
            # BOX 1: Learning Request (Input)
            msg = gr.Textbox(
                label="1. What do you want to learn?",
                placeholder="E.g., How do Python dictionaries work?",
                lines=2
            )
            submit_btn = gr.Button("Start Learning", variant="primary")
        # Right Column: Resources Dashboard
        with gr.Column(scale=1):
            # BOX 2: Videos
            video_box = gr.Markdown(
                value="### 📺 Recommended Videos\n*Waiting for topic...*",
                label="2. Video References"
            )
            # BOX 3: Articles/Courses
            article_box = gr.Markdown(
                value="### 📚 Articles & Courses\n*Waiting for topic...*",
                label="3. Articles & Courses"
            )
            # BOX 4: Quiz
            quiz_box = gr.Markdown(
                value="### 🧠 Quick Quiz\n*Waiting for topic...*",
                label="4. Knowledge Check"
            )
    # --- Interaction Logic ---
    async def user_turn(user_message, history):
        return "", history + [{"role": "user", "content": user_message}]

    async def bot_turn(history):
        last_message = history[-1]["content"]
        # Get all 4 outputs from the agent
        chat_text, video_text, article_text, quiz_text = await run_tutor_dashboard(last_message, [])
        # Update Chatbot history
        history.append({"role": "assistant", "content": chat_text})
        # Return all 4 updates
        return history, video_text, article_text, quiz_text

    # Wire up the inputs and outputs
    # Notice we now output to [chatbot, video_box, article_box, quiz_box]
    submit_btn.click(
        user_turn, [msg, chatbot], [msg, chatbot]
    ).then(
        bot_turn, [chatbot], [chatbot, video_box, article_box, quiz_box]
    )
    msg.submit(
        user_turn, [msg, chatbot], [msg, chatbot]
    ).then(
        bot_turn, [chatbot], [chatbot, video_box, article_box, quiz_box]
    )

# --- Launch ---
if __name__ == "__main__":
    demo.queue().launch()