|
|
import os |
|
|
import sys |
|
|
import re |
|
|
import gradio as gr |
|
|
from langchain_openai import ChatOpenAI |
|
|
from langgraph.prebuilt import create_react_agent |
|
|
from mcp import ClientSession, StdioServerParameters |
|
|
from mcp.client.stdio import stdio_client |
|
|
from langchain_mcp_adapters.tools import load_mcp_tools |
|
|
from langchain_core.messages import HumanMessage |
|
|
|
|
|
|
|
|
# --- Nebius AI Studio configuration (OpenAI-compatible endpoint) ---
# API key is read from the environment; os.getenv returns None if unset,
# which will surface as an auth error when ChatOpenAI first makes a request.
NEBIUS_API_KEY = os.getenv("NEBIUS_API_KEY")

# Base URL of the OpenAI-compatible API served by Nebius AI Studio.
NEBIUS_BASE_URL = "https://api.studio.nebius.ai/v1/"

# Model identifier passed to ChatOpenAI below.
MODEL_NAME = "meta-llama/Meta-Llama-3.1-70B-Instruct"
|
|
|
|
|
|
|
|
# System prompt for the ReAct agent.  It instructs the model to terminate
# every reply with three literal separator lines; parse_agent_response()
# splits on these exact strings, so the separators here and the ones in that
# function must never be changed independently of each other.
SYSTEM_PROMPT = """You are a 'Vibe Coding' Python Tutor.


Your goal is to teach by DOING and then providing resources.





STRUCTURE OF YOUR RESPONSE:


1. **The Lesson**: Explain the concept and run code using your tools ('write_file', 'run_python_script').


2. **Context**: Use 'list_directory' to see what the student is working on.





CRITICAL: You must end EVERY response with these exact separators to populate the student's dashboard:





---SECTION: VIDEOS---


(List 2-3 YouTube search queries or URLs relevant to the topic)





---SECTION: ARTICLES---


(List 2-3 documentation links or course names, e.g., RealPython, FreeCodeCamp)





---SECTION: QUIZ---


(Create 1 short multiple-choice question to test what you just taught)


"""
|
|
|
|
|
def parse_agent_response(full_text):
    """Split a single LLM response into the 4 dashboard components.

    The agent is prompted (see SYSTEM_PROMPT) to end every answer with three
    literal separators, in this order:

        ---SECTION: VIDEOS---
        ---SECTION: ARTICLES---
        ---SECTION: QUIZ---

    Any separator may be missing (LLM output is best-effort); the
    corresponding component then falls back to a friendly placeholder.

    Args:
        full_text: Raw text of the agent's final message.

    Returns:
        Tuple of four strings: (chat_content, videos, articles, quiz).
    """
    # Defaults used when a separator is absent from the response.
    chat_content = full_text
    videos = "Ask a coding question to get video recommendations!"
    articles = "Ask a coding question to get reading resources!"
    quiz = "Ask a coding question to take a quiz!"

    try:
        # maxsplit=1 throughout: if the model accidentally emits a separator
        # twice, everything after the first occurrence is kept intact instead
        # of being silently dropped (plain split() would discard parts[2:]).
        if "---SECTION: VIDEOS---" in full_text:
            chat_content, remainder = full_text.split("---SECTION: VIDEOS---", 1)
            chat_content = chat_content.strip()

            if "---SECTION: ARTICLES---" in remainder:
                videos, remainder = remainder.split("---SECTION: ARTICLES---", 1)
                videos = videos.strip()

                if "---SECTION: QUIZ---" in remainder:
                    articles, quiz = remainder.split("---SECTION: QUIZ---", 1)
                    articles = articles.strip()
                    quiz = quiz.strip()
                else:
                    # QUIZ separator missing: the rest is all articles.
                    articles = remainder.strip()
            else:
                # ARTICLES separator missing: the rest is all videos.
                videos = remainder.strip()
    except Exception as e:
        # Best-effort parsing: on any failure, return whatever was extracted
        # so far rather than crashing the UI callback.
        print(f"Parsing error: {e}")

    return chat_content, videos, articles, quiz
|
|
|
|
|
async def run_tutor_dashboard(user_message, chat_history):
    """Run one full agent turn and return the four dashboard strings.

    Spawns the local MCP tool server (server.py) over stdio, loads its tools,
    builds a fresh ReAct agent backed by the Nebius LLM, sends the user's
    message, and parses the final reply into the 4 UI components.

    Args:
        user_message: The student's latest question (plain text).
        chat_history: Currently unused — only the latest message is sent to
            the agent, so each turn is stateless.

    Returns:
        Tuple of (chat_content, videos, articles, quiz) strings, as produced
        by parse_agent_response().
    """
    # Launch server.py as a subprocess using the same interpreter; the full
    # environment is forwarded so the server sees any needed variables.
    server_params = StdioServerParameters(
        command=sys.executable,
        args=["server.py"],
        env=os.environ.copy()
    )

    # The stdio transport and MCP session are scoped to this turn: the tool
    # server is started and torn down on every call.
    async with stdio_client(server_params) as (read, write):
        async with ClientSession(read, write) as session:
            # Handshake must complete before tools can be listed/loaded.
            await session.initialize()
            # Wrap the server's MCP tools as LangChain tools for the agent.
            tools = await load_mcp_tools(session)

            llm = ChatOpenAI(
                api_key=NEBIUS_API_KEY,
                base_url=NEBIUS_BASE_URL,
                model=MODEL_NAME,
                temperature=0.7
            )

            # NOTE(review): 'state_modifier' was renamed to 'prompt' in newer
            # langgraph releases — confirm the installed version accepts it.
            agent_executor = create_react_agent(llm, tools, state_modifier=SYSTEM_PROMPT)

            # Only the latest user message is sent; see docstring.
            inputs = {"messages": [HumanMessage(content=user_message)]}
            response = await agent_executor.ainvoke(inputs)
            # The last message in the result is the agent's final answer.
            final_text = response["messages"][-1].content

            return parse_agent_response(final_text)
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# Gradio UI: a two-column dashboard.  Left column holds the chat; right
# column holds three Markdown panels (videos / articles / quiz) that are
# refreshed from the parsed agent response after every turn.
# NOTE(review): the heading strings contain mojibake ("๐...") — presumably
# emoji lost in an encoding mix-up; restore the intended characters.
# ---------------------------------------------------------------------------
with gr.Blocks(title="AI Python Tutor (MCP Dashboard)", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# ๐ Vibe Coding Academy")
    gr.Markdown("Powered by **Nebius** (Llama 3.1) & **MCP** (Local Filesystem Access)")

    with gr.Row():

        # Left column (wider): chat transcript, input box, submit button.
        with gr.Column(scale=2):
            # type="messages" => history is a list of {"role", "content"}
            # dicts, matching what user_turn/bot_turn below build.
            chatbot = gr.Chatbot(height=500, label="Tutor Chat", type="messages")

            msg = gr.Textbox(
                label="1. What do you want to learn?",
                placeholder="E.g., How do Python dictionaries work?",
                lines=2
            )
            submit_btn = gr.Button("Start Learning", variant="primary")

        # Right column (narrower): the three resource panels.
        with gr.Column(scale=1):

            video_box = gr.Markdown(
                value="### ๐บ Recommended Videos\n*Waiting for topic...*",
                label="2. Video References"
            )

            article_box = gr.Markdown(
                value="### ๐ Articles & Courses\n*Waiting for topic...*",
                label="3. Articles & Courses"
            )

            quiz_box = gr.Markdown(
                value="### ๐ง Quick Quiz\n*Waiting for topic...*",
                label="4. Knowledge Check"
            )

    async def user_turn(user_message, history):
        """Echo the user's message into the chat and clear the textbox."""
        return "", history + [{"role": "user", "content": user_message}]

    async def bot_turn(history):
        """Run the agent on the latest user message and fill all panels."""
        last_message = history[-1]["content"]

        # NOTE(review): an empty list is passed instead of the accumulated
        # history, so the agent sees only the latest message each turn
        # (run_tutor_dashboard currently ignores this argument anyway).
        chat_text, video_text, article_text, quiz_text = await run_tutor_dashboard(last_message, [])

        history.append({"role": "assistant", "content": chat_text})

        return history, video_text, article_text, quiz_text

    # Wire both the button click and Enter-in-textbox to the same two-step
    # pipeline: first echo the user message, then run the agent and update
    # the chat plus the three resource panels.
    submit_btn.click(
        user_turn, [msg, chatbot], [msg, chatbot]
    ).then(
        bot_turn, [chatbot], [chatbot, video_box, article_box, quiz_box]
    )

    msg.submit(
        user_turn, [msg, chatbot], [msg, chatbot]
    ).then(
        bot_turn, [chatbot], [chatbot, video_box, article_box, quiz_box]
    )
|
|
|
|
|
|
|
|
# Entry point: enable Gradio's request queue, then start the web server.
if __name__ == "__main__":
    demo.queue().launch()