Update app.py
Browse files
app.py
CHANGED
|
@@ -13,7 +13,7 @@ NEBIUS_API_KEY = os.getenv("NEBIUS_API_KEY")
|
|
| 13 |
NEBIUS_BASE_URL = "https://api.studio.nebius.ai/v1/"
|
| 14 |
MODEL_NAME = "meta-llama/Meta-Llama-3.1-70B-Instruct"
|
| 15 |
|
| 16 |
-
# ---
|
| 17 |
SYSTEM_PROMPT = """You are a 'Vibe Coding' Python Tutor.
|
| 18 |
Your goal is to teach by DOING and then providing resources.
|
| 19 |
|
|
@@ -35,7 +35,6 @@ CRITICAL: You must end EVERY response with these exact separators to populate th
|
|
| 35 |
|
| 36 |
def parse_agent_response(full_text):
|
| 37 |
"""Splits the single LLM response into 4 UI components."""
|
| 38 |
-
# Default content
|
| 39 |
chat_content = full_text
|
| 40 |
videos = "Ask a coding question to get video recommendations!"
|
| 41 |
articles = "Ask a coding question to get reading resources!"
|
|
@@ -65,7 +64,7 @@ def parse_agent_response(full_text):
|
|
| 65 |
|
| 66 |
return chat_content, videos, articles, quiz
|
| 67 |
|
| 68 |
-
async def run_tutor_dashboard(user_message
|
| 69 |
"""
|
| 70 |
Main function to run the agent loop.
|
| 71 |
"""
|
|
@@ -96,8 +95,8 @@ async def run_tutor_dashboard(user_message, chat_history):
|
|
| 96 |
|
| 97 |
return parse_agent_response(final_text)
|
| 98 |
|
| 99 |
-
# --- Gradio Dashboard UI ---
|
| 100 |
-
#
|
| 101 |
with gr.Blocks(title="AI Python Tutor (MCP Dashboard)") as demo:
|
| 102 |
gr.Markdown("# ๐ Vibe Coding Academy")
|
| 103 |
gr.Markdown("Powered by **Nebius** (Llama 3.1) & **MCP** (Local Filesystem Access)")
|
|
@@ -105,8 +104,7 @@ with gr.Blocks(title="AI Python Tutor (MCP Dashboard)") as demo:
|
|
| 105 |
with gr.Row():
|
| 106 |
# Left Column: Chat & Input
|
| 107 |
with gr.Column(scale=2):
|
| 108 |
-
#
|
| 109 |
-
# This defaults to the standard list-of-lists format [[user, bot], [user, bot]]
|
| 110 |
chatbot = gr.Chatbot(height=500, label="Tutor Chat")
|
| 111 |
|
| 112 |
msg = gr.Textbox(
|
|
@@ -122,34 +120,36 @@ with gr.Blocks(title="AI Python Tutor (MCP Dashboard)") as demo:
|
|
| 122 |
article_box = gr.Markdown(value="### ๐ Articles & Courses\n*Waiting for topic...*", label="3. Articles & Courses")
|
| 123 |
quiz_box = gr.Markdown(value="### ๐ง Quick Quiz\n*Waiting for topic...*", label="4. Knowledge Check")
|
| 124 |
|
| 125 |
-
# --- Interaction Logic (
|
| 126 |
-
async def
|
| 127 |
-
#
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
|
|
|
| 133 |
|
| 134 |
-
# Run agent
|
| 135 |
-
chat_text, video_text, article_text, quiz_text = await run_tutor_dashboard(
|
| 136 |
|
| 137 |
-
# Update the last item
|
| 138 |
history[-1][1] = chat_text
|
| 139 |
|
| 140 |
-
|
|
|
|
| 141 |
|
| 142 |
-
# Wire up inputs/outputs
|
| 143 |
submit_btn.click(
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
| 147 |
)
|
| 148 |
|
| 149 |
msg.submit(
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
|
| 153 |
)
|
| 154 |
|
| 155 |
# --- Launch ---
|
|
|
|
| 13 |
NEBIUS_BASE_URL = "https://api.studio.nebius.ai/v1/"
|
| 14 |
MODEL_NAME = "meta-llama/Meta-Llama-3.1-70B-Instruct"
|
| 15 |
|
| 16 |
+
# --- System Prompt ---
|
| 17 |
SYSTEM_PROMPT = """You are a 'Vibe Coding' Python Tutor.
|
| 18 |
Your goal is to teach by DOING and then providing resources.
|
| 19 |
|
|
|
|
| 35 |
|
| 36 |
def parse_agent_response(full_text):
|
| 37 |
"""Splits the single LLM response into 4 UI components."""
|
|
|
|
| 38 |
chat_content = full_text
|
| 39 |
videos = "Ask a coding question to get video recommendations!"
|
| 40 |
articles = "Ask a coding question to get reading resources!"
|
|
|
|
| 64 |
|
| 65 |
return chat_content, videos, articles, quiz
|
| 66 |
|
| 67 |
+
async def run_tutor_dashboard(user_message):
|
| 68 |
"""
|
| 69 |
Main function to run the agent loop.
|
| 70 |
"""
|
|
|
|
| 95 |
|
| 96 |
return parse_agent_response(final_text)
|
| 97 |
|
| 98 |
+
# --- Gradio Dashboard UI (Safe Mode) ---
|
| 99 |
+
# We removed 'theme' and 'type="messages"' to ensure compatibility
|
| 100 |
with gr.Blocks(title="AI Python Tutor (MCP Dashboard)") as demo:
|
| 101 |
gr.Markdown("# ๐ Vibe Coding Academy")
|
| 102 |
gr.Markdown("Powered by **Nebius** (Llama 3.1) & **MCP** (Local Filesystem Access)")
|
|
|
|
| 104 |
with gr.Row():
|
| 105 |
# Left Column: Chat & Input
|
| 106 |
with gr.Column(scale=2):
|
| 107 |
+
# Standard Chatbot (Expects List of Lists: [[user, bot], ...])
|
|
|
|
| 108 |
chatbot = gr.Chatbot(height=500, label="Tutor Chat")
|
| 109 |
|
| 110 |
msg = gr.Textbox(
|
|
|
|
| 120 |
article_box = gr.Markdown(value="### ๐ Articles & Courses\n*Waiting for topic...*", label="3. Articles & Courses")
|
| 121 |
quiz_box = gr.Markdown(value="### ๐ง Quick Quiz\n*Waiting for topic...*", label="4. Knowledge Check")
|
| 122 |
|
| 123 |
+
# --- Interaction Logic (List-of-Lists Format) ---
|
| 124 |
+
async def respond(user_message, history):
    """Gradio event handler: show the user's turn at once, then stream the answer.

    Yields the 5-tuple (chatbot_history, msg_box, video_md, article_md, quiz_md)
    twice — first immediately so the user's message appears while the agent
    runs, then again with the agent's reply and the three resource panels.
    History uses the classic list-of-lists Chatbot format: [[user, bot], ...].
    """
    # Normalise a missing transcript into an empty list-of-lists.
    history = [] if history is None else history
    # Append the user's turn with a pending (None) bot slot.
    history.append([user_message, None])

    # First yield: echo the user message and clear the textbox right away.
    yield history, "", "", "", ""

    # Run the agent, then fill the pending slot and the side panels.
    reply, videos_md, articles_md, quiz_md = await run_tutor_dashboard(user_message)
    history[-1][1] = reply

    yield history, "", videos_md, articles_md, quiz_md
|
| 141 |
|
| 142 |
+
# Route both the send button and the textbox Enter key through the same
# `respond` generator, with identical inputs and outputs.
_wiring = dict(
    fn=respond,
    inputs=[msg, chatbot],
    outputs=[chatbot, msg, video_box, article_box, quiz_box],
)
submit_btn.click(**_wiring)
msg.submit(**_wiring)
|
| 154 |
|
| 155 |
# --- Launch ---
|