ABO4SAMRA committed on
Commit
dc90612
·
verified ·
1 Parent(s): 9903495

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -3
app.py CHANGED
@@ -9,7 +9,6 @@ from langchain_mcp_adapters.tools import load_mcp_tools
9
  from langchain_core.messages import HumanMessage
10
 
11
  # --- Configuration ---
12
- # Ensure NEBIUS_API_KEY is set in your Space's "Settings -> Secrets"
13
  NEBIUS_API_KEY = os.getenv("NEBIUS_API_KEY")
14
  NEBIUS_BASE_URL = "https://api.studio.nebius.ai/v1/"
15
  MODEL_NAME = "meta-llama/Meta-Llama-3.1-70B-Instruct"
@@ -32,6 +31,7 @@ async def run_tutor(user_message, chat_history):
32
  """
33
 
34
  # 1. Define Server Parameters (Point to our local server.py)
 
35
  server_params = StdioServerParameters(
36
  command=sys.executable,
37
  args=["server.py"],
@@ -55,7 +55,6 @@ async def run_tutor(user_message, chat_history):
55
  )
56
 
57
  # 4. Create Agent (Using LangGraph prebuilt agent)
58
- # This replaces the old AgentExecutor
59
  agent_executor = create_react_agent(llm, tools, state_modifier=SYSTEM_PROMPT)
60
 
61
  # 5. Execute
@@ -70,10 +69,12 @@ async def run_tutor(user_message, chat_history):
70
  return response["messages"][-1].content
71
 
72
  # --- Gradio UI ---
73
- with gr.Blocks(title="AI Python Tutor (MCP Powered)", theme=gr.themes.Soft()) as demo:
 
74
  gr.Markdown("# 🐍 Vibe Coding Tutor")
75
  gr.Markdown("Powered by **Nebius** (Llama 3.1) & **MCP** (Local Filesystem Access)")
76
 
 
77
  chatbot = gr.Chatbot(height=600, type="messages")
78
  msg = gr.Textbox(placeholder="E.g., Teach me how to use Python decorators with a working example.")
79
 
@@ -82,11 +83,17 @@ with gr.Blocks(title="AI Python Tutor (MCP Powered)", theme=gr.themes.Soft()) as
82
  return "", history
83
 
84
  async def bot_turn(history):
 
85
  last_message = history[-1]["content"]
 
 
86
  response_text = await run_tutor(last_message, [])
 
 
87
  history.append({"role": "assistant", "content": response_text})
88
  return history
89
 
 
90
  msg.submit(user_turn, [msg, chatbot], [msg, chatbot]).then(
91
  bot_turn, [chatbot], [chatbot]
92
  )
 
9
  from langchain_core.messages import HumanMessage
10
 
11
  # --- Configuration ---
 
12
  NEBIUS_API_KEY = os.getenv("NEBIUS_API_KEY")
13
  NEBIUS_BASE_URL = "https://api.studio.nebius.ai/v1/"
14
  MODEL_NAME = "meta-llama/Meta-Llama-3.1-70B-Instruct"
 
31
  """
32
 
33
  # 1. Define Server Parameters (Point to our local server.py)
34
+ # We use the current python executable to run the server script
35
  server_params = StdioServerParameters(
36
  command=sys.executable,
37
  args=["server.py"],
 
55
  )
56
 
57
  # 4. Create Agent (Using LangGraph prebuilt agent)
 
58
  agent_executor = create_react_agent(llm, tools, state_modifier=SYSTEM_PROMPT)
59
 
60
  # 5. Execute
 
69
  return response["messages"][-1].content
70
 
71
  # --- Gradio UI ---
72
+ # REMOVED: theme=gr.themes.Soft() to prevent the TypeError
73
+ with gr.Blocks(title="AI Python Tutor (MCP Powered)") as demo:
74
  gr.Markdown("# 🐍 Vibe Coding Tutor")
75
  gr.Markdown("Powered by **Nebius** (Llama 3.1) & **MCP** (Local Filesystem Access)")
76
 
77
+ # type="messages" is critical for the Gradio 5 chat interface style
78
  chatbot = gr.Chatbot(height=600, type="messages")
79
  msg = gr.Textbox(placeholder="E.g., Teach me how to use Python decorators with a working example.")
80
 
 
83
  return "", history
84
 
85
  async def bot_turn(history):
86
+ # Get the last message content
87
  last_message = history[-1]["content"]
88
+
89
+ # Run the agent
90
  response_text = await run_tutor(last_message, [])
91
+
92
+ # Append response
93
  history.append({"role": "assistant", "content": response_text})
94
  return history
95
 
96
+ # Event wiring
97
  msg.submit(user_turn, [msg, chatbot], [msg, chatbot]).then(
98
  bot_turn, [chatbot], [chatbot]
99
  )