burtenshaw
committed on
Commit · 13ab1cf
1 Parent(s): b3deced
refactor the application to focus on advice
app.py
CHANGED
@@ -1,29 +1,23 @@
 import os
-from datetime import datetime
-import random
-from typing import List
 import gradio as gr
-from datasets import load_dataset
-from huggingface_hub import InferenceClient, whoami
-import black
 
 # Initialize the inference client
 HF_TOKEN = os.getenv("HF_TOKEN")
 HF_API_URL = os.getenv("HF_API_URL", "Qwen/Qwen2.5-Coder-32B-Instruct")
-client = InferenceClient(HF_API_URL, token=HF_TOKEN)
 
 # Load questions from Hugging Face dataset
-EXAM_MAX_QUESTIONS = int(
-    os.getenv("EXAM_MAX_QUESTIONS", 5)
-)  # Limit quiz to max questions
-EXAM_PASSING_SCORE = float(os.getenv("EXAM_PASSING_SCORE", 0.8))
 EXAM_DATASET_ID = "agents-course/smolagents-quiz-data"
 
-
 # prep the dataset for the quiz
 ds = load_dataset(EXAM_DATASET_ID, split="train", download_mode="force_redownload")
-quiz_data = list(ds)
-# random.shuffle(quiz_data)
 if EXAM_MAX_QUESTIONS:
     quiz_data = quiz_data[:EXAM_MAX_QUESTIONS]
 
@@ -31,6 +25,30 @@ if EXAM_MAX_QUESTIONS:
 HAS_IMAGE_FEATURE = "image" in ds.features
 
 
 def format_python_code(code: str) -> str:
     """Format Python code using black."""
     try:
@@ -40,80 +58,95 @@ def format_python_code(code: str) -> str:
         return code
 
 
 def check_code(
     user_code: str, solution: str, challenge: str, assessment_criteria: List[str]
-):
     """
-    Use LLM to evaluate the user's code solution against the reference solution.
-    Returns True if the solution is correct, False otherwise.
     """
     # Format both user code and solution
     formatted_user_code = format_python_code(user_code)
     formatted_solution = format_python_code(solution)
 
-    assessment_criteria_str = "\n".join(
-        f"- {criterion}" for criterion in assessment_criteria
-    )
 
-    prompt = f"""Review this coding challenge submission:
-
-    Challenge:
-    {challenge}
-
-    Reference Solution:
-    {formatted_solution}
-
-    Student's Solution:
-    {formatted_user_code}
-
-    Assessment Criteria:
-    {assessment_criteria_str}
-
-    Evaluate if the student's solution is functionally equivalent to the reference solution.
-    Consider:
-    1. Does it solve the problem correctly?
-    2. Does it handle edge cases appropriately?
-    3. Does it follow the requirements of the challenge?
-    4. Does it meet the assessment criteria?
-
-    Respond with ONLY "CORRECT" or "INCORRECT" followed by a brief explanation.
-    """
-
-    messages = [{"role": "user", "content": prompt}]
 
     try:
-        response = client.chat_completion(
-            messages=messages,
-            max_tokens=300,
-            temperature=0.1,
         )
 
-        response_text = response.choices[0].message.content
 
-        # Parse the verdict and explanation from the response
-        is_correct = response_text.strip().upper().startswith("CORRECT")
 
-        explanation = response_text.split("\n", 1)[-1].strip()
-        status = "✅ Correct!" if is_correct else "❌ Incorrect!"
-
-        gr.Info(f"{status}\n\n{explanation}")
 
-        return is_correct
 
     except Exception as e:
-        gr.Warning(f"Error checking code: {str(e)}")
-        is_correct = user_code.strip() == solution.strip()
-        status = (
-            "✅ Correct!" if is_correct else "❌ Incorrect!" + f"\n\nError: {str(e)}"
-        )
-        gr.Info(f"{status} (Fallback comparison)")
-        return is_correct
 
 
 def on_user_logged_in(token: gr.OAuthToken | None):
     """
     Handle user login state.
-    On a valid token, hide the login button and reveal the Start button while keeping Next and Submit hidden.
     Also, clear the question text, code input, status, and image.
     """
     if token is not None:
@@ -121,7 +154,6 @@ def on_user_logged_in(token: gr.OAuthToken | None):
             gr.update(visible=False),  # login_btn hidden
             gr.update(visible=True),  # start_btn shown
             gr.update(visible=False),  # next_btn hidden
-            gr.update(visible=False),  # submit_btn hidden
             "",  # Clear question_text
             gr.update(value="", visible=False),  # Clear code_input
             "",  # Clear status_text
@@ -132,7 +164,6 @@ def on_user_logged_in(token: gr.OAuthToken | None):
         gr.update(visible=True),  # login_btn visible
         gr.update(visible=False),  # start_btn hidden
         gr.update(visible=False),  # next_btn hidden
-        gr.update(visible=False),  # submit_btn hidden
         "",
         gr.update(value="", visible=False),
         "",
@@ -140,92 +171,17 @@ def on_user_logged_in(token: gr.OAuthToken | None):
     )
 
 
-def push_results_to_hub(
-    user_answers: list, token: gr.OAuthToken | None, signed_in_message: str
-):
-    """Push results to Hugging Face Hub."""
-
-    print(f"signed_in_message: {signed_in_message}")
-
-    if not user_answers:  # Check if there are any answers to submit
-        gr.Warning("No answers to submit!")
-        return "No answers to submit!"
-
-    if token is None:
-        gr.Warning("Please log in to Hugging Face before pushing!")
-        return "Please log in to Hugging Face before pushing!"
-
-    # Calculate grade
-    correct_count = sum(1 for answer in user_answers if answer["is_correct"])
-    total_questions = len(user_answers)
-    grade = correct_count / total_questions if total_questions > 0 else 0
-
-    if grade < float(EXAM_PASSING_SCORE):
-        gr.Warning(
-            f"Score {grade:.1%} below passing threshold of {float(EXAM_PASSING_SCORE):.1%}"
-        )
-        return f"You scored {grade:.1%}. Please try again to achieve at least {float(EXAM_PASSING_SCORE):.1%}"
-
-    gr.Info("Submitting answers to the Hub. Please wait...", duration=2)
-
-    user_info = whoami(token=token.token)
-    username = user_info["name"]
-    repo_id = f"{EXAM_DATASET_ID}_responses"
-    submission_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-
-    # Create a dataset with the user's answers and metadata
-    submission_data = [
-        {
-            "username": username,
-            "datetime": submission_time,
-            "grade": grade,
-            **answer,  # Include all answer data
-        }
-        for answer in user_answers
-    ]
-
-    try:
-        # Try to load existing dataset
-        existing_ds = load_dataset(repo_id)
-        # Convert to DatasetDict if it isn't already
-        if not isinstance(existing_ds, dict):
-            existing_ds = DatasetDict({"default": existing_ds})
-    except Exception:
-        # If dataset doesn't exist, create empty DatasetDict
-        existing_ds = DatasetDict()
-
-    # Create new dataset from submission
-    new_ds = Dataset.from_list(submission_data)
-
-    # Add or update the split for this user
-    existing_ds[username] = new_ds
-
-    # Push the updated dataset to the Hub
-    existing_ds.push_to_hub(
-        repo_id,
-        private=True,  # Make it private by default since it contains student submissions
-    )
-
-    return f"Your responses have been submitted to the Hub! Final grade: {grade:.1%}"
-
-
 def handle_quiz(question_idx, user_answers, submitted_code, is_start):
     """Handle quiz state and progression"""
-    # Hide the start button once the first question is shown
-    start_btn_update = gr.update(visible=False) if is_start else None
-
-    # If this is the first time (start=True), begin at question_idx=0
     if is_start:
         question_idx = 0
     else:
-        # If not the first question and there's a submission, store the answer
-        if (
-            question_idx < len(quiz_data) and submitted_code.strip()
-        ):  # Only check if there's code
             current_q = quiz_data[question_idx]
             # Format the submitted code before checking
             formatted_code = format_python_code(submitted_code)
-            is_correct = check_code(
                 formatted_code,
                 current_q["solution"],
                 current_q["challenge"],
@@ -234,47 +190,69 @@ def handle_quiz(question_idx, user_answers, submitted_code, is_start):
             user_answers.append(
                 {
                     "challenge": current_q["challenge"],
-                    "submitted_code": formatted_code,
                     "correct_solution": current_q["solution"],
                     "assessment_criteria": current_q["assessment_criteria"],
-                    "is_correct": is_correct,
                 }
             )
             question_idx += 1
 
     # If we've reached the end, show final results
     if question_idx >= len(quiz_data):
-        correct_count = sum(1 for answer in user_answers if answer["is_correct"])
-        grade = correct_count / len(user_answers) if user_answers else 0
-        results_text = (
-            f"## Quiz Complete!\n\n"
-            f"Your score: {grade:.1%}\n"
-            f"Passing score: {float(EXAM_PASSING_SCORE):.1%}\n\n"
-            f"### Results:\n"
-        )
         for idx, answer in enumerate(user_answers):
             results_text += (
-                f"Question {idx + 1}: {'✅ Correct' if answer['is_correct'] else '❌ Incorrect'}\n"
             )
 
         return (
             "",  # question_text cleared
             gr.update(value="", visible=False),  # hide code_input
-            "Quiz complete! Click 'Submit' to push your results.",  # status_text
            question_idx,  # updated question index
             user_answers,  # accumulated answers
-            gr.update(visible=False),  # start_btn hidden
-            gr.update(visible=False),  # next_btn hidden
-            gr.update(visible=True),  # submit_btn shown
-            gr.update(value=results_text, visible=True),  # final_markdown
-            gr.update(visible=False),  # question_image hidden on completion
         )
     else:
        # Show the next question
         q = quiz_data[question_idx]
-        challenge_text = f"## Question {question_idx + 1}:\n{q['challenge']}"
 
-        # Only show image if the feature exists and the question has one
         show_image = HAS_IMAGE_FEATURE and q.get("image") is not None
         image_update = gr.update(
             value=q.get("image") if show_image else None, visible=show_image
@@ -283,14 +261,13 @@ def handle_quiz(question_idx, user_answers, submitted_code, is_start):
         return (
             challenge_text,  # question_text
             gr.update(value=q["placeholder"], visible=True),  # code_input
-            "Submit your solution",  # status_text
             question_idx,  # updated question_idx
             user_answers,  # user_answers
             gr.update(visible=False),  # start_btn hidden
             gr.update(visible=True),  # next_btn visible
-            gr.update(visible=False),  # submit_btn hidden
             gr.update(visible=False),  # final_markdown hidden
-            image_update,  # question_image
         )
 
@@ -304,8 +281,8 @@ with gr.Blocks() as demo:
     gr.Markdown(f"## Welcome to the {EXAM_DATASET_ID} Quiz")
     with gr.Row(variant="compact"):
         gr.Markdown(
-            "Log in first, then click 'Start' to begin. Complete each coding challenge "
-            "and click 'Submit' to grade your answers."
         )
 
     with gr.Row(variant="panel"):
@@ -328,7 +305,6 @@ with gr.Blocks() as demo:
             login_btn = gr.LoginButton()
             start_btn = gr.Button("Start")
             next_btn = gr.Button("Next ⏭️", visible=False)
-            submit_btn = gr.Button("Submit ✅", visible=False)
 
     with gr.Row(variant="compact"):
         final_markdown = gr.Markdown("", visible=False)
@@ -340,7 +316,6 @@ with gr.Blocks() as demo:
             login_btn,
             start_btn,
             next_btn,
-            submit_btn,
             question_text,
             code_input,
             status_text,
@@ -359,7 +334,6 @@ with gr.Blocks() as demo:
             user_answers,  # Updated user answers (state)
             start_btn,  # Update for start button (will be hidden)
             next_btn,  # Update for next button (shown for in-progress quiz)
-            submit_btn,  # Update for submit button (hidden until end)
             final_markdown,  # Final results markdown (hidden until quiz ends)
             question_image,  # Image update for the quiz question
         ],
@@ -376,18 +350,10 @@ with gr.Blocks() as demo:
             user_answers,
             start_btn,
             next_btn,
-            submit_btn,
             final_markdown,
             question_image,
         ],
     )
 
-    submit_btn.click(
-        fn=push_results_to_hub,
-        inputs=[user_answers, login_btn],
-        outputs=status_text,
-    )
-
-
 if __name__ == "__main__":
     demo.launch()

 import os
+from typing import List, Optional
+from pydantic import BaseModel, Field
 import gradio as gr
+from datasets import load_dataset
+from huggingface_hub import InferenceClient
+import black
 
 # Initialize the inference client
 HF_TOKEN = os.getenv("HF_TOKEN")
 HF_API_URL = os.getenv("HF_API_URL", "Qwen/Qwen2.5-Coder-32B-Instruct")
+client = InferenceClient(model=HF_API_URL, token=HF_TOKEN)
 
 # Load questions from Hugging Face dataset
+EXAM_MAX_QUESTIONS = int(os.getenv("EXAM_MAX_QUESTIONS", 1))
 EXAM_DATASET_ID = "agents-course/smolagents-quiz-data"
 
 # prep the dataset for the quiz
 ds = load_dataset(EXAM_DATASET_ID, split="train", download_mode="force_redownload")
+quiz_data = list(ds)
 if EXAM_MAX_QUESTIONS:
     quiz_data = quiz_data[:EXAM_MAX_QUESTIONS]
 
 HAS_IMAGE_FEATURE = "image" in ds.features
 
 
+class CriterionFeedback(BaseModel):
+    """Feedback for a single assessment criterion"""
+
+    criterion: str = Field(..., description="The assessment criterion being evaluated")
+    met: bool = Field(..., description="Whether the criterion was met")
+    explanation: str = Field(
+        ..., description="Detailed explanation of how well the criterion was met"
+    )
+    improvement_tips: Optional[str] = Field(
+        None, description="Specific tips for improvement if needed"
+    )
+
+
+class CodeFeedback(BaseModel):
+    """Structured feedback for code submission"""
+
+    overall_feedback: str = Field(
+        ..., description="Overall assessment of the code solution"
+    )
+    criteria_feedback: List[CriterionFeedback] = Field(
+        ..., description="Detailed feedback for each assessment criterion"
+    )
+
+
 def format_python_code(code: str) -> str:
     """Format Python code using black."""
     try:
         return code
 
 
+EVALUATION_TEMPLATE = """Evaluate this Python code solution:
+
+Challenge:
+{challenge}
+
+Reference Solution:
+```python
+
+{solution}
+
+```
+
+Student's Solution:
+
+```python
+
+{student_code}
+
+```
+
+Assessment Criteria:
+{criteria}
+
+Approach:
+Be highly tolerant of differences in approach, as long as they meet the Assessment Criteria.
+
+Provide detailed feedback on how well each criterion was met."""
+
+
 def check_code(
     user_code: str, solution: str, challenge: str, assessment_criteria: List[str]
+) -> dict:
     """
+    Use LLM to evaluate the user's code solution and provide structured feedback.
     """
     # Format both user code and solution
     formatted_user_code = format_python_code(user_code)
     formatted_solution = format_python_code(solution)
 
+    # Format criteria as bullet points
+    criteria_text = "\n".join(f"- {c}" for c in assessment_criteria)
 
+    # Fill the template
+    prompt = EVALUATION_TEMPLATE.format(
+        challenge=challenge,
+        solution=formatted_solution,
+        student_code=formatted_user_code,
+        criteria=criteria_text,
+    )
 
     try:
+        # Get structured feedback via a grammar built from the Pydantic model's JSON schema
+        response = client.text_generation(
+            prompt=prompt,
+            grammar={
+                "type": "json_object",
+                "value": CodeFeedback.model_json_schema(),
+            },
         )
 
+        # Parse response into Pydantic model
+        feedback = CodeFeedback.model_validate_json(response)
 
+        # Format the feedback for display
+        formatted_feedback = [
+            f"### Overall Assessment\n{feedback.overall_feedback}\n\n"
+        ]
 
+        for cf in feedback.criteria_feedback:
+            tip = cf.improvement_tips or ""
+            tip_text = f"\n💡 Tip: {tip}" if tip else ""
 
+            formatted_feedback.append(
+                f"### {cf.criterion}\n"
+                f"{'✅' if cf.met else '❌'} {cf.explanation}"
+                f"{tip_text}\n"
+            )
+
+        return {"feedback": "\n".join(formatted_feedback)}
 
     except Exception as e:
+        gr.Warning(f"Error generating feedback: {str(e)}")
+        return {"feedback": "Unable to generate detailed feedback due to an error."}
 
 
 def on_user_logged_in(token: gr.OAuthToken | None):
     """
     Handle user login state.
+    On a valid token, hide the login button and reveal the Start button while keeping Next hidden.
     Also, clear the question text, code input, status, and image.
     """
     if token is not None:
             gr.update(visible=False),  # login_btn hidden
             gr.update(visible=True),  # start_btn shown
             gr.update(visible=False),  # next_btn hidden
             "",  # Clear question_text
             gr.update(value="", visible=False),  # Clear code_input
             "",  # Clear status_text
         gr.update(visible=True),  # login_btn visible
         gr.update(visible=False),  # start_btn hidden
         gr.update(visible=False),  # next_btn hidden
         "",
         gr.update(value="", visible=False),
         "",
     )
 
 
 def handle_quiz(question_idx, user_answers, submitted_code, is_start):
     """Handle quiz state and progression"""
     if is_start:
         question_idx = 0
     else:
+        # If not the first question and there's a submission, store it
+        if question_idx < len(quiz_data) and submitted_code.strip():
             current_q = quiz_data[question_idx]
             # Format the submitted code before checking
             formatted_code = format_python_code(submitted_code)
+            feedback_dict = check_code(
                 formatted_code,
                 current_q["solution"],
                 current_q["challenge"],
             user_answers.append(
                 {
                     "challenge": current_q["challenge"],
+                    "submitted_code": formatted_code,
                     "correct_solution": current_q["solution"],
                     "assessment_criteria": current_q["assessment_criteria"],
+                    "feedback": feedback_dict["feedback"],
                 }
             )
             question_idx += 1
 
     # If we've reached the end, show final results
     if question_idx >= len(quiz_data):
+        results_text = """## Quiz Complete! 📚
+This feedback should help you improve your skills.
+
+⛔️ The feedback uses Qwen/Qwen2.5-Coder-32B-Instruct to compare your response to a gold
+standard solution. As we know, LLMs are not perfect. You should compare your work against
+the assessment criteria if you doubt the feedback.
+
+Here's your detailed feedback:"""
+
         for idx, answer in enumerate(user_answers):
+            # Format assessment criteria as bullet points
+            criteria_bullets = "\n".join(
+                f"- {c}" for c in answer["assessment_criteria"]
+            )
+
+            # Build the results text piece by piece
             results_text += (
+                f"### Question {idx + 1}: {answer['challenge']}\n\n"
+                "#### Your Solution:\n```python\n"
+                f"{answer['submitted_code']}\n```\n\n"
+                "#### Reference Solution:\n```python\n"
+                f"{answer['correct_solution']}\n```\n\n"
+                "#### Assessment Criteria:\n"
+                f"{criteria_bullets}\n\n"
+                "#### Feedback:\n"
+                f"{answer['feedback']}\n\n"
+                "---\n\n"
             )
 
         return (
             "",  # question_text cleared
             gr.update(value="", visible=False),  # hide code_input
+            "Review your feedback below to improve your coding skills!",
             question_idx,  # updated question index
             user_answers,  # accumulated answers
+            gr.update(visible=False),  # start_btn hidden
+            gr.update(visible=False),  # next_btn hidden
+            gr.update(value=results_text, visible=True),  # final_markdown
+            gr.update(visible=False),  # question_image hidden
         )
     else:
         # Show the next question
         q = quiz_data[question_idx]
+        # Format assessment criteria as bullet points
+        criteria_bullets = "\n".join(f"- {c}" for c in q["assessment_criteria"])
+        challenge_text = (
+            f"## Question {question_idx + 1}\n\n"
+            f"### Challenge:\n{q['challenge']}\n\n"
+            "### Assessment Criteria:\n"
+            f"{criteria_bullets}"
+        )
 
+        # Only show image if the feature exists and question has an image
         show_image = HAS_IMAGE_FEATURE and q.get("image") is not None
         image_update = gr.update(
             value=q.get("image") if show_image else None, visible=show_image
         return (
             challenge_text,  # question_text
             gr.update(value=q["placeholder"], visible=True),  # code_input
+            "Submit your solution and click 'Next' to continue.",
             question_idx,  # updated question_idx
             user_answers,  # user_answers
             gr.update(visible=False),  # start_btn hidden
             gr.update(visible=True),  # next_btn visible
             gr.update(visible=False),  # final_markdown hidden
+            image_update,  # question_image
         )
 
 
gr.Markdown(f"## Welcome to the {EXAM_DATASET_ID} Quiz")
|
| 282 |
with gr.Row(variant="compact"):
|
| 283 |
gr.Markdown(
|
| 284 |
+
"Log in first, then click 'Start' to begin. Complete each coding challenge "
|
| 285 |
+
"and click 'Next' to proceed. You'll get feedback on your solutions at the end."
|
| 286 |
)
|
| 287 |
|
| 288 |
with gr.Row(variant="panel"):
|
|
|
|
| 305 |
login_btn = gr.LoginButton()
|
| 306 |
start_btn = gr.Button("Start")
|
| 307 |
next_btn = gr.Button("Next ⏭️", visible=False)
|
|
|
|
| 308 |
|
| 309 |
with gr.Row(variant="compact"):
|
| 310 |
final_markdown = gr.Markdown("", visible=False)
|
|
|
|
| 316 |
login_btn,
|
| 317 |
start_btn,
|
| 318 |
next_btn,
|
|
|
|
| 319 |
question_text,
|
| 320 |
code_input,
|
| 321 |
status_text,
|
|
|
|
| 334 |
user_answers, # Updated user answers (state)
|
| 335 |
start_btn, # Update for start button (will be hidden)
|
| 336 |
next_btn, # Update for next button (shown for in-progress quiz)
|
|
|
|
| 337 |
final_markdown, # Final results markdown (hidden until quiz ends)
|
| 338 |
question_image, # Image update for the quiz question
|
| 339 |
],
|
|
|
|
| 350 |
user_answers,
|
| 351 |
start_btn,
|
| 352 |
next_btn,
|
|
|
|
| 353 |
final_markdown,
|
| 354 |
question_image,
|
| 355 |
],
|
| 356 |
)
|
| 357 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 358 |
if __name__ == "__main__":
|
| 359 |
demo.launch()
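
For reference, a minimal, self-contained sketch of the structured-output pattern the new check_code() relies on: decoding is constrained to the JSON schema of a Pydantic model via the grammar argument of InferenceClient.text_generation, and the raw string is validated back into the model. The grammar shape ({"type": "json_object", "value": ...}) and the validation call mirror the diff above; the Review model, prompt, and model name below are illustrative placeholders, not part of the commit.

import os
from typing import List, Optional

from huggingface_hub import InferenceClient
from pydantic import BaseModel, Field


class Review(BaseModel):
    # Hypothetical stand-in for the commit's CodeFeedback model
    verdict: str = Field(..., description="Short overall judgement")
    tips: Optional[List[str]] = Field(None, description="Improvement tips")


client = InferenceClient(
    model="Qwen/Qwen2.5-Coder-32B-Instruct", token=os.getenv("HF_TOKEN")
)

# Constrain generation to Review's JSON schema (same grammar shape as the diff)
raw = client.text_generation(
    prompt="Review this Python snippet and reply as JSON: print('hi')",
    grammar={"type": "json_object", "value": Review.model_json_schema()},
)

# model_validate_json raises pydantic.ValidationError if the output drifts from the schema
review = Review.model_validate_json(raw)
print(review.verdict)

Because the schema is enforced at decode time rather than parsed after the fact, the except branch in check_code() should mainly catch transport or validation edge cases, not free-form model output.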