"""
Gradio Deployment App for Emotion Classification Model
This app loads the trained model from HuggingFace Hub and creates an interactive interface.
"""
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import numpy as np
from huggingface_hub import hf_hub_download
# Configuration
HF_REPO_ID = "hrshlgunjal/emotion-classifier-deberta-v3" # UPDATE THIS!
LABELS = ["anger", "fear", "joy", "sadness", "surprise"]
MAX_LEN = 128
# Load model and tokenizer
print("Loading model from HuggingFace Hub...")
model = AutoModelForSequenceClassification.from_pretrained(
    HF_REPO_ID,
    num_labels=len(LABELS),
    problem_type="multi_label_classification"
)
tokenizer = AutoTokenizer.from_pretrained(HF_REPO_ID)
model.eval()
print("✅ Model loaded successfully!")
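
# Optional (assumption; not part of the original setup): move the model to a GPU
# when one is available. If enabled, the tokenized inputs in predict_emotions()
# must also be moved to the same device before the forward pass.
# device = "cuda" if torch.cuda.is_available() else "cpu"
# model.to(device)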
# Load optimized thresholds
print("Loading optimized thresholds...")
try:
    threshold_path = hf_hub_download(
        repo_id=HF_REPO_ID,
        filename="best_thresholds.npy"
    )
    thresholds = np.load(threshold_path)
    print("✅ Optimized thresholds loaded!")
except Exception as e:
    print(f"⚠️ Could not load thresholds: {e}")
    print("Using default thresholds of 0.5")
    thresholds = np.array([0.5] * len(LABELS))
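
# Illustrative sketch (an assumption, not taken from the actual training code) of
# how per-label thresholds such as `best_thresholds.npy` are typically produced:
# sweep a grid of cutoffs over held-out validation probabilities and keep, for
# each label, the cutoff that maximizes F1. This helper is never called by the app.
def _sketch_optimize_thresholds(val_probs, val_labels, grid=np.arange(0.10, 0.90, 0.01)):
    """Return one F1-maximizing threshold per label (one column of val_probs each)."""
    best = np.full(val_probs.shape[1], 0.5)
    for i in range(val_probs.shape[1]):
        best_f1 = -1.0
        for t in grid:
            preds = (val_probs[:, i] >= t).astype(int)
            tp = np.sum((preds == 1) & (val_labels[:, i] == 1))
            fp = np.sum((preds == 1) & (val_labels[:, i] == 0))
            fn = np.sum((preds == 0) & (val_labels[:, i] == 1))
            denom = 2 * tp + fp + fn
            f1 = 2 * tp / denom if denom > 0 else 0.0
            if f1 > best_f1:
                best_f1, best[i] = f1, t
    return best
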
# Prediction function
def predict_emotions(text):
    """
    Predict emotions from input text.

    Args:
        text (str): Input text to analyze

    Returns:
        dict: Probability scores for each emotion
    """
    if not text.strip():
        return {label: 0.0 for label in LABELS}

    # Tokenize
    inputs = tokenizer(
        text,
        return_tensors="pt",
        truncation=True,
        max_length=MAX_LEN,
        padding=True
    )

    # Predict
    with torch.no_grad():
        outputs = model(**inputs)

    # Get probabilities
    probs = torch.sigmoid(outputs.logits).cpu().numpy()[0]

    # Apply thresholds for binary predictions
    predictions = (probs >= thresholds).astype(int)

    # Create result dictionary
    result = {}
    for i, label in enumerate(LABELS):
        result[label] = float(probs[i])

    return result
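
# Illustrative usage (scores depend on the trained model):
#   predict_emotions("What a wonderful surprise!")
#   -> {"anger": ..., "fear": ..., "joy": ..., "sadness": ..., "surprise": ...}
# Each value is the sigmoid probability for that label; the per-label thresholds
# are only applied when deciding which emotions to report as detected.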

def predict_with_explanation(text):
    """
    Predict emotions and provide a detailed explanation.

    Args:
        text (str): Input text to analyze

    Returns:
        tuple: (emotion_scores, explanation_text)
    """
    if not text.strip():
        return {label: 0.0 for label in LABELS}, "Please enter some text to analyze."

    # Get predictions
    result = predict_emotions(text)

    # Create explanation
    detected_emotions = []
    for label, score in result.items():
        if score >= thresholds[LABELS.index(label)]:
            detected_emotions.append(f"**{label.capitalize()}** ({score:.2%})")

    if detected_emotions:
        explanation = f"**Detected Emotions:** {', '.join(detected_emotions)}\n\n"
    else:
        explanation = "**No strong emotions detected.**\n\n"

    explanation += "**All Emotion Scores:**\n"
    for label, score in sorted(result.items(), key=lambda x: x[1], reverse=True):
        bar = "█" * int(score * 20)
        explanation += f"- {label.capitalize()}: {bar} {score:.2%}\n"

    return result, explanation
# Example texts
examples = [
    ["I am so excited about this amazing opportunity!"],
    ["I can't believe you did this to me. I'm so angry!"],
    ["I'm terrified of what might happen next."],
    ["This is the saddest day of my life."],
    ["Wow! I didn't expect that at all!"],
    ["I'm feeling really happy and grateful today."],
    ["I'm so frustrated with this situation."],
    ["This news is shocking and scary."],
    ["I'm overjoyed and surprised by this wonderful gift!"],
    ["I'm deeply saddened and disappointed."],
]
# Create Gradio interface
with gr.Blocks(title="🎭 Emotion Classification") as demo:
    gr.Markdown(
        """
        # 🎭 Emotion Classification Model

        This model analyzes text and identifies 5 emotions: **anger**, **fear**, **joy**, **sadness**, and **surprise**.

        ### Features:
        - ✅ Multi-label classification (can detect multiple emotions)
        - ✅ Based on the DeBERTa-v3 transformer model
        - ✅ Trained with 5-fold cross-validation
        - ✅ Optimized per-emotion decision thresholds

        ---
        """
    )

    with gr.Row():
        with gr.Column(scale=2):
            text_input = gr.Textbox(
                label="Enter Text",
                placeholder="Type or paste your text here...",
                lines=5
            )
            with gr.Row():
                submit_btn = gr.Button("Analyze Emotions 🔍", variant="primary")
                clear_btn = gr.Button("Clear 🗑️")

        with gr.Column(scale=1):
            emotion_output = gr.Label(
                label="Emotion Scores",
                num_top_classes=5
            )
            explanation_output = gr.Markdown(label="Detailed Analysis")

    # Example section
    gr.Markdown("### 📝 Try These Examples:")
    gr.Examples(
        examples=examples,
        inputs=text_input,
        outputs=[emotion_output, explanation_output],
        fn=predict_with_explanation,
        cache_examples=False
    )

    # Info section
    gr.Markdown(
        """
        ---
        ### ℹ️ About This Model

        **Model:** microsoft/deberta-v3-base (fine-tuned)

        **Training:**
        - 5-fold stratified cross-validation
        - Mixed precision training (FP16)
        - Threshold optimization for each emotion

        **Performance:**
        - Macro F1 Score: [Your CV Score]
        - Kaggle Score: 8.3+

        **Labels:**
        - 😠 **Anger:** Expressions of anger, frustration, or annoyance
        - 😨 **Fear:** Expressions of fear, anxiety, or worry
        - 😊 **Joy:** Expressions of happiness, pleasure, or satisfaction
        - 😒 **Sadness:** Expressions of sadness, sorrow, or disappointment
        - 😲 **Surprise:** Expressions of surprise, shock, or amazement

        ---
        **Repository:** [HuggingFace Hub](https://huggingface.co/{})

        **Created for:** Deep Learning & Gen AI Project 2025
        """.format(HF_REPO_ID)
    )
    # Button actions
    submit_btn.click(
        fn=predict_with_explanation,
        inputs=text_input,
        outputs=[emotion_output, explanation_output]
    )
    clear_btn.click(
        fn=lambda: ("", {label: 0.0 for label in LABELS}, ""),
        inputs=None,
        outputs=[text_input, emotion_output, explanation_output]
    )
# Launch the app
if __name__ == "__main__":
    demo.launch(
        share=False,  # Set to True to create a public link
        server_name="0.0.0.0",
        server_port=7860
    )
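
# Assumed runtime dependencies (not pinned here; match the training environment
# when deploying, e.g. in a Hugging Face Space requirements.txt):
#   gradio, torch, transformers, sentencepiece (for the DeBERTa-v3 tokenizer),
#   numpy, huggingface_hub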