Runtime error
Update app.py from anycoder
app.py CHANGED
@@ -2,381 +2,330 @@ import gradio as gr
import torch
import os
import time
-import copy
from pathlib import Path
from typing import Optional, Tuple
import spaces

-    VibeVoiceStreamingProcessor,
-)

-class VoiceMapper:
-    """Maps speaker names to voice file paths"""

    def __init__(self):
-        self.

        ]

-                f"Warning: No voice preset found for '{speaker_name}', using default voice: {default_voice}"
-            )
-            return default_voice
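The deleted VoiceMapper body is mostly missing from this view; only the default-voice fallback warning survives above. A hypothetical sketch of the fallback behaviour that warning implies (the class name, directory layout, and file extension are assumptions, not the Space's actual code):

from pathlib import Path

class VoiceMapperSketch:
    """Hypothetical stand-in: map speaker names to stored voice-prompt files."""

    def __init__(self, voices_dir: str = "voices"):
        # e.g. voices/Wayne.pt becomes {"Wayne": "voices/Wayne.pt"}
        self.available_voices = {p.stem: str(p) for p in Path(voices_dir).glob("*.pt")}

    def get_voice_path(self, speaker_name: str) -> str:
        if speaker_name in self.available_voices:
            return self.available_voices[speaker_name]
        if not self.available_voices:
            raise FileNotFoundError("No voice presets available to fall back to")
        # fall back to the first preset, echoing the warning kept in the diff above
        default_voice = next(iter(self.available_voices.values()))
        print(f"Warning: No voice preset found for '{speaker_name}', using default voice: {default_voice}")
        return default_voice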
-# Load model and processor directly
-print("Loading VibeVoice-Realtime model...")

-MODEL_PATH = "microsoft/VibeVoice-Realtime-0.5B"

-# Load

-MODEL = VibeVoiceStreamingForConditionalGenerationInference.from_pretrained(
-    MODEL_PATH,
-    torch_dtype=torch.float16,
-    device_map="cpu",
-    attn_implementation="sdpa",
-)

-MODEL.set_ddpm_inference_steps(num_steps=5)

-# Initialize voice mapper
-VOICE_MAPPER = VoiceMapper()
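The removed loader keeps the checkpoint on CPU in half precision so that no GPU memory is claimed at import time; ZeroGPU Spaces only receive a GPU inside functions decorated with @spaces.GPU. A minimal sketch of that loading pattern, shown with a generic transformers checkpoint because the VibeVoice streaming classes are not a published package (the model id below is a placeholder; the keyword arguments mirror the removed call):

import torch
from transformers import AutoModelForCausalLM

# Weights stay on the CPU until a @spaces.GPU-decorated handler moves them over.
model = AutoModelForCausalLM.from_pretrained(
    "gpt2",                       # placeholder; the Space used microsoft/VibeVoice-Realtime-0.5B
    torch_dtype=torch.float16,    # half precision, as in the removed code
    device_map="cpu",             # defer any .to("cuda") until generation time
    attn_implementation="sdpa",   # PyTorch scaled-dot-product attention
)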
-@spaces.GPU(duration=
-def
    text: str,
-) -> Tuple[Optional[str], str]:
    """
-    Generate speech from
    Args:
-        text: Input text
-        progress: Gradio progress tracker
    Returns:
        Tuple of (audio_path, status_message)
    """
-    if not text
-        return None, "❌ Error: Please

    try:
-        # Clean text
-        full_script = text.strip().replace("’", "'").replace("“", '"').replace("”", '"')

-        # Get voice sample
-        voice_sample = VOICE_MAPPER.get_voice_path(speaker_name)

-            voice_sample, map_location="cuda", weights_only=False
-        )

-        progress(0.2, desc="Preparing inputs...")

-        # Prepare inputs
-        inputs = PROCESSOR.process_input_with_cached_prompt(
-            text=full_script,
-            cached_prompt=all_prefilled_outputs,
-            padding=True,
-            return_tensors="pt",
-            return_attention_mask=True,
-        )

-        # Move model and tensors to GPU
-        MODEL.to("cuda")
-        for k, v in inputs.items():
-            if torch.is_tensor(v):
-                inputs[k] = v.to("cuda")

-        progress(0.4, desc="Generating speech on GPU...")

-        # Generate audio
-        start_time = time.time()
-        with torch.cuda.amp.autocast():  # Enable automatic mixed precision
-            outputs = MODEL.generate(
-                **inputs,
-                max_new_tokens=None,
-                cfg_scale=cfg_scale,
-                tokenizer=PROCESSOR.tokenizer,
-                generation_config={"do_sample": False},
-                verbose=False,
-                all_prefilled_outputs=copy.deepcopy(all_prefilled_outputs)
-                if all_prefilled_outputs is not None
-                else None,
-            )
-        generation_time = time.time() - start_time

-        progress(0.8, desc="Saving audio...")

-        # Calculate metrics
-        if outputs.speech_outputs and outputs.speech_outputs[0] is not None:
-            sample_rate = 24000
-            audio_samples = (
-                outputs.speech_outputs[0].shape[-1]
-                if len(outputs.speech_outputs[0].shape) > 0
-                else len(outputs.speech_outputs[0])
-            )
-            audio_duration = audio_samples / sample_rate
-            rtf = generation_time / audio_duration if audio_duration > 0 else float("inf")
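The real-time factor computed above is simply generation time divided by audio duration, with the duration derived from the sample count at 24 kHz. A small worked example:

sample_rate = 24000                            # Hz, as in the removed code
audio_samples = 48000                          # a 2-second clip
audio_duration = audio_samples / sample_rate   # 2.0 seconds of audio
generation_time = 1.0                          # seconds spent generating
rtf = generation_time / audio_duration         # 0.5, i.e. twice as fast as real time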
-
# Save output
|
| 206 |
-
output_dir = "./outputs"
|
| 207 |
-
os.makedirs(output_dir, exist_ok=True)
|
| 208 |
-
output_path = os.path.join(output_dir, f"generated_{int(time.time())}.wav")
|
| 209 |
-
|
| 210 |
-
PROCESSOR.save_audio(
|
| 211 |
-
outputs.speech_outputs[0].cpu(), # Move to CPU for saving
|
| 212 |
-
output_path=output_path,
|
| 213 |
-
)
|
| 214 |
-
|
| 215 |
-
progress(1.0, desc="Complete!")
|
| 216 |
-
|
| 217 |
-
# Create status message
|
| 218 |
-
status = f"""✅ **Generation Complete!**
|
| 219 |
|
| 220 |
-
|
| 221 |
-
|
| 222 |
-
- Generation Time: {generation_time:.2f}s
|
| 223 |
-
- Real-Time Factor: {rtf:.2f}x
|
| 224 |
-
- Speaker: {speaker_name}
|
| 225 |
-
- CFG Scale: {cfg_scale}
|
| 226 |
-
- Device: ZeroGPU (CUDA)
|
| 227 |
-
"""
|
| 228 |
-
|
| 229 |
-
# Move model back to CPU to free GPU memory
|
| 230 |
-
MODEL.to("cpu")
|
| 231 |
-
torch.cuda.empty_cache()
|
| 232 |
-
|
| 233 |
-
return output_path, status
|
| 234 |
-
else:
|
| 235 |
-
MODEL.to("cpu")
|
| 236 |
-
torch.cuda.empty_cache()
|
| 237 |
-
return None, "❌ Error: No audio output generated."
|
| 238 |
-
|
| 239 |
-
except Exception as e:
|
| 240 |
-
import traceback
|
| 241 |
-
|
| 242 |
-
error_msg = f"❌ Error during generation:\n{str(e)}\n\n{traceback.format_exc()}"
|
| 243 |
-
print(error_msg)
|
| 244 |
|
| 245 |
-
|
| 246 |
-
try:
|
| 247 |
-
MODEL.to("cpu")
|
| 248 |
-
torch.cuda.empty_cache()
|
| 249 |
-
except:
|
| 250 |
-
pass
|
| 251 |
-
|
| 252 |
-
return None, error_msg
|
| 253 |
|
| 254 |
|
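The removed handler follows the usual ZeroGPU lifecycle: weights move to CUDA only inside the @spaces.GPU-decorated function, generation runs under autocast, and the model is returned to CPU with the cache emptied so the shared GPU is released even when generation fails. A condensed, hypothetical sketch of that pattern (the tiny linear model is a stand-in for the real one, and the duration value is an assumption since the original is cut off above):

import spaces
import torch

model = torch.nn.Linear(16, 16).half()  # stand-in model, resident on CPU between requests

@spaces.GPU(duration=60)
def run_on_gpu(x: torch.Tensor) -> torch.Tensor:
    model.to("cuda")
    try:
        with torch.autocast("cuda", dtype=torch.float16):
            return model(x.half().to("cuda")).float().cpu()
    finally:
        model.to("cpu")              # hand the shared ZeroGPU device back
        torch.cuda.empty_cache()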
-# Create Gradio interface
-with gr.Blocks(
    gr.Markdown(
        """
-        </div>
-        """
    )

    with gr.Row():
        with gr.Column(scale=2):
-            #
            )
-                choices=list(VOICE_MAPPER.available_voices.keys()),
-                value=list(VOICE_MAPPER.available_voices.keys())[0]
-                if VOICE_MAPPER.available_voices
-                else None,
-                label="Speaker Voice",
-                info="Select the voice to use for speech generation",
-            )

-            cfg_slider = gr.Slider(
-                minimum=1.0,
-                maximum=3.0,
-                value=1.5,
-                step=0.1,
-                label="CFG Scale",
-                info="Higher values = more faithful to text (1.0-3.0)",
-            )

-            generate_btn = gr.Button("🎵 Generate Speech", variant="primary", size="lg")

-        with gr.Column(scale=1):
-            # Output section
-            audio_output = gr.Audio(
-                label="Generated Speech",
                type="filepath",
                interactive=False,
            )

            """
-            **Status:** Ready

-            ⚡ Using ZeroGPU for efficient processing
            """
        )

-        # Example
        gr.Examples(
            examples=[
-                [
-                    if VOICE_MAPPER.available_voices
-                    else "Wayne",
-                    1.5,
-                ],
-                [
-                    "The quick brown fox jumps over the lazy dog. This is a test of the text-to-speech system.",
-                    list(VOICE_MAPPER.available_voices.keys())[0]
-                    if VOICE_MAPPER.available_voices
-                    else "Wayne",
-                    1.5,
-                ],
            ],
-            inputs=[text_input
-            label="Example
        )

-        # Event handlers
-        generate_btn.click(
-            fn=generate_speech,
-            inputs=[text_input, speaker_dropdown, cfg_slider],
-            outputs=[audio_output, status_output],
-            api_name="generate",
-        )

-        # Footer
-        gr.Markdown(
-            """
-            ---

-            """
        )

-        #

if __name__ == "__main__":
    demo.launch(
        theme=gr.themes.Soft(
-            primary_hue="
-            secondary_hue="indigo",
            neutral_hue="slate",
        ),
        footer_links=[
            {"label": "Built with anycoder", "url": "https://huggingface.co/spaces/akhaliq/anycoder"}
import torch
import os
import time
from pathlib import Path
from typing import Optional, Tuple
import spaces
+import numpy as np
+from datetime import datetime, timedelta

+# Mock classes for demonstration
+class MemorySystem:
+    """Persistent memory system for emotional intelligence"""
+
    def __init__(self):
+        self.user_profiles = {}
+        self.conversation_histories = {}
+        self.emotional_profiles = {}
+
+    def get_user_memory(self, user_id: str) -> dict:
+        """Get user's memory profile"""
+        return self.user_profiles.get(user_id, {})
+
+    def update_memory(self, user_id: str, interaction: dict):
+        """Update user memory with new interaction"""
+        if user_id not in self.user_profiles:
+            self.user_profiles[user_id] = {
+                "personality_type": "unknown",
+                "shadow_aspects": [],
+                "core_wounds": [],
+                "emotional_patterns": {},
+                "healing_journey": [],
+                "transformation_milestones": [],
+                "daily_micro_assessments": [],
+                "psychological_frameworks": {
+                    "integration_level": 0,
+                    "authenticity_score": 0,
+                    "fragmentation_index": 0,
+                },
+            }
+
+    def get_user_personality_type(self, user_id: str) -> str:
+        """Determine user's personality type based on accumulated data"""
+        return "Enigma"  # Default personality type

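A brief usage sketch of the MemorySystem class above: profiles are created lazily on the first update_memory call and read back with get_user_memory (the user id and interaction payload here are illustrative):

memory = MemorySystem()
memory.update_memory("user_1", {"note": "first session"})  # creates a default profile lazily
profile = memory.get_user_memory("user_1")
print(profile["personality_type"])                         # "unknown" until assessments fill it in
print(memory.get_user_personality_type("user_1"))          # always "Enigma" in this mock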
+
class NewMeAI:
|
| 46 |
+
"""NewMe AI Persona with astrological personality and emotional intelligence"""
|
| 47 |
+
|
| 48 |
+
def __init__(self, memory_system: MemorySystem):
|
| 49 |
+
self.memory = memory_system
|
| 50 |
+
self.conversation_styles = [
|
| 51 |
+
"Socratic questioning",
|
| 52 |
+
"Shadow work exploration",
|
| 53 |
+
"Integration therapy",
|
| 54 |
+
"Authenticity assessment",
|
| 55 |
+
"Transformation guidance",
|
| 56 |
]
|
| 57 |
+
|
| 58 |
+
def initiate_conversation(self, user_id: str) -> str:
|
| 59 |
+
"""Initiate conversation based on user memory"""
|
| 60 |
+
memory = self.memory.get_user_memory(user_id)
|
| 61 |
+
|
| 62 |
+
# NewMe's signature opening lines
|
| 63 |
+
opening_phrases = [
|
| 64 |
+
"The stars are particularly aligned for you today...",
|
| 65 |
+
"I sense a shift in your energetic field...",
|
| 66 |
+
"Your celestial chart reveals interesting patterns...",
|
| 67 |
+
]
|
| 68 |
+
|
| 69 |
+
return f"✨ NewMe: {np.random.choice(opening_phrases)}"
|
| 70 |
+
|
| 71 |
+
def process_user_speech(self, audio_input: str) -> str:
|
| 72 |
+
"""Process user speech and generate response"""
|
| 73 |
+
return "Processing your voice... NewMe is responding with astrological wisdom."
|
| 74 |
|
| 75 |
+
class NewomenPlatform:
|
| 76 |
+
"""Main platform orchestrating all components"""
|
| 77 |
+
|
| 78 |
+
def __init__(self):
|
| 79 |
+
self.memory_system = MemorySystem()
|
| 80 |
+
self.newme_ai = NewMeAI(self.memory_system)
|
| 81 |
+
self.assessments = self._create_assessments()
|
| 82 |
+
|
| 83 |
+
def _create_assessments(self) -> list:
|
| 84 |
+
"""Create the 20+ assessments for authenticated users"""
|
| 85 |
+
return [
|
| 86 |
+
"Shadow Integration Assessment",
|
| 87 |
+
"Authenticity Meter Test",
|
| 88 |
+
"Psychological Fragmentation Index",
|
| 89 |
+
"Emotional Intelligence Scale",
|
| 90 |
+
"Astrological Personality Mapping",
|
| 91 |
+
"Core Wound Identification",
|
| 92 |
+
"Transformation Path Analysis",
|
| 93 |
+
"Relationship Compatibility Matrix",
|
| 94 |
+
"Life Purpose Alignment Quiz",
|
| 95 |
+
"Inner Child Connection Test",
|
| 96 |
+
"Karmic Pattern Recognition",
|
| 97 |
+
"Soul Contract Exploration",
|
| 98 |
+
"Past Life Regression Assessment",
|
| 99 |
+
"Chakra Balance Evaluation",
|
| 100 |
+
"Narrative Identity Exploration",
|
| 101 |
+
"Psychological Archetype Mapping",
|
| 102 |
+
"Emotional Blockage Detection",
|
| 103 |
+
"Spiritual Growth Assessment",
|
| 104 |
+
"Psychological Integration Scale",
|
| 105 |
+
"Authentic Self Alignment Test",
|
| 106 |
+
"Shadow Work Progress Meter",
|
| 107 |
+
"Transformation Journey Milestone Tracker",
|
| 108 |
+
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
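For a quick smoke test outside the Gradio UI, the mock platform can be exercised directly; this snippet is illustrative usage rather than code from the commit:

platform = NewomenPlatform()
print(len(platform.assessments))                           # number of assessment titles defined above
print(platform.newme_ai.initiate_conversation("user_1"))   # one of NewMe's canned opening lines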

+# Load models and initialize platform
+print("🚀 Initializing Newomen Platform...")

+PLATFORM = NewomenPlatform()

+print("✅ Platform initialized successfully!")


+@spaces.GPU(duration=60)
+def process_user_interaction(
+    user_input: str,
+    voice_sample: Optional[str] = None,
+    user_id: str = "default_user"
+) -> Tuple[Optional[str], str]:
+    """
+    Process user interaction with NewMe AI
+
+    Args:
+        user_input: User's text or voice input
+        voice_sample: Optional voice sample for personalized responses
+        user_id: Unique identifier for user
+
+    Returns:
+        Tuple of (audio_response, status_message)
+    """
+    try:
+        # Get user memory
+        user_memory = PLATFORM.memory_system.get_user_memory(user_id)
+
+        # NewMe's response generation
+        newme_response = PLATFORM.newme_ai.initiate_conversation(user_id)
+
+        # Simulate AI processing time
+        time.sleep(0.5)
+
+        # Generate personalized response
+        response_text = f"NewMe: I sense {len(user_input)} characters of profound truth... Your astrological chart indicates a need for {np.random.choice(['integration', 'shadow work', 'authenticity'])}"
+
+        return response_text, "✅ NewMe has responded with astrological wisdom"
+
+    except Exception as e:
+        error_msg = f"❌ Error during processing: {str(e)}"
+        return None, error_msg

+@spaces.GPU(duration=30)
+def generate_speech_response(
    text: str,
+    emotional_context: str = "neutral",
+    user_personality: str = "Enigma"
+) -> Tuple[Optional[str], str]:
    """
+    Generate speech response from NewMe AI
+
    Args:
+        text: Input text for speech generation
+        emotional_context: Current emotional context for tone adjustment
+
    Returns:
        Tuple of (audio_path, status_message)
    """
+    if not text.strip():
+        return None, "❌ Error: Please provide some input for NewMe to respond to."

    try:
+        # Simulate speech generation
+        processing_steps = ["Analyzing emotional context", "Generating astrological insights", "Applying psychological frameworks", "Finalizing response"]

+        for step in processing_steps:
+            time.sleep(0.3)

+        # Create response based on personality type
+        base_response = f"✨ NewMe: Based on your celestial alignment today, {text[:20]}... reveals profound truths about your journey."

+        return f"/tmp/newme_response_{int(time.time())}.wav", "✅ NewMe has spoken with astrological wisdom"
+    except Exception as e:
+        return None, f"❌ Error during speech generation: {str(e)}"

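The mock above returns a path under /tmp without writing any file there, so the Audio component would have nothing to play back. If a placeholder clip is wanted while no real TTS backend is wired in, a silent WAV can be written with the standard library; the helper below is a hypothetical addition, not something the commit contains:

import wave

import numpy as np

def write_silent_wav(path: str, seconds: float = 1.0, sample_rate: int = 24000) -> str:
    """Write a silent 16-bit mono WAV so gr.Audio has a valid file to serve."""
    samples = np.zeros(int(seconds * sample_rate), dtype=np.int16)
    with wave.open(path, "wb") as wf:
        wf.setnchannels(1)            # mono
        wf.setsampwidth(2)            # 16-bit samples
        wf.setframerate(sample_rate)
        wf.writeframes(samples.tobytes())
    return path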
+# Create the modern Gradio 6 interface
+with gr.Blocks() as demo:
    gr.Markdown(
        """
+        # ✨ Newomen - Your Astrological AI Companion
+
+        <div style="text-align: center; margin-top: 20px;">
+            <a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank" style="text-decoration: none; color: #4F46E5; font-weight: 600;">
+                Built with anycoder ✨
+            </a>
+        </div>
+
+        **Welcome to Newomen** - Where AI meets Astrology for Deep Psychological Transformation
+        """
    )
+
    with gr.Row():
        with gr.Column(scale=2):
+            # User input section
+            gr.Markdown("### 🎙️ Start Your Conversation")
+
+            with gr.Tabs() as tabs:
+                with gr.TabItem("💬 Text Chat"):
+                    text_input = gr.Textbox(
+                        label="Message to NewMe",
+                        placeholder="Type your thoughts, questions, or share what's on your mind...")
+
+                with gr.TabItem("🎤 Voice Chat"):
+                    voice_input = gr.Audio(
+                        sources=["microphone"],
+                        type="filepath",
+                        label="Speak to NewMe",
+                    )
+
+                with gr.TabItem("📊 Assessments"):
+                    gr.Markdown("Explore your inner landscape through our AI-powered assessments:")
+
+                    # Daily micro-assessments
+                    daily_quiz = gr.Button("Take Daily Personality Quiz", variant="secondary")
+
+                with gr.TabItem("👥 Community"):
+                    gr.Markdown("Connect with others on similar healing journeys...")
+
+                    # Assessment interface
+                    assessment_types = gr.Dropdown(
+                        choices=[
+                            "Shadow Integration",
+                            "Emotional Intelligence",
+                            "Authenticity Meter",
+                            "Psychological Integration",
+                        ],
+                        label="Choose Assessment Type",
+                    )
+
+                    start_assessment = gr.Button("Begin Exploration", variant="primary")
+
+                with gr.TabItem("⚙️ Settings"):
+                    gr.Markdown("Configure your Newomen experience...")
+
+                    # Personality type display
+                    current_personality = gr.Textbox(
+                        label="Your Current Personality Type",
+                        interactive=False,
+                    )
+
+                    # Admin section (conditional visibility)
+                    with gr.Accordion("🔧 Admin Panel", open=False):
+                        gr.Markdown("Platform management tools...")
+
+
+    # Voice chat interface
+    with gr.Row():
+        with gr.Column():
+            voice_message = gr.Audio(
+                sources=["microphone"],
+                type="filepath",
+                label="Record Your Message",
            )
+
+            newme_voice_response = gr.Audio(
+                label="NewMe's Response",
                type="filepath",
                interactive=False,
            )
+
+    # Status and output section
+    with gr.Row():
+        status_output = gr.Markdown(
            """
+            **Status:** Ready for deep conversation...

+            NewMe awaits your thoughts, questions, or whatever you'd like to share today.
            """
        )
+
+    # Example conversations
    gr.Examples(
        examples=[
+            ["What's my astrological personality type today?"],
+            ["I'm feeling particularly fragmented today..."],
+            ["Can you help me with shadow work integration?"],
        ],
+        inputs=[text_input],
+        label="Example Conversation Starters",
    )

+    # Event handlers for text chat
+    text_input.submit(
+        fn=process_user_interaction,
+        inputs=[text_input],
+        outputs=[status_output],
+        api_visibility="public",
+    )

+    # Voice processing
+    voice_input.change(
+        fn=generate_speech_response,
+        inputs=[voice_input],
+        outputs=[newme_voice_response, status_output],
+        api_name="chat",
    )
+
+    # Assessment handlers
+    daily_quiz.click(
+        fn=lambda: ("Assessment in progress...", "✅ Starting your daily micro-assessment..."),
+        outputs=[current_personality, status_output],  # route the quiz banner and status text to visible components
+    )
+
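Gradio matches a callback's return values to its outputs list one to one, so a handler that returns a 2-tuple needs two output components (process_user_interaction above returns two values). A minimal, hypothetical sketch of that wiring rule with illustrative component names:

import gradio as gr

def reply(message: str):
    # one return value per declared output component
    return f"Echo: {message}", "✅ done"

with gr.Blocks() as sketch:
    box = gr.Textbox(label="Message")
    reply_out = gr.Textbox(label="Reply")
    status = gr.Markdown("**Status:** idle")
    box.submit(fn=reply, inputs=[box], outputs=[reply_out, status])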
+# Launch with modern Gradio 6 theme and settings
if __name__ == "__main__":
    demo.launch(
        theme=gr.themes.Soft(
+            primary_hue="purple",
+            secondary_hue="indigo",
            neutral_hue="slate",
+            font=gr.themes.GoogleFont("Inter"),
+            text_size="lg",
+            spacing_size="lg",
+            radius_size="md",
+        ).set(
+            button_primary_background_fill="*primary_600",
+            button_primary_background_fill_hover="*primary_700",
+            block_title_text_weight="600",
        ),
        footer_links=[
            {"label": "Built with anycoder", "url": "https://huggingface.co/spaces/akhaliq/anycoder"}