ACloudCenter committed on
Commit
a75a22d
·
1 Parent(s): cf6f850

Remove chat history management, not needed

Browse files
Files changed (1) hide show
  1. app.py +22 -37
app.py CHANGED
@@ -60,17 +60,16 @@ def transcribe_audio(audio_filepath):
60
 
61
 
62
  @spaces.GPU
63
- def transcript_qa(transcript, question, history, thinking_history):
64
  if not transcript:
65
- return (history + [{"role": "user", "content": question}, {"role": "assistant", "content": "Please transcribe audio first before asking questions."}],
66
- thinking_history + "")
67
 
68
  if not question or question.strip() == "":
69
- return history, thinking_history
70
 
71
  with torch.inference_mode(), model.llm.disable_adapter():
72
  output_ids = model.generate(
73
- prompts=[[{"role": "user", "content": f"Answer this question about the transcript: {question}\n\nTranscript: {transcript}"}]],
74
  max_new_tokens=512,
75
  )
76
 
@@ -88,12 +87,7 @@ def transcript_qa(transcript, question, history, thinking_history):
88
  if not ans:
89
  ans = "I couldn't generate a response. Please try rephrasing your question."
90
 
91
- # Update thinking history if there's new thinking
92
- if thinking:
93
- thinking_history += f"\n\n**Question:** {question}\n**Thinking:** {thinking}"
94
-
95
- return (history + [{"role": "user", "content": question}, {"role": "assistant", "content": ans}],
96
- thinking_history)
97
 
98
  def disable_transcribe():
99
  return gr.update(interactive=False)
@@ -128,7 +122,6 @@ with gr.Blocks(theme=theme) as demo:
128
 
129
  # State variables
130
  transcript_state = gr.State("")
131
- thinking_state = gr.State("")
132
 
133
  # Example questions
134
  example_questions = ["Can you summarize this meeting?", "Please provide bullet points of the key items.",
@@ -179,11 +172,11 @@ with gr.Blocks(theme=theme) as demo:
179
  example_container = gr.Column()
180
 
181
  with gr.Column(scale=2):
182
- chatbot = gr.Chatbot(
183
- label="Chat History",
184
- type="messages",
185
- height=400,
186
- show_copy_button=True
187
  )
188
  with gr.Row():
189
  msg = gr.Textbox(
@@ -214,19 +207,19 @@ with gr.Blocks(theme=theme) as demo:
214
  )
215
 
216
  # Event handlers
217
- def submit_question(question, history, transcript, thinking):
218
  if not question or question.strip() == "":
219
- return "", history, thinking
220
- new_history, new_thinking = transcript_qa(transcript, question, history, thinking)
221
- return "", new_history, new_thinking
222
 
223
 
224
  transcribe_btn.click(
225
  fn=disable_transcribe,
226
  outputs=[transcribe_btn]
227
  ).then(
228
- fn=lambda: ([], "", ""),
229
- outputs=[chatbot, thinking_box, thinking_state]
230
  ).then(
231
  fn=transcribe_audio,
232
  inputs=[audio_input],
@@ -248,27 +241,19 @@ with gr.Blocks(theme=theme) as demo:
248
 
249
  msg.submit(
250
  fn=submit_question,
251
- inputs=[msg, chatbot, transcript_state, thinking_state],
252
- outputs=[msg, chatbot, thinking_state]
253
- ).then(
254
- fn=lambda x: x,
255
- inputs=[thinking_state],
256
- outputs=[thinking_box]
257
  )
258
 
259
  submit_chat_btn.click(
260
  fn=submit_question,
261
- inputs=[msg, chatbot, transcript_state, thinking_state],
262
- outputs=[msg, chatbot, thinking_state]
263
- ).then(
264
- fn=lambda x: x,
265
- inputs=[thinking_state],
266
- outputs=[thinking_box]
267
  )
268
 
269
  clear_chat_btn.click(
270
- fn=lambda: ([], "", ""),
271
- outputs=[chatbot, thinking_box, thinking_state]
272
  )
273
 
274
  demo.queue()
 
60
 
61
 
62
  @spaces.GPU
63
+ def transcript_qa(transcript, question):
64
  if not transcript:
65
+ return "Please transcribe audio first before asking questions.", ""
 
66
 
67
  if not question or question.strip() == "":
68
+ return "", ""
69
 
70
  with torch.inference_mode(), model.llm.disable_adapter():
71
  output_ids = model.generate(
72
+ prompts=[[{"role": "user", "content": f"{question}\n\nTranscript: {transcript}"}]],
73
  max_new_tokens=512,
74
  )
75
 
 
87
  if not ans:
88
  ans = "I couldn't generate a response. Please try rephrasing your question."
89
 
90
+ return ans, thinking
 
 
 
 
 
91
 
92
  def disable_transcribe():
93
  return gr.update(interactive=False)
 
122
 
123
  # State variables
124
  transcript_state = gr.State("")
 
125
 
126
  # Example questions
127
  example_questions = ["Can you summarize this meeting?", "Please provide bullet points of the key items.",
 
172
  example_container = gr.Column()
173
 
174
  with gr.Column(scale=2):
175
+ answer_box = gr.Textbox(
176
+ label="Assistant's Response",
177
+ lines=8,
178
+ max_lines=12,
179
+ interactive=False
180
  )
181
  with gr.Row():
182
  msg = gr.Textbox(
 
207
  )
208
 
209
  # Event handlers
210
+ def submit_question(question, transcript):
211
  if not question or question.strip() == "":
212
+ return "", "", ""
213
+ answer, thinking = transcript_qa(transcript, question)
214
+ return "", answer, thinking
215
 
216
 
217
  transcribe_btn.click(
218
  fn=disable_transcribe,
219
  outputs=[transcribe_btn]
220
  ).then(
221
+ fn=lambda: ("", ""),
222
+ outputs=[answer_box, thinking_box]
223
  ).then(
224
  fn=transcribe_audio,
225
  inputs=[audio_input],
 
241
 
242
  msg.submit(
243
  fn=submit_question,
244
+ inputs=[msg, transcript_state],
245
+ outputs=[msg, answer_box, thinking_box]
 
 
 
 
246
  )
247
 
248
  submit_chat_btn.click(
249
  fn=submit_question,
250
+ inputs=[msg, transcript_state],
251
+ outputs=[msg, answer_box, thinking_box]
 
 
 
 
252
  )
253
 
254
  clear_chat_btn.click(
255
+ fn=lambda: ("", ""),
256
+ outputs=[answer_box, thinking_box]
257
  )
258
 
259
  demo.queue()