ACloudCenter committed on
Commit
62a239a
·
1 Parent(s): 66d00cc

Modify UI and properly handle chat state

Browse files
Files changed (1) hide show
  1. app.py +37 -19
app.py CHANGED
@@ -64,7 +64,7 @@ def transcript_qa(transcript, question, history):
64
  if not transcript:
65
  return history + [{"role": "user", "content": question}, {"role": "assistant", "content": "Please transcribe audio first before asking questions."}]
66
 
67
- if not question:
68
  return history
69
 
70
  with torch.inference_mode(), model.llm.disable_adapter():
@@ -82,6 +82,9 @@ def transcript_qa(transcript, question, history):
82
  _, ans = ans.split("</think>")
83
  ans = ans.strip()
84
 
 
 
 
85
  return history + [{"role": "user", "content": question}, {"role": "assistant", "content": ans}]
86
 
87
  def disable_transcribe():
@@ -150,34 +153,49 @@ with gr.Blocks(theme=theme) as demo:
150
  clear_transcript_btn = gr.Button("Clear Transcript")
151
 
152
  # Spacing
153
- with gr.Row():
154
- with gr.Column():
155
- gr.Markdown()
156
- gr.Markdown("---")
157
- gr.Markdown()
158
 
159
  # Step 3 - Interactive Q&A
160
- with gr.Row(equal_height=True):
 
 
161
  with gr.Column(scale=1):
162
- gr.Markdown("### Step 3 - Interactive Q&A")
163
- msg = gr.Textbox(
164
- placeholder="Ask a question about the transcript or pick an example",
165
- label="Questions")
166
- gr.Examples(
167
- examples=example_questions,
168
- inputs=msg
 
 
 
 
169
  )
170
- submit_chat_btn = gr.Button("Submit", variant="primary")
171
- clear_chat_btn = gr.Button("Clear Chat")
 
 
 
 
 
 
172
 
 
173
  with gr.Row():
174
  with gr.Column(scale=1):
175
- gr.Markdown("### Answers")
176
- chatbot = gr.Chatbot(label="Responses", type="messages", height=400)
 
 
 
 
 
 
177
 
178
  # Event handlers
179
  def submit_question(question, history, transcript):
180
- if not question:
181
  return "", history
182
  new_history = transcript_qa(transcript, question, history)
183
  return "", new_history
 
64
  if not transcript:
65
  return history + [{"role": "user", "content": question}, {"role": "assistant", "content": "Please transcribe audio first before asking questions."}]
66
 
67
+ if not question or question.strip() == "":
68
  return history
69
 
70
  with torch.inference_mode(), model.llm.disable_adapter():
 
82
  _, ans = ans.split("</think>")
83
  ans = ans.strip()
84
 
85
+ if not ans:
86
+ ans = "I couldn't generate a response. Please try rephrasing your question."
87
+
88
  return history + [{"role": "user", "content": question}, {"role": "assistant", "content": ans}]
89
 
90
  def disable_transcribe():
 
153
  clear_transcript_btn = gr.Button("Clear Transcript")
154
 
155
  # Spacing
156
+ gr.Markdown("---")
 
 
 
 
157
 
158
  # Step 3 - Interactive Q&A
159
+ gr.Markdown("### Step 3 - Interactive Q&A")
160
+
161
+ with gr.Row():
162
  with gr.Column(scale=1):
163
+ gr.Markdown("#### About Context-Aware Q&A")
164
+ gr.Markdown("""The model retains the full transcript context, allowing you to ask follow-up questions
165
+ naturally without re-stating information. It understands references like 'they', 'it', or 'that topic'.""")
166
+ gr.Markdown("#### Example Questions")
167
+
168
+ with gr.Column(scale=2):
169
+ chatbot = gr.Chatbot(
170
+ label="Chat History",
171
+ type="messages",
172
+ height=400,
173
+ show_copy_button=True
174
  )
175
+ with gr.Row():
176
+ msg = gr.Textbox(
177
+ placeholder="Ask a question about the transcript...",
178
+ label="Your Question",
179
+ scale=4
180
+ )
181
+ submit_chat_btn = gr.Button("Send", variant="primary", scale=1)
182
+ clear_chat_btn = gr.Button("Clear Chat", size="sm")
183
 
184
+ # Add examples after msg is defined
185
  with gr.Row():
186
  with gr.Column(scale=1):
187
+ gr.Examples(
188
+ examples=example_questions,
189
+ inputs=msg,
190
+ cache_examples=False,
191
+ label=""
192
+ )
193
+ with gr.Column(scale=2):
194
+ pass # Empty column for alignment
195
 
196
  # Event handlers
197
  def submit_question(question, history, transcript):
198
+ if not question or question.strip() == "":
199
  return "", history
200
  new_history = transcript_qa(transcript, question, history)
201
  return "", new_history