hanzla committed
Commit 9f2c552 · verified · 1 Parent(s): 001ffde

Replace template with Gemini-powered medical chatbot

Files changed (1)
  1. src/streamlit_app.py  +191 -35
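Note: only src/streamlit_app.py changes in this commit. The new `import google.generativeai` suggests the Space's dependency list would also need the Gemini client library alongside Streamlit; a hypothetical requirements.txt entry (not part of this diff) might look like:

    streamlit
    google-generativeai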
src/streamlit_app.py CHANGED
@@ -1,40 +1,196 @@
- import altair as alt
- import numpy as np
- import pandas as pd
  import streamlit as st
+ import os
+ import google.generativeai as genai
+ from typing import Optional

- """
- # Welcome to Streamlit!
+ # Set page configuration
+ st.set_page_config(
+     page_title="Medical Q/A Chatbot",
+     page_icon="🩺",
+     layout="wide",
+     initial_sidebar_state="expanded"
+ )

- Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
- If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
- forums](https://discuss.streamlit.io).
+ def main():
+     """Main function for the Medical Q/A Chatbot"""
+
+     # Title and description
+     st.title("🩺 Medical Q/A Chatbot")
+     st.markdown(
+         """
+         Welcome to the Medical Q/A Chatbot! This application provides informational responses
+         to medical questions using Google's Gemini 2.5 Flash API. Please note that this is for
+         educational purposes only and should not replace professional medical advice.
+         """
+     )
+
+     # Sidebar configuration
+     with st.sidebar:
+         st.header("Configuration")
+
+         # API Key configuration
+         st.subheader("API Settings")
+         api_key = st.text_input(
+             "Gemini API Key",
+             type="password",
+             help="Enter your Google Gemini API key",
+             placeholder="AIzaSy..."
+         )
+
+         # If API key is provided, set it as environment variable
+         if api_key:
+             os.environ['GEMINI_API_KEY'] = api_key
+             st.success("API Key configured successfully!")
+         else:
+             # Use default API key from environment if available
+             default_key = os.getenv('GEMINI_API_KEY', 'AIzaSyBEyc7iQCLXfry6V7pA0TDR1k0eriX_nDo')
+             if default_key:
+                 os.environ['GEMINI_API_KEY'] = default_key
+                 st.info("Using default API key from environment")
+
+         st.divider()
+
+         model_choice = st.selectbox(
+             "Select Model",
+             ["Gemini 2.5 Flash", "Gemini Pro"],
+             index=0
+         )
+
+         temperature = st.slider(
+             "Temperature",
+             min_value=0.0,
+             max_value=1.0,
+             value=0.7,
+             step=0.1,
+             help="Controls randomness in responses. Lower values are more focused and deterministic."
+         )
+
+         max_tokens = st.number_input(
+             "Max Tokens",
+             min_value=100,
+             max_value=4000,
+             value=500,
+             step=100,
+             help="Maximum number of tokens in the response."
+         )
+
+     # Chat interface
+     st.header("Ask your medical question:")
+
+     # Initialize chat history
+     if "messages" not in st.session_state:
+         st.session_state.messages = []
+
+     # Display chat messages from history
+     for message in st.session_state.messages:
+         with st.chat_message(message["role"]):
+             st.markdown(message["content"])
+
+     # Chat input
+     if prompt := st.chat_input("What is your medical question?"):
+         # Add user message to chat history
+         st.session_state.messages.append({"role": "user", "content": prompt})
+
+         # Display user message
+         with st.chat_message("user"):
+             st.markdown(prompt)
+
+         # Generate and display assistant response
+         with st.chat_message("assistant"):
+             with st.spinner("Thinking..."):
+                 response = generate_medical_response(prompt, model_choice, temperature, max_tokens)
+                 st.markdown(response)
+
+         # Add assistant response to chat history
+         st.session_state.messages.append({"role": "assistant", "content": response})
+
+     # Clear chat button
+     if st.button("Clear Chat History"):
+         st.session_state.messages = []
+         st.rerun()

- In the meantime, below is an example of what you can do with just a few lines of code:
- """
+ def generate_medical_response(question: str, model: str, temperature: float, max_tokens: int) -> str:
+     """
+     Generate a medical response using Google's Gemini API.
+     """
+
+     # Disclaimer message
+     disclaimer = "\n\n**Disclaimer**: This response is for informational purposes only and should not replace professional medical advice, diagnosis, or treatment. Always consult with a qualified healthcare provider for medical concerns."
+
+     try:
+         # Get API key from environment
+         api_key = os.getenv('GEMINI_API_KEY')
+         if not api_key:
+             return "Error: No API key configured. Please set your Gemini API key in the sidebar." + disclaimer
+
+         # Configure the API
+         genai.configure(api_key=api_key)
+
+         # Select model based on choice
+         model_name = "gemini-2.5-flash" if "Flash" in model else "gemini-pro"
+
+         # Create the model with configuration
+         generation_config = {
+             "temperature": temperature,
+             "top_p": 1,
+             "top_k": 40,
+             "max_output_tokens": max_tokens,
+         }
+
+         model_instance = genai.GenerativeModel(
+             model_name=model_name,
+             generation_config=generation_config
+         )
+
+         # Create a medical-focused prompt
+         medical_prompt = f"""
+         You are a knowledgeable medical AI assistant. Please provide an informative and helpful response to the following medical question.
+         Your response should be:
+         - Medically accurate and evidence-based
+         - Clear and easy to understand
+         - Comprehensive but concise
+         - Include relevant medical terminology with explanations
+         - Always emphasize when professional medical consultation is needed
+
+         Question: {question}
+
+         Please provide a detailed medical response while always reminding the user that this is for educational purposes only and cannot replace professional medical advice.
+         """
+
+         # Generate response
+         response = model_instance.generate_content(medical_prompt)
+
+         if response and response.text:
+             return response.text + disclaimer
+         else:
+             return "I apologize, but I couldn't generate a response at this time. Please try again or rephrase your question." + disclaimer
+
+     except Exception as e:
+         error_msg = f"Error connecting to Gemini API: {str(e)}"
+
+         # Provide fallback response with helpful information
+         fallback_response = f"""
+         {error_msg}
+
+         I understand you're seeking medical information about: "{question}"
+
+         While I'm currently unable to provide a detailed response due to technical issues, I recommend:
+
+         1. **For urgent medical concerns**: Contact your healthcare provider immediately or call emergency services
+         2. **For general health questions**: Consult with your primary care physician
+         3. **For medication questions**: Speak with a pharmacist or your prescribing doctor
+         4. **For reliable medical information**: Visit reputable sources like:
+            - Mayo Clinic (mayoclinic.org)
+            - WebMD (webmd.com)
+            - MedlinePlus (medlineplus.gov)
+
+         **Current Configuration:**
+         - Model: {model}
+         - Temperature: {temperature}
+         - Max Tokens: {max_tokens}
+         """
+
+         return fallback_response + disclaimer

- num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
- num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
-
- indices = np.linspace(0, 1, num_points)
- theta = 2 * np.pi * num_turns * indices
- radius = indices
-
- x = radius * np.cos(theta)
- y = radius * np.sin(theta)
-
- df = pd.DataFrame({
-     "x": x,
-     "y": y,
-     "idx": indices,
-     "rand": np.random.randn(num_points),
- })
-
- st.altair_chart(alt.Chart(df, height=700, width=700)
-     .mark_point(filled=True)
-     .encode(
-         x=alt.X("x", axis=None),
-         y=alt.Y("y", axis=None),
-         color=alt.Color("idx", legend=None, scale=alt.Scale()),
-         size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
-     ))
+ if __name__ == "__main__":
+     main()
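For reference, a minimal standalone sketch of the Gemini call pattern that generate_medical_response uses above (assumes the google-generativeai package is installed and GEMINI_API_KEY is set in the environment; the question string is illustrative only):

    import os
    import google.generativeai as genai

    # Configure the client from the environment, as the app's sidebar fallback does
    genai.configure(api_key=os.environ["GEMINI_API_KEY"])

    # Same model name and generation settings as the app's defaults
    model = genai.GenerativeModel(
        model_name="gemini-2.5-flash",
        generation_config={"temperature": 0.7, "max_output_tokens": 500},
    )

    # Send a single prompt and print the text of the response
    response = model.generate_content("What are common symptoms of dehydration?")
    print(response.text)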