File size: 7,100 Bytes
0911831
 
71eba9d
0911831
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71eba9d
 
0911831
 
 
 
 
 
71eba9d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0911831
 
71eba9d
0911831
 
 
 
 
 
 
 
71eba9d
 
0911831
 
 
 
 
 
 
71eba9d
 
0911831
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71eba9d
 
 
0911831
 
 
 
 
 
 
 
 
 
 
71eba9d
0911831
 
 
71eba9d
0911831
71eba9d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0911831
 
71eba9d
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
import streamlit as st
import os
import google.generativeai as genai
from typing import Optional

# Configure the Streamlit page chrome (tab title/icon, layout, sidebar state).
# Must run before any other st.* call in the script, which is why it sits at
# module level ahead of main().
st.set_page_config(
    page_title="Medical Q/A Chatbot",
    page_icon="🩺",
    layout="wide",
    initial_sidebar_state="expanded"
)

def main():
    """Render the Medical Q/A Chatbot page for one Streamlit rerun.

    Builds the sidebar controls (API key, model choice, temperature,
    max tokens), replays the chat history kept in ``st.session_state``,
    and on new user input delegates to ``generate_medical_response()``,
    appending both turns to the history.
    """

    # Title and description
    st.title("🩺 Medical Q/A Chatbot")
    st.markdown(
        """
        Welcome to the Medical Q/A Chatbot! This application provides informational responses 
        to medical questions using Google's Gemini 2.5 Flash API. Please note that this is for 
        educational purposes only and should not replace professional medical advice.
        """
    )

    # Sidebar configuration
    with st.sidebar:
        st.header("Configuration")

        # API Key configuration
        st.subheader("API Settings")
        api_key = st.text_input(
            "Gemini API Key",
            type="password",
            help="Enter your Google Gemini API key",
            placeholder="AIzaSy..."
        )

        # Export the key so generate_medical_response() can read it later via
        # os.getenv(). SECURITY FIX: the previous version fell back to a real
        # API key hard-coded in source control; never embed credentials in
        # code — rely solely on user input or the environment variable.
        if api_key:
            os.environ['GEMINI_API_KEY'] = api_key
            st.success("API Key configured successfully!")
        elif os.getenv('GEMINI_API_KEY'):
            st.info("Using default API key from environment")
        else:
            st.warning("No API key configured. Enter your Gemini API key above.")

        st.divider()

        # Display names are mapped to API model ids in generate_medical_response().
        model_choice = st.selectbox(
            "Select Model",
            ["Gemini 2.5 Flash", "Gemini Pro"],
            index=0
        )

        temperature = st.slider(
            "Temperature",
            min_value=0.0,
            max_value=1.0,
            value=0.7,
            step=0.1,
            help="Controls randomness in responses. Lower values are more focused and deterministic."
        )

        max_tokens = st.number_input(
            "Max Tokens",
            min_value=100,
            max_value=4000,
            value=500,
            step=100,
            help="Maximum number of tokens in the response."
        )

    # Chat interface
    st.header("Ask your medical question:")

    # Initialize chat history (session_state survives Streamlit reruns).
    if "messages" not in st.session_state:
        st.session_state.messages = []

    # Display chat messages from history
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # Chat input
    if prompt := st.chat_input("What is your medical question?"):
        # Add user message to chat history
        st.session_state.messages.append({"role": "user", "content": prompt})

        # Display user message
        with st.chat_message("user"):
            st.markdown(prompt)

        # Generate and display assistant response
        with st.chat_message("assistant"):
            with st.spinner("Thinking..."):
                response = generate_medical_response(prompt, model_choice, temperature, max_tokens)
                st.markdown(response)

        # Add assistant response to chat history
        st.session_state.messages.append({"role": "assistant", "content": response})

    # Clear chat button
    if st.button("Clear Chat History"):
        st.session_state.messages = []
        st.rerun()

def generate_medical_response(question: str, model: str, temperature: float, max_tokens: int) -> str:
    """Answer *question* via the Gemini API, always appending a disclaimer.

    Args:
        question: The user's medical question.
        model: UI display name of the model ("Gemini 2.5 Flash" or "Gemini Pro").
        temperature: Sampling temperature for generation.
        max_tokens: Upper bound on tokens in the generated reply.

    Returns:
        The model's answer — or an error/fallback message if no API key is
        configured or the API call fails — with a medical disclaimer suffix.
        Never raises: all API failures are folded into a fallback reply.
    """

    # Suffix appended to every reply, including error and fallback paths.
    disclaimer = "\n\n**Disclaimer**: This response is for informational purposes only and should not replace professional medical advice, diagnosis, or treatment. Always consult with a qualified healthcare provider for medical concerns."

    # Guard clause: without credentials there is nothing to call.
    key = os.getenv('GEMINI_API_KEY')
    if not key:
        return "Error: No API key configured. Please set your Gemini API key in the sidebar." + disclaimer

    try:
        genai.configure(api_key=key)

        # Map the UI display name onto the API model identifier.
        if "Flash" in model:
            target_model = "gemini-2.5-flash"
        else:
            target_model = "gemini-pro"

        llm = genai.GenerativeModel(
            model_name=target_model,
            generation_config=dict(
                temperature=temperature,
                top_p=1,
                top_k=40,
                max_output_tokens=max_tokens,
            ),
        )

        # Medical-focused prompt wrapping the user's question.
        prompt_text = f"""
        You are a knowledgeable medical AI assistant. Please provide an informative and helpful response to the following medical question. 
        Your response should be:
        - Medically accurate and evidence-based
        - Clear and easy to understand
        - Comprehensive but concise
        - Include relevant medical terminology with explanations
        - Always emphasize when professional medical consultation is needed
        
        Question: {question}
        
        Please provide a detailed medical response while always reminding the user that this is for educational purposes only and cannot replace professional medical advice.
        """

        reply = llm.generate_content(prompt_text)
        if reply and reply.text:
            return reply.text + disclaimer
        return "I apologize, but I couldn't generate a response at this time. Please try again or rephrase your question." + disclaimer

    except Exception as e:
        # Best-effort boundary: convert any API failure into a helpful
        # fallback message rather than crashing the UI.
        err_text = f"Error connecting to Gemini API: {str(e)}"

        fallback = f"""
        {err_text}
        
        I understand you're seeking medical information about: "{question}"
        
        While I'm currently unable to provide a detailed response due to technical issues, I recommend:
        
        1. **For urgent medical concerns**: Contact your healthcare provider immediately or call emergency services
        2. **For general health questions**: Consult with your primary care physician
        3. **For medication questions**: Speak with a pharmacist or your prescribing doctor
        4. **For reliable medical information**: Visit reputable sources like:
           - Mayo Clinic (mayoclinic.org)
           - WebMD (webmd.com)
           - MedlinePlus (medlineplus.gov)
        
        **Current Configuration:**
        - Model: {model}
        - Temperature: {temperature}
        - Max Tokens: {max_tokens}
        """

        return fallback + disclaimer

# Run the app only when this file is executed directly (not when imported).
if __name__ == "__main__":
    main()