import streamlit as st
import os
import google.generativeai as genai
from typing import Optional

# Set page configuration
st.set_page_config(
    page_title="Medical Q/A Chatbot",
    page_icon="🩺",
    layout="wide",
    initial_sidebar_state="expanded"
)

def main():
    """Main function for the Medical Q/A Chatbot"""
    # Title and description
    st.title("🩺 Medical Q/A Chatbot")
    st.markdown(
        """
        Welcome to the Medical Q/A Chatbot! This application provides informational responses
        to medical questions using Google's Gemini 2.5 Flash API. Please note that this is for
        educational purposes only and should not replace professional medical advice.
        """
    )

    # Sidebar configuration
    with st.sidebar:
        st.header("Configuration")

        # API key configuration
        st.subheader("API Settings")
        api_key = st.text_input(
            "Gemini API Key",
            type="password",
            help="Enter your Google Gemini API key",
            placeholder="AIzaSy..."
        )
        # If an API key is provided, store it in the environment for this session
        if api_key:
            os.environ['GEMINI_API_KEY'] = api_key
            st.success("API Key configured successfully!")
        else:
            # Otherwise fall back to an API key already present in the environment
            default_key = os.getenv('GEMINI_API_KEY')
            if default_key:
                st.info("Using API key from environment")
        st.divider()

        model_choice = st.selectbox(
            "Select Model",
            ["Gemini 2.5 Flash", "Gemini Pro"],
            index=0
        )
        temperature = st.slider(
            "Temperature",
            min_value=0.0,
            max_value=1.0,
            value=0.7,
            step=0.1,
            help="Controls randomness in responses. Lower values are more focused and deterministic."
        )
        max_tokens = st.number_input(
            "Max Tokens",
            min_value=100,
            max_value=4000,
            value=500,
            step=100,
            help="Maximum number of tokens in the response."
        )
    # Chat interface
    st.header("Ask your medical question:")

    # Initialize chat history
    if "messages" not in st.session_state:
        st.session_state.messages = []

    # Display chat messages from history
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # Chat input
    if prompt := st.chat_input("What is your medical question?"):
        # Add user message to chat history
        st.session_state.messages.append({"role": "user", "content": prompt})

        # Display user message
        with st.chat_message("user"):
            st.markdown(prompt)

        # Generate and display assistant response
        with st.chat_message("assistant"):
            with st.spinner("Thinking..."):
                response = generate_medical_response(prompt, model_choice, temperature, max_tokens)
                st.markdown(response)

        # Add assistant response to chat history
        st.session_state.messages.append({"role": "assistant", "content": response})

    # Clear chat button
    if st.button("Clear Chat History"):
        st.session_state.messages = []
        st.rerun()

def generate_medical_response(question: str, model: str, temperature: float, max_tokens: int) -> str:
    """
    Generate a medical response using Google's Gemini API.
    """
    # Disclaimer appended to every response
    disclaimer = (
        "\n\n**Disclaimer**: This response is for informational purposes only and should not "
        "replace professional medical advice, diagnosis, or treatment. Always consult with a "
        "qualified healthcare provider for medical concerns."
    )

    try:
        # Get API key from environment
        api_key = os.getenv('GEMINI_API_KEY')
        if not api_key:
            return "Error: No API key configured. Please set your Gemini API key in the sidebar." + disclaimer

        # Configure the API
        genai.configure(api_key=api_key)

        # Select model based on choice
        model_name = "gemini-2.5-flash" if "Flash" in model else "gemini-pro"

        # Create the model with configuration
        generation_config = {
            "temperature": temperature,
            "top_p": 1,
            "top_k": 40,
            "max_output_tokens": max_tokens,
        }
        model_instance = genai.GenerativeModel(
            model_name=model_name,
            generation_config=generation_config
        )

        # Create a medical-focused prompt
        medical_prompt = f"""
        You are a knowledgeable medical AI assistant. Please provide an informative and helpful response to the following medical question.

        Your response should be:
        - Medically accurate and evidence-based
        - Clear and easy to understand
        - Comprehensive but concise
        - Include relevant medical terminology with explanations
        - Always emphasize when professional medical consultation is needed

        Question: {question}

        Please provide a detailed medical response while always reminding the user that this is for educational purposes only and cannot replace professional medical advice.
        """

        # Generate response
        response = model_instance.generate_content(medical_prompt)

        if response and response.text:
            return response.text + disclaimer
        else:
            return ("I apologize, but I couldn't generate a response at this time. "
                    "Please try again or rephrase your question.") + disclaimer

    except Exception as e:
        error_msg = f"Error connecting to Gemini API: {str(e)}"

        # Provide a fallback response with helpful information
        fallback_response = f"""
        {error_msg}

        I understand you're seeking medical information about: "{question}"

        While I'm currently unable to provide a detailed response due to technical issues, I recommend:

        1. **For urgent medical concerns**: Contact your healthcare provider immediately or call emergency services
        2. **For general health questions**: Consult with your primary care physician
        3. **For medication questions**: Speak with a pharmacist or your prescribing doctor
        4. **For reliable medical information**: Visit reputable sources like:
           - Mayo Clinic (mayoclinic.org)
           - WebMD (webmd.com)
           - MedlinePlus (medlineplus.gov)

        **Current Configuration:**
        - Model: {model}
        - Temperature: {temperature}
        - Max Tokens: {max_tokens}
        """
        return fallback_response + disclaimer


if __name__ == "__main__":
    main()
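
# ---------------------------------------------------------------------------
# Usage note (a sketch under assumptions, not part of the original app):
# the script is typically saved as app.py and started with
#     streamlit run app.py
# after installing the dependencies, e.g.
#     pip install streamlit google-generativeai
# Exporting GEMINI_API_KEY in the environment beforehand avoids having to
# paste the key into the sidebar on every run.
# ---------------------------------------------------------------------------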