import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# ✅ Load the tokenizer and model
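# BioGPT is a GPT-style causal language model pre-trained on biomedical (PubMed) text.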
MODEL_NAME = "microsoft/BioGPT"
device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
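# Use half precision on GPU to reduce memory; fall back to full precision on CPU, where fp16 is poorly supported.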
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME, 
    torch_dtype=torch.float16 if device == "cuda" else torch.float32, 
    device_map=device
)

# ✅ Define the chatbot function
def chat_with_gpt(user_query):
    if not user_query.strip():
        return "⚠️ Please enter a valid medical question."
    
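    # Tokenize the query, move it to the model's device, and generate up to 100 new tokens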
    inputs = tokenizer(user_query, return_tensors="pt").to(device)
    outputs = model.generate(**inputs, max_new_tokens=100)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# ✅ Define the Gradio UI
with gr.Blocks(theme=gr.themes.Soft()) as app:
    with gr.Tab("🏥 Medical Chatbot"):
        gr.Markdown("<h1 style='text-align: center;'>🏥 AI Health Assistant</h1>")
        user_input = gr.Textbox(label="Enter your medical query:")
        submit_button = gr.Button("🔍 Get Answer", variant="primary")
        chatbot_response = gr.Textbox(label="Chatbot Response", interactive=False)

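        # Wire the button to the inference function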
        submit_button.click(chat_with_gpt, inputs=user_input, outputs=chatbot_response)

    with gr.Tab("ℹ️ About"):
        gr.Markdown("### ℹ️ About AI Health Assistant")
        gr.Markdown("""
        - This AI chatbot answers **medical-related questions**.
        - **Not a substitute for professional medical advice**.
        - It does **not** provide **medications or treatments**.
        """)

    with gr.Tab("❓ FAQ"):
        gr.Markdown("### ❓ Frequently Asked Questions")

        gr.Markdown("**1️⃣ Can I use this for medical diagnosis?**\n- ❌ No, this is for **informational purposes only**.")
        gr.Markdown("**2️⃣ How accurate are the responses?**\n- πŸ“Š The AI provides answers based on trained medical data but should be cross-checked.")
        gr.Markdown("**3️⃣ Is my data safe?**\n- πŸ” Yes, your input is **not stored**.")
        gr.Markdown("**4️⃣ What types of medical questions can I ask?**\n- πŸ’‘ You can ask about **symptoms, diseases, and treatments**.")
        gr.Markdown("**5️⃣ Does this AI prescribe medicine?**\n- ❌ No, this chatbot does **not prescribe medications**.")
        gr.Markdown("**6️⃣ Can it provide emergency medical advice?**\n- ⚠️ No, always contact a doctor in emergencies.")
        gr.Markdown("**7️⃣ Is it suitable for mental health support?**\n- 🧠 It provides basic guidance but is **not a replacement for therapy**.")
        gr.Markdown("**8️⃣ How often is the AI updated?**\n- πŸ”„ The AI is updated periodically.")
        gr.Markdown("**9️⃣ Can I trust the medical advice given?**\n- πŸ₯ Always verify with professionals.")
        gr.Markdown("**πŸ”Ÿ Can I use this chatbot for educational purposes?**\n- πŸ“š Yes! Great for learning.")

# ✅ Launch the app
app.launch()