# Optional: read a Hugging Face API token from the environment (currently unused)
# api_token = os.getenv("HF_TOKEN", "").strip()
import gradio as gr
from transformers import AutoModel, AutoTokenizer
import torch
# Load the model and tokenizer
model_name = "ContactDoctor/Bio-Medical-MultiModal-Llama-3-8B-V1"
model = AutoModel.from_pretrained(model_name, trust_remote_code=True, device_map="auto", torch_dtype=torch.float16)
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
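# Note: if the model repo is gated or private, from_pretrained also accepts a
# token argument. A minimal sketch, assuming HF_TOKEN is set as a Space secret:
# import os
# model = AutoModel.from_pretrained(model_name, trust_remote_code=True, token=os.getenv("HF_TOKEN"))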

def process_query(image, question):
    # Build the chat input; the image is optional
    inputs = {"question": question}
    if image is not None:
        inputs["image"] = image
    # Run the model's multimodal chat interface and return the generated answer
    response = model.chat(
        image=inputs.get("image"),
        msgs=[{"role": "user", "content": question}],
        tokenizer=tokenizer,
    )
    return response

iface = gr.Interface(
    fn=process_query,
    # Pass the upload as a PIL image, which the model's chat method expects
    inputs=[gr.Image(type="pil", label="Upload Medical Image"), gr.Textbox(label="Question")],
    outputs="text",
    title="Medical Multimodal Assistant",
    description="Upload a medical image and ask your question.",
)

iface.launch()