import os

from huggingface_hub import login

# 🔐 Hugging Face authentication
hf_token = os.environ.get("HUGGINGFACE_API_KEY")
if hf_token:
    login(hf_token)
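# NOTE (assumption): HUGGINGFACE_API_KEY is expected to be set as a Space secret;
# when it is missing, the guard above skips login() instead of failing at startup,
# so the app can still run against an already-cached or public model.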
from transformers import AutoModelForCausalLM, AutoProcessor
import torch
import gradio as gr
from PIL import Image

model_id = "ContactDoctor/Bio-Medical-MultiModal-Llama-3-8B-V1"
# ✅ Load the model
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    trust_remote_code=True,
    torch_dtype=torch.float16,
    device_map="auto"
)
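# device_map="auto" requires the `accelerate` package to be installed;
# trust_remote_code=True is needed because this checkpoint ships its own
# multimodal model code rather than a stock transformers architecture.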
# ✅ Load the processor
processor = AutoProcessor.from_pretrained(
    model_id,
    trust_remote_code=True
)
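# AutoProcessor bundles the tokenizer together with the image preprocessor,
# which is why processor.tokenizer.decode() is available further below.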
# 🧠 Main inference function
def generate_answer(image, question):
    print("📥 Question received:", question)
    print("🖼️ Image provided?", image is not None)

    # Fall back to a generic prompt when the question box is left empty
    if not question or question.strip() == "":
        question = "Please describe this medical image."

    prompt = f"### User: {question}\n### Assistant:"
    try:
        if image is None:
            # Text-only query: tokenize the prompt without pixel inputs
            inputs = processor(prompt, return_tensors="pt").to(model.device)
        else:
            print("✅ Image provided, processing...")
            inputs = processor(prompt, images=image, return_tensors="pt").to(model.device)

        print("🚀 Generating...")
        outputs = model.generate(**inputs, max_new_tokens=256)
        decoded = processor.tokenizer.decode(outputs[0], skip_special_tokens=True)

        print("✅ Answer generated.")
        # Drop the echoed prompt so only the assistant's reply is returned
        return decoded[len(prompt):].strip()
    except Exception as e:
        print("❌ Caught exception:", str(e))
        return f"⚠️ Internal Error: {str(e)}"
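# Quick local smoke test outside the Gradio UI (hypothetical file name and questions):
# >>> img = Image.open("sample_xray.png")
# >>> print(generate_answer(img, "What abnormality is visible?"))
# >>> print(generate_answer(None, "What are common causes of pleural effusion?"))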
# 🎛️ Gradio interface
demo = gr.Interface(
    fn=generate_answer,
    inputs=[
        gr.Image(type="pil", label="🩻 Medical image (optional)"),
        gr.Textbox(label="❓ Your medical question")
    ],
    outputs="text",
    title="🧠 ContactDoctor - Biomedical LLM",
    description="Multimodal medical assistant. Ask a question or upload an image."
)
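# Optional (assumption): generation can take a while on CPU hardware, so
# enabling Gradio's request queue helps avoid browser-side timeouts:
# demo.queue()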
# 🚀 Launch
demo.launch()