# app.py — Tonic's CheXRay Space (CheXagent-8b chest X-ray analysis demo on Hugging Face Spaces / ZeroGPU).
import spaces
import io
import torch
from PIL import Image
import gradio as gr
from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig
title = """# Welcome to🌟Tonic's CheXRay⚕⚛ !
You can use this ZeroGPU Space to test out the current model [StanfordAIMI/CheXagent-8b](https://huggingface.co/StanfordAIMI/CheXagent-8b). CheXRay⚕⚛ is fine tuned to analyze chest x-rays with a different and generally better results than other multimodal models.
You can also useCheXRay⚕⚛ by cloning this space. 🧬🔬🔍 Simply click here: <a style="display:inline-block" href="https://huggingface.co/spaces/Tonic/CheXRay?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></h3>
### How To use
simply upload an image with the right prompt (coming soon!) and anaylze your Xray !
Join us : 🌟TeamTonic🌟 is always making cool demos! Join our active builder's🛠️community 👻 [![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/GWpVpekp) On 🤗Huggingface: [TeamTonic](https://huggingface.co/TeamTonic) & [MultiTransformer](https://huggingface.co/MultiTransformer) On 🌐Github: [Polytonic](https://github.com/tonic-ai) & contribute to 🌟 [Poly](https://github.com/tonic-ai/poly) 🤗Big thanks to Yuvi Sharma and all the folks at huggingface for the community grant 🤗
"""
# --- Model setup -----------------------------------------------------------
# CheXagent-8b is a chest-X-ray-specialized multimodal model; trust_remote_code
# is required because its modeling code ships with the checkpoint.
device = "cuda"
dtype = torch.float16
processor = AutoProcessor.from_pretrained("StanfordAIMI/CheXagent-8b", trust_remote_code=True)
generation_config = GenerationConfig.from_pretrained("StanfordAIMI/CheXagent-8b")
model = AutoModelForCausalLM.from_pretrained("StanfordAIMI/CheXagent-8b", torch_dtype=dtype, trust_remote_code=True)
# Bug fix: the model was never moved to the GPU, while generate() sends its
# inputs to `device` — that mismatch raises at inference time. On ZeroGPU
# Spaces the module-level .to("cuda") is the expected pattern.
model = model.to(device)
@spaces.GPU
def generate(image, prompt):
    """Run CheXagent on one uploaded image and return the decoded response.

    Parameters
    ----------
    image : PIL.Image.Image | file-like | str
        The uploaded X-ray. Accepts a PIL image, an open binary file-like
        object (the legacy Gradio ``type="file"`` payload the original code
        assumed), or a filesystem path — so the function works across
        Gradio versions.
    prompt : str
        Free-text instruction inserted into the chat template.

    Returns
    -------
    str
        The tokenizer-decoded generation with special tokens stripped.

    Raises
    ------
    ValueError
        If no image was provided.
    """
    if image is None:
        raise ValueError("Please upload an image before generating.")
    # Normalize the three possible payload shapes into a single RGB PIL image.
    if isinstance(image, Image.Image):
        pil_image = image.convert("RGB")
    elif hasattr(image, "read"):
        pil_image = Image.open(io.BytesIO(image.read())).convert("RGB")
    else:
        pil_image = Image.open(image).convert("RGB")
    # Prompt string follows the CheXagent USER/ASSISTANT chat template.
    inputs = processor(
        images=[pil_image],
        text=f" USER: <s>{prompt} ASSISTANT: <s>",
        return_tensors="pt",
    ).to(device=device, dtype=dtype)
    # Take the first (only) sequence from the batched generate() output.
    output = model.generate(**inputs, generation_config=generation_config)[0]
    return processor.tokenizer.decode(output, skip_special_tokens=True)
# --- Gradio UI -------------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# AI Medical Image Analysis")
    gr.Markdown("Upload a medical image and enter a prompt to receive an AI-generated analysis.")
    # NOTE(review): the `title` markdown defined at module level is never
    # rendered — presumably it was meant to be shown here; confirm intent.
    with gr.Row():
        with gr.Column():
            # NOTE(review): Image(type="file") was removed in Gradio 4.x
            # (valid values there: "numpy", "pil", "filepath"); this only
            # runs on older Gradio — verify the Space's pinned version.
            image_input = gr.Image(type="file")
            prompt_input = gr.Textbox(label="Prompt")
        with gr.Column():
            output_text = gr.Textbox(label="Response")
    generate_button = gr.Button("Generate")
    # Wire the button: generate(image, prompt) -> Response textbox.
    generate_button.click(fn=generate, inputs=[image_input, prompt_input], outputs=output_text)
demo.launch()