Filip committed
Commit 8ed8457 · 1 Parent(s): 6544719
Files changed (2)
  1. app.py +102 -0
  2. requirements.txt +5 -0
app.py ADDED
@@ -0,0 +1,102 @@
+ import gradio as gr
+ from transformers import AutoModelForCausalLM, AutoTokenizer, AutoProcessor, TextStreamer
+ import torch
+
+ # Configure torch to use CPU
+ device = "cpu"
+ torch.set_default_device(device)
+
+ # Load model, tokenizer, and processor
+ def load_model():
+     model_name = "forestav/unsloth_vision_radiography_finetune"
+
+     # Load with CPU optimization settings; float32 is used because
+     # half-precision kernels are generally unavailable on CPU and
+     # generation can fail outright with float16 there
+     model = AutoModelForCausalLM.from_pretrained(
+         model_name,
+         device_map="cpu",
+         torch_dtype=torch.float32,
+         low_cpu_mem_usage=True
+     )
+     tokenizer = AutoTokenizer.from_pretrained(model_name)
+     processor = AutoProcessor.from_pretrained(model_name)
+     return model, tokenizer, processor
+
+ # Initialize model, tokenizer, and processor globally so they load once at startup
+ print("Loading model...")
+ model, tokenizer, processor = load_model()
+ print("Model loaded!")
+
+ def analyze_image(image, instruction):
+     if instruction.strip() == "":
+         instruction = "You are an expert radiographer. Describe accurately what you see in this image."
+
+     # Prepare the chat messages: an image placeholder plus the instruction text
+     messages = [
+         {"role": "user", "content": [
+             {"type": "image"},
+             {"type": "text", "text": instruction}
+         ]}
+     ]
+
+     # Process the image and text; tokenize=False makes apply_chat_template
+     # return the prompt string the processor expects instead of token ids
+     inputs = processor(
+         images=image,
+         text=tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True),
+         return_tensors="pt"
+     )
+
+     # Stream tokens to stdout as they are generated, skipping the prompt
+     text_streamer = TextStreamer(tokenizer, skip_prompt=True)
+
+     # Generate with lower resource settings; do_sample=True is required
+     # for temperature and min_p to take effect
+     with torch.no_grad():
+         outputs = model.generate(
+             **inputs,
+             max_new_tokens=128,
+             do_sample=True,
+             temperature=1.2,
+             min_p=0.1,
+             use_cache=True,
+             streamer=text_streamer
+         )
+
+     # Decode the response
+     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     return response
+
+ # Create the Gradio interface
+ with gr.Blocks() as demo:
+     gr.Markdown("""
+     # Medical Image Analysis Assistant
+     Upload a medical image and receive a professional description from an AI radiographer.
+     """)
+
+     with gr.Row():
+         with gr.Column():
+             image_input = gr.Image(type="pil", label="Upload Medical Image")
+             instruction_input = gr.Textbox(
+                 label="Custom Instruction (optional)",
+                 placeholder="You are an expert radiographer. Describe accurately what you see in this image.",
+                 lines=2
+             )
+             submit_btn = gr.Button("Analyze Image")
+
+         with gr.Column():
+             output_text = gr.Textbox(label="Analysis Result", lines=10)
+
+     # Handle the submission
+     submit_btn.click(
+         fn=analyze_image,
+         inputs=[image_input, instruction_input],
+         outputs=output_text
+     )
+
+     gr.Markdown("""
+     ### Notes:
+     - The model runs on CPU and may take a few moments to process each image
+     - For best results, upload clear, high-quality medical images
+     - A default instruction is used if none is provided
+     """)
+
+ # Launch the app
+ if __name__ == "__main__":
+     demo.launch()
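Because `demo.launch()` only runs under `if __name__ == "__main__":`, the handler can also be exercised without the UI. A minimal smoke-test sketch, assuming the file above is saved as `app.py` next to the test script; the `smoke_test.py` name and the blank placeholder image are illustrative, not part of this commit:

```python
# smoke_test.py — hypothetical helper, not part of this commit.
# Importing app runs the module-level load_model() call, so the
# checkpoint is downloaded and loaded before analyze_image is used.
from PIL import Image

from app import analyze_image

# Blank grayscale placeholder; any PIL image is accepted by the handler.
test_image = Image.new("L", (224, 224), color=128)

# Passing an empty instruction exercises the default-prompt branch.
print(analyze_image(test_image, ""))
```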
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ transformers>=4.31.0
+ torch>=2.0.0
+ gradio>=3.34.0
+ accelerate>=0.26.0
+ pillow
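With both files in place, `pip install -r requirements.txt` followed by `python app.py` should bring the demo up on Gradio's default local port (7860) unless configured otherwise.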