thlinhares committed on
Commit c74b8b6
1 Parent(s): 676edf4

initial commit

Files changed (1)
  1. app.py +25 -0
app.py ADDED
@@ -0,0 +1,25 @@
+ import io
+
+ import requests
+ import torch
+ from PIL import Image
+ from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig
+
+ # step 1: Setup constants
+ device = "cuda"
+ dtype = torch.float16
+
+ # step 2: Load Processor and Model
+ processor = AutoProcessor.from_pretrained("StanfordAIMI/CheXagent-8b", trust_remote_code=True)
+ generation_config = GenerationConfig.from_pretrained("StanfordAIMI/CheXagent-8b")
+ model = AutoModelForCausalLM.from_pretrained("StanfordAIMI/CheXagent-8b", torch_dtype=dtype, trust_remote_code=True).to(device)  # move the model to the GPU so it matches the inputs below
+
+ # step 3: Fetch the images
+ image_path = "https://upload.wikimedia.org/wikipedia/commons/3/3b/Pleural_effusion-Metastatic_breast_carcinoma_Case_166_%285477628658%29.jpg"
+ images = [Image.open(io.BytesIO(requests.get(image_path).content)).convert("RGB")]
+
+ # step 4: Generate the Findings section
+ prompt = 'Describe "Airway"'
+ inputs = processor(images=images, text=f" USER: <s>{prompt} ASSISTANT: <s>", return_tensors="pt").to(device=device, dtype=dtype)
+ output = model.generate(**inputs, generation_config=generation_config)[0]
+ response = processor.tokenizer.decode(output, skip_special_tokens=True)
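
Note that app.py builds `response` but never surfaces it, so running the script as committed produces no visible output. A minimal way to inspect the result, assuming you simply want it on stdout (this line is not part of the commit), is to append:

    # print the decoded Findings text produced by CheXagent
    print(response)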