import os

import torch
from PIL import Image
from transformers import AutoProcessor

model_path = "D:/nighttest/nightshadeblip_high.pth"
test_path = "D:/nighttest/test/"

# Pick the device once; fall back to CPU if CUDA is unavailable.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the fine-tuned BLIP captioning model (saved as a full model object)
# and move it to the chosen device before inference.
model = torch.load(model_path, map_location=device)
model.to(device)
model.eval()

processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")

# Caption every image file in the test directory.
for filename in os.listdir(test_path):
    file_path = os.path.join(test_path, filename)
    if not os.path.isfile(file_path):
        continue

    # Convert to RGB so grayscale or RGBA images don't break the processor.
    image = Image.open(file_path).convert("RGB")
    inputs = processor(images=image, return_tensors="pt").to(device)
    pixel_values = inputs.pixel_values

    # Generate a caption without tracking gradients.
    with torch.no_grad():
        generated_ids = model.generate(pixel_values=pixel_values, max_length=50)
    generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
    print(f"[{generated_caption}] {file_path}")