from transformers import Blip2ForConditionalGeneration
from transformers import Blip2Processor
from peft import PeftModel
import streamlit as st
from PIL import Image
import torch
import os

preprocess_ckp = "Salesforce/blip2-opt-2.7b" #Checkpoint used to preprocess the input image
base_model_ckp = "./model/blip2-opt-2.7b-fp16-sharded" #Base model checkpoint path
peft_model_ckp = "./model/blip2_peft" #PEFT adapter checkpoint path
sample_img_path = "./sample_images/"

#Preprocess input 
processor = Blip2Processor.from_pretrained(preprocess_ckp)

#Model   
#Inference on GPU device. Will give an error on a CPU-only system, as "load_in_8bit" is a setting of the bitsandbytes library and only works on GPU
#model = Blip2ForConditionalGeneration.from_pretrained(base_model_ckp, load_in_8bit = True, device_map = "auto") 

#Inference on CPU device
model = Blip2ForConditionalGeneration.from_pretrained(base_model_ckp) 

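#Load the fine-tuned PEFT adapter weights from peft_model_ckp on top of the base BLIP-2 model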
model = PeftModel.from_pretrained(model, peft_model_ckp)


def main():

    st.title("Fashion Image Caption using BLIP2")

    #Offer a few sample images for different clothing categories
    option = st.selectbox('Sample images?', ('cap', 'tee', 'dress'))
    file_name = st.file_uploader("Upload image")

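    #Fall back to the selected sample image when no file has been uploaded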
    if file_name is None and option is not None:

        file_name = os.path.join(sample_img_path, option)

    if file_name is not None:

        image_col, caption_text = st.columns(2)

        image_col.header("Image")
        image = Image.open(file_name)
        image_col.image(image, use_column_width = True)

        #Preprocess the image
        #Inference on GPU. Running this path on a CPU-only machine gives errors like: "slow_conv2d_cpu" not implemented for 'Half', Input type (float) and bias type (struct c10::Half)
        #inputs = processor(images = image, return_tensors = "pt").to('cuda', torch.float16)

        #Inference on CPU
        inputs = processor(images = image, return_tensors = "pt")

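        #Only the image tensor is needed here; no text prompt is passed, so the caption is generated from the image alone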
        pixel_values = inputs.pixel_values

        #Predict the caption for the image
        generated_ids = model.generate(pixel_values = pixel_values, max_length = 25)
        generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]  

        #Output the predicted caption
        caption_text.header("Generated Caption")
        caption_text.text(generated_caption)

if __name__ == "__main__":
    main()