Update app.py
app.py CHANGED
@@ -41,6 +41,8 @@ model_id_clip = "openai/clip-vit-base-patch16"
 model_clip = CLIPModel.from_pretrained(model_id_clip).to("cpu")
 processor_clip = CLIPProcessor.from_pretrained(model_id_clip)
 
+print('--------------Loaded CLIP----------------------')
+
 # Preprocess the image for clip
 def preprocess_image(image_path):
     image = Image.open(image_path).convert("RGB")
@@ -62,10 +64,14 @@ def encode_image(image_path):
 img_proj_head = _MLPVectorProjector(512, 2560, 1, 4).to("cpu")
 img_proj_head.load_state_dict(torch.load('projection_finetuned.pth', map_location=torch.device('cpu')))
 
+print('--------------Loaded proj head----------------------')
+
 #Get the fine-tuned phi-2 model
 phi2_finetuned = AutoModelForCausalLM.from_pretrained(
     "phi2_adaptor_fineTuned", trust_remote_code=True,
     torch_dtype = torch.float32).to("cpu")
+
+print('--------------Loaded fine tuned phi2 model----------------------')
 
 
 def example_inference(input_text, count): #, image, img_qn, audio):
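For context, the three objects this diff instruments form the image branch of the app's multimodal pipeline: CLIP encodes an image into a 512-dim embedding, the projection head lifts that embedding into phi-2's 2560-dim embedding space, and the fine-tuned phi-2 generates text. Below is a minimal sketch of how they plausibly fit together, assuming the usual CLIP-projector-LM wiring; _MLPVectorProjector and the real inference function are defined elsewhere in app.py, and names such as caption_image and the tokenizer argument are hypothetical illustrations, not the app's actual code.

# Minimal sketch, not the app's actual code: assumes the projector maps a
# (1, 512) CLIP embedding to one or more 2560-dim pseudo-tokens for phi-2.
import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

model_id_clip = "openai/clip-vit-base-patch16"
model_clip = CLIPModel.from_pretrained(model_id_clip).to("cpu")
processor_clip = CLIPProcessor.from_pretrained(model_id_clip)

def encode_image(image_path):
    # CLIP image tower -> one 512-dim embedding per image.
    image = Image.open(image_path).convert("RGB")
    inputs = processor_clip(images=image, return_tensors="pt")
    with torch.no_grad():
        return model_clip.get_image_features(**inputs)  # shape (1, 512)

def caption_image(image_path, img_proj_head, phi2_finetuned, tokenizer, question):
    # Hypothetical end-to-end call: project the image embedding, prepend it to
    # the question's token embeddings, and let phi-2 generate from the mix.
    img_embed = encode_image(image_path)                    # (1, 512)
    proj = img_proj_head(img_embed).view(1, -1, 2560)       # (1, n_tokens, 2560)
    q_ids = tokenizer(question, return_tensors="pt").input_ids
    q_embed = phi2_finetuned.get_input_embeddings()(q_ids)  # (1, seq, 2560)
    inputs_embeds = torch.cat([proj, q_embed], dim=1)
    out_ids = phi2_finetuned.generate(inputs_embeds=inputs_embeds, max_new_tokens=40)
    return tokenizer.decode(out_ids[0], skip_special_tokens=True)

The added print statements themselves are a cheap way to mark load progress: on a CPU-only deployment these from_pretrained and load_state_dict calls dominate startup time, so the banners show in the logs which loading stage the app has reached.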