sanjanatule committed
Commit 015cbbb
Parent: ba525e7

Update app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -51,7 +51,7 @@ def model_generate_ans(img=None,img_audio=None,val_q=None):
     with torch.no_grad():
 
         # image
-        if image:
+        if img:
             image_processed = processor(images=img, return_tensors="pt").to(device)
             clip_val_outputs = clip_model(**image_processed).last_hidden_state[:,1:,:]
             val_image_embeds = projection(clip_val_outputs)
@@ -76,7 +76,7 @@ def model_generate_ans(img=None,img_audio=None,val_q=None):
         val_q_embeds = merged_model.model.embed_tokens(val_q_tokenised).unsqueeze(0)
 
         val_combined_embeds = []
-        if image:
+        if img:
             #val_combined_embeds = torch.cat([val_combined_embeds, val_image_embeds, img_token_embeds], dim=1)
             val_combined_embeds.append(val_image_embeds)
             val_combined_embeds.append(img_token_embeds)
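
The change itself is a two-line rename: both guards referenced `image`, while the function parameter is `img`, so the image branch would not run as intended (and would raise a NameError unless `image` happened to be defined elsewhere in the module). Below is a minimal, self-contained sketch of the corrected branching. It is not the project's actual code: the CLIP model, processor, and merged language model are replaced by placeholder tensors and a toy projection layer, and the tensor sizes are arbitrary.

import torch
import torch.nn as nn

# Hypothetical stand-ins for the real CLIP->LLM pipeline; widths are placeholders.
clip_width, llm_width = 8, 16
projection = nn.Linear(clip_width, llm_width)

def model_generate_ans(img=None, img_audio=None, val_q=None):
    with torch.no_grad():
        val_combined_embeds = []

        # image branch: guard on the actual parameter name `img`
        # (`is not None` is used because the stand-in is a tensor, whose truth
        # value is ambiguous; the real app receives an image object from the UI)
        if img is not None:
            val_image_embeds = projection(img)   # stand-in for projection(clip_model(...))
            val_combined_embeds.append(val_image_embeds)

        # question branch: stand-in for embed_tokens(val_q_tokenised)
        if val_q is not None:
            val_combined_embeds.append(val_q)

        # concatenate whichever modalities are present along the sequence axis
        return torch.cat(val_combined_embeds, dim=1)

# toy usage: a fake 1x4x8 "CLIP" output plus a fake 1x3x16 "question" embedding
out = model_generate_ans(img=torch.randn(1, 4, clip_width),
                         val_q=torch.randn(1, 3, llm_width))
print(out.shape)  # torch.Size([1, 7, 16])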