Commit d32adcb by jykoh (1 parent: 5067d2d)

Add debug logs

Files changed (2):
  1. app.py +4 -0
  2. fromage/models.py +3 -0
app.py CHANGED
@@ -1,3 +1,5 @@
+import os
+os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "False"
 import numpy as np
 import torch
 from PIL import Image
@@ -54,6 +56,8 @@ class FromageChatBot:
     top_p = 1.0
     if temp != 0.0:
       top_p = 0.95
+
+    print('Running model.generate_for_images_and_texts')
     model_outputs = self.model.generate_for_images_and_texts(model_inputs,
       num_words=num_words, ret_scale_factor=ret_scale_factor, top_p=top_p,
       temperature=temperature, max_num_rets=max_nm_rets)
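
Note (not part of the commit): HF_HUB_ENABLE_HF_TRANSFER is read by huggingface_hub when it is first imported, which is why the commit sets it at the very top of app.py, before any import that might pull that package in. A minimal sketch of the same ordering, assuming the Space fetches its checkpoint with hf_hub_download; the repo id and filename below are hypothetical:

import os

# Must be set before huggingface_hub is imported; "False" falls back to the
# pure-Python downloader instead of the hf_transfer backend.
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "False"

from huggingface_hub import hf_hub_download

# Hypothetical repo id and filename, only to illustrate the ordering.
ckpt_path = hf_hub_download(repo_id="some-user/fromage-ckpt",
                            filename="pretrained_ckpt.pth.tar")
print(ckpt_path)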
fromage/models.py CHANGED
@@ -499,6 +499,7 @@ class Fromage(nn.Module):
     input_ids = []
     add_bos = True
 
+    print('L502: enumerate(prompts)')
     for i, p in enumerate(prompts):
       if type(p) == Image.Image:
         # Encode as image.
@@ -525,6 +526,7 @@ class Fromage(nn.Module):
     input_embs = torch.cat(input_embs, dim=1)
     input_ids = torch.cat(input_ids, dim=1)
 
+    print('L529 called')
     if num_words == 0:
       generated_ids = input_ids
       outputs = self.model.lm(inputs_embeds=input_embs, use_cache=False, output_hidden_states=True)
@@ -552,6 +554,7 @@ class Fromage(nn.Module):
     else:
       raise ValueError
 
+    print('L557 called')
     # Save outputs as an interleaved list.
     return_outputs = []
     # Find up to max_num_rets [RET] tokens, and their corresponding scores.
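
Side note (a sketch only, not what the commit does): the same trace points could be emitted through the standard logging module, which is easy to silence once debugging is done. The logger name and messages below are placeholders:

import logging

logging.basicConfig(level=logging.DEBUG,
                    format="%(asctime)s %(name)s %(levelname)s: %(message)s")
logger = logging.getLogger("fromage.models")

# Rough equivalents of the three print calls added by the commit.
logger.debug("entering prompt-encoding loop")          # print('L502: enumerate(prompts)')
logger.debug("input_embs/input_ids concatenated")      # print('L529 called')
logger.debug("generation done, building return list")  # print('L557 called')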