Alberto Carmona committed on
Commit 6d1b898
Parent(s): 4912cf9

Remove unused code

Files changed (1)
app.py +3 -14
app.py CHANGED
@@ -9,7 +9,6 @@ import pytorch_lightning as pl
 import gradio as gr
 
 from diffusers import LDMTextToImagePipeline
-# import PIL.Image
 import random
 import os
 
@@ -21,7 +20,7 @@ class ModelCheckpoint(pl.callbacks.ModelCheckpoint):
         filepath = os.path.join(self.dirpath, self.prefix + 'interrupt.ckpt')
         self._save_model(filepath)
 
-device = 'cpu' #@param ["cuda", "cpu"] {allow-input: true}
+device = 'cpu'
 reward = 'clips_grammar'
 
 cfg = f'./configs/phase2/clipRN50_{reward}.yml'
@@ -93,7 +92,7 @@ model = model.to(device)
 model.eval();
 
 import clip
-from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
+from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor
 from PIL import Image
 from timm.models.vision_transformer import resize_pos_embed
 
@@ -138,21 +137,11 @@ def generate_text_from_image(img):
     tmp_fc = tmp_fc[0]
 
     att_feat = tmp_att
-    fc_feat = tmp_fc
 
     # Inference configurations
     eval_kwargs = {}
     eval_kwargs.update(vars(opt))
 
-    verbose = eval_kwargs.get('verbose', True)
-    verbose_beam = eval_kwargs.get('verbose_beam', 0)
-    verbose_loss = eval_kwargs.get('verbose_loss', 1)
-
-    # dataset = eval_kwargs.get('dataset', 'coco')
-    beam_size = eval_kwargs.get('beam_size', 1)
-    sample_n = eval_kwargs.get('sample_n', 1)
-    remove_bad_endings = eval_kwargs.get('remove_bad_endings', 0)
-
     with torch.no_grad():
         fc_feats = torch.zeros((1,0)).to(device)
         att_feats = att_feat.view(1, 196, 2048).float().to(device)
@@ -162,7 +151,7 @@ def generate_text_from_image(img):
         # Only leave one feature for each image, in case duplicate sample
         tmp_eval_kwargs = eval_kwargs.copy()
         tmp_eval_kwargs.update({'sample_n': 1})
-        seq, seq_logprobs = model(
+        seq, _ = model(
             fc_feats, att_feats, att_masks, opt=tmp_eval_kwargs, mode='sample')
         seq = seq.data
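
Note: the removed "#@param ["cuda", "cpu"] {allow-input: true}" annotation is Colab form-field markup with no effect outside a notebook, so pinning device = 'cpu' behaves identically on a CPU-only Space. A minimal sketch of the usual portable alternative (an assumption, not part of this commit):

    import torch

    # Prefer CUDA when a GPU is present; fall back to CPU otherwise.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'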
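
Note: the att_feat.view(1, 196, 2048) call in the unchanged context implies the attention features form a 14x14 grid of 2048-dim vectors (14 * 14 = 196), flattened to one feature per grid cell. A minimal sketch of that reshape (shapes inferred from the call above, not taken from the repo):

    import torch

    # A 14x14 spatial grid of 2048-dim features, as the view() implies
    tmp_att = torch.randn(14, 14, 2048)
    att_feats = tmp_att.view(1, 196, 2048)  # batch of 1, 196 grid cells
    assert att_feats.shape == (1, 196, 2048)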