Ahsen Khaliq committed on
Commit
ade4ef4
1 Parent(s): 85ee5c8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -11
app.py CHANGED
@@ -223,26 +223,27 @@ def generate2(
223
 
224
  return generated_list[0]
225
 
226
- is_gpu = False
227
 
228
- device = CUDA(0) if is_gpu else "cpu"
229
- clip_model, preprocess = clip.load("ViT-B/32", device=device, jit=False)
230
- tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
231
-
232
-
233
- prefix_length = 10
234
-
235
- model = ClipCaptionModel(prefix_length)
236
-
237
- model = model.to(device)
238
 
239
 
240
  def inference(img,model):
 
 
 
 
 
 
 
 
 
 
 
241
  if model == "COCO":
242
  model_path = 'coco_weights.pt'
243
  else:
244
  model_path = 'conceptual_weights.pt'
245
  model.load_state_dict(torch.load(model_path, map_location=CPU))
 
246
 
247
  use_beam_search = False
248
  image = io.imread(img.name)
 
223
 
224
  return generated_list[0]
225
 
 
226
 
 
 
 
 
 
 
 
 
 
 
227
 
228
 
229
  def inference(img,model):
230
+ is_gpu = False
231
+
232
+ device = CUDA(0) if is_gpu else "cpu"
233
+ clip_model, preprocess = clip.load("ViT-B/32", device=device, jit=False)
234
+ tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
235
+
236
+
237
+ prefix_length = 10
238
+
239
+ model = ClipCaptionModel(prefix_length)
240
+
241
  if model == "COCO":
242
  model_path = 'coco_weights.pt'
243
  else:
244
  model_path = 'conceptual_weights.pt'
245
  model.load_state_dict(torch.load(model_path, map_location=CPU))
246
+ model = model.to(device)
247
 
248
  use_beam_search = False
249
  image = io.imread(img.name)