Tech-Meld committed on
Commit a66b74b
1 Parent(s): db4d557

Update app.py

Files changed (1)
  1. app.py +3 -3
app.py CHANGED
@@ -4,7 +4,7 @@ import spaces
 import torch
 import re
 
-model = PaliGemmaForConditionalGeneration.from_pretrained("gokaygokay/sd3-long-captioner").to("cuda").eval()
+model = PaliGemmaForConditionalGeneration.from_pretrained("gokaygokay/sd3-long-captioner").to("cpu").eval()
 processor = PaliGemmaProcessor.from_pretrained("gokaygokay/sd3-long-captioner")
 
 def modify_caption(caption: str) -> str:
@@ -28,13 +28,13 @@ def modify_caption(caption: str) -> str:
 
     return re.sub(pattern, replace_fn, caption, count=1, flags=re.IGNORECASE)
 
-@spaces.GPU
+
 def create_captions_rich(images):
     captions = []
     prompt = "caption en"
 
     for image in images:
-        model_inputs = processor(text=prompt, images=image, return_tensors="pt").to("cuda")
+        model_inputs = processor(text=prompt, images=image, return_tensors="pt").to("cpu")
        input_len = model_inputs["input_ids"].shape[-1]
 
         with torch.inference_mode():
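
In effect, this commit moves both the model and the processor inputs from CUDA to CPU and drops the @spaces.GPU decorator, so create_captions_rich no longer requests GPU time from the Spaces ZeroGPU scheduler. Below is a minimal, device-agnostic sketch of the same load-and-caption flow: the model id, the processor call, and the "caption en" prompt come from app.py above, while the device selection, the generation settings (max_new_tokens, greedy decoding), and the caption_image helper name are illustrative assumptions rather than the Space's exact code.

import torch
from transformers import PaliGemmaForConditionalGeneration, PaliGemmaProcessor

# Pick a GPU when one is present so the same script runs on CPU-only hosts too.
device = "cuda" if torch.cuda.is_available() else "cpu"

model = (
    PaliGemmaForConditionalGeneration
    .from_pretrained("gokaygokay/sd3-long-captioner")
    .to(device)
    .eval()
)
processor = PaliGemmaProcessor.from_pretrained("gokaygokay/sd3-long-captioner")

def caption_image(image) -> str:
    # Same prompt the Space uses for English captions.
    model_inputs = processor(text="caption en", images=image, return_tensors="pt").to(device)
    input_len = model_inputs["input_ids"].shape[-1]

    with torch.inference_mode():
        # max_new_tokens and greedy decoding are assumed defaults, not taken from the diff.
        generation = model.generate(**model_inputs, max_new_tokens=256, do_sample=False)

    # Strip the prompt tokens and decode only the newly generated caption.
    return processor.decode(generation[0][input_len:], skip_special_tokens=True)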