Fizzarolli committed on
Commit
78edf01
1 Parent(s): fda67b7

stuff and stuff and stuff and

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -15,10 +15,10 @@ model.to("cuda")
15
  processor = AutoProcessor.from_pretrained(MODEL_ID)
16
 
17
  @spaces.GPU
18
- def tag_image(image, max_new_tokens=128):
19
  inputs = processor(images=image, text="<image>tag en", return_tensors="pt").to("cuda")
20
  streamer = TextIteratorStreamer(tokenizer=processor.tokenizer, skip_prompt=True)
21
- generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=max_new_tokens, use_cache=True, cache_implementation="hybrid")
22
  thread = Thread(target=model.generate, kwargs=generation_kwargs)
23
 
24
  text = ""
@@ -31,7 +31,7 @@ def tag_image(image, max_new_tokens=128):
31
 
32
  gr.Interface(
33
  fn=tag_image,
34
- inputs=[gr.Image(type="pil"), gr.Slider(minimum=1, maximum=1024, value=128)],
35
  outputs=gr.Textbox(type="text"),
36
  title=TITLE,
37
  description=DESCRIPTION,
 
15
  processor = AutoProcessor.from_pretrained(MODEL_ID)
16
 
17
  @spaces.GPU
18
+ def tag_image(image, max_new_tokens=128, temperature=1, top_p=1, min_p=0):
19
  inputs = processor(images=image, text="<image>tag en", return_tensors="pt").to("cuda")
20
  streamer = TextIteratorStreamer(tokenizer=processor.tokenizer, skip_prompt=True)
21
+ generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=max_new_tokens, use_cache=True, cache_implementation="hybrid", temperature=temperature, top_p=top_p, min_p=min_p)
22
  thread = Thread(target=model.generate, kwargs=generation_kwargs)
23
 
24
  text = ""
 
31
 
32
  gr.Interface(
33
  fn=tag_image,
34
+ inputs=[gr.Image(type="pil"), gr.Slider(label="Max new tokens", minimum=1, maximum=1024, value=128), gr.Slider(label="Temperature", minimum=0, maximum=2, value=1), gr.Slider(label="Top p", minimum=0, maximum=1, value=1), gr.Slider(label="Min p", minimum=0, maximum=1, value=0)],
35
  outputs=gr.Textbox(type="text"),
36
  title=TITLE,
37
  description=DESCRIPTION,