TobyYang7 committed
Commit 6548bcf
1 Parent(s): 5b853cd

Update llava_llama3/serve/cli.py

Files changed (1):
  llava_llama3/serve/cli.py  +2 -2
llava_llama3/serve/cli.py CHANGED
@@ -26,7 +26,7 @@ def load_image(image_file):
     return image
 
 
-def chat_llava(args, image_file, text, tokenizer, model, image_processor, context_len):
+def chat_llava(args, image_file, text, tokenizer, model, streamer, image_processor, context_len):
     # Model
     disable_torch_init()
 
@@ -63,7 +63,7 @@ def chat_llava(args, image_file, text, tokenizer, model, image_processor, context_len):
     input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(model.device)
     stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
     keywords = [stop_str]
-    streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
+    # streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
 
     with torch.inference_mode():
         output_ids = model.generate(
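
This commit moves `TextStreamer` construction out of `chat_llava`: the function no longer builds the streamer internally and instead receives it through the new `streamer` parameter. A minimal caller-side sketch of the adapted usage follows; the `tokenizer`, `model`, `image_processor`, and `context_len` objects are assumed to come from the repo's usual model-loading path, and the file path and prompt are hypothetical, since none of that appears in this diff.

# Sketch of calling chat_llava after this commit (assumed usage, not part of the diff).
# The caller now constructs the TextStreamer once and passes it in explicitly.
from transformers import TextStreamer

# tokenizer / model / image_processor / context_len are assumed to be loaded
# elsewhere by the repo's model-loading code (not shown in this diff).
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

output = chat_llava(
    args,
    image_file="example.jpg",       # hypothetical input image
    text="Describe this image.",    # hypothetical prompt
    tokenizer=tokenizer,
    model=model,
    streamer=streamer,              # new parameter introduced by this commit
    image_processor=image_processor,
    context_len=context_len,
)

One plausible reason for this design: constructing the streamer at the call site lets a caller reuse one streamer across repeated chat_llava calls, or swap in a different streaming strategy, without touching the function body.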