ponytail committed on
Commit
96292e9
·
verified ·
1 Parent(s): 2619577

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -8
app.py CHANGED
@@ -9,7 +9,7 @@ from datetime import datetime
9
  import numpy as np
10
  import os
11
 
12
- #os.environ["no_proxy"] = "localhost,127.0.0.1,::1"
13
 
14
 
15
  def array_to_image_path(image_array):
@@ -55,15 +55,9 @@ def run_example(image, text_input=None, model_id="HumanLlaVA-8B"):
55
  model = models[model_id]
56
  processor = processors[model_id]
57
  raw_image = Image.open(image_path)
 
58
  inputs = processor(images=raw_image, text=prompt, return_tensors='pt').to(cuda, torch.float16)
59
 
60
- # generated_ids = model.generate(**inputs, max_new_tokens=128)
61
- # generated_ids_trimmed = [
62
- # out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
63
- # ]
64
- # output_text = processor.batch_decode(
65
- # generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
66
- # )
67
  output = model.generate(**inputs, max_new_tokens=400, do_sample=False)
68
  print(output)
69
  predict = processor.decode(output[0][:], skip_special_tokens=False)
 
9
  import numpy as np
10
  import os
11
 
12
+ os.environ["no_proxy"] = "localhost,127.0.0.1,::1"
13
 
14
 
15
  def array_to_image_path(image_array):
 
55
  model = models[model_id]
56
  processor = processors[model_id]
57
  raw_image = Image.open(image_path)
58
+ prompt = "USER: <image>\n" + text_input + "\nASSISTANT:"
59
  inputs = processor(images=raw_image, text=prompt, return_tensors='pt').to(cuda, torch.float16)
60
 
 
 
 
 
 
 
 
61
  output = model.generate(**inputs, max_new_tokens=400, do_sample=False)
62
  print(output)
63
  predict = processor.decode(output[0][:], skip_special_tokens=False)