Ivy1997 committed
Commit 7ac54c1 · verified · 1 Parent(s): 0066452

Update app.py

Files changed (1)
  1. app.py +21 -3
app.py CHANGED
@@ -44,7 +44,10 @@ def respond(
             messages.append({"role": "assistant", "content": val[1]})
 
     if image:
-        # Process image if provided
+        # Load and process the image
+        if isinstance(image, str):
+            image = Image.open(image)
+
         image_tensor = process_images([image], image_processor, model.config)
         image_tensor = [_image.to(dtype=torch.float16, device=device) for _image in image_tensor]
 
@@ -71,8 +74,23 @@ def respond(
     else:
         messages.append({"role": "user", "content": message})
 
-    # Simulating a response as `client.chat_completion` is not defined in this script
-    response = "Simulated response based on input: " + message
+    conv_template = "qwen_1_5"
+    conv = copy.deepcopy(conv_templates[conv_template])
+    conv.append_message(conv.roles[0], message)
+    conv.append_message(conv.roles[1], None)
+    prompt_question = conv.get_prompt()
+
+    input_ids = tokenizer(prompt_question, return_tensors="pt", max_length=max_tokens, truncation=True).to(device)
+
+    cont = model.generate(
+        input_ids,
+        do_sample=True,
+        temperature=temperature,
+        max_new_tokens=max_tokens,
+        top_p=top_p,
+    )
+
+    response = tokenizer.batch_decode(cont, skip_special_tokens=True)[0]
 
     yield response
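For context, the image branch in the first hunk follows the usual LLaVA-style preprocessing recipe. A minimal self-contained sketch, not the app's exact code: it assumes process_images is llava.mm_utils.process_images (the helper the diff calls) and that model, image_processor, and device are already loaded.

import torch
from PIL import Image
from llava.mm_utils import process_images  # assumption: the LLaVA-NeXT helper the app imports

def prepare_image(image, image_processor, model, device):
    # Accept a file path or an already-decoded PIL image, mirroring the
    # isinstance(image, str) check this commit adds.
    if isinstance(image, str):
        image = Image.open(image)
    tensors = process_images([image], image_processor, model.config)
    # Cast each tensor to half precision and move it to the target device, as in the diff.
    return [t.to(dtype=torch.float16, device=device) for t in tensors]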
 
 
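The second hunk replaces the simulated reply with LLaVA's conversation-template generation loop. A minimal sketch of that path, assuming llava.conversation.conv_templates and an already-loaded model and tokenizer; unlike the committed hunk, it passes .input_ids to model.generate, since tokenizer(...) returns a BatchEncoding rather than a tensor.

import copy
from llava.conversation import conv_templates  # assumption: LLaVA-NeXT templates

def generate_reply(message, model, tokenizer, device,
                   temperature=0.7, top_p=0.95, max_tokens=512):
    # Build the prompt with the qwen_1_5 chat template, as in the diff.
    conv = copy.deepcopy(conv_templates["qwen_1_5"])
    conv.append_message(conv.roles[0], message)  # user turn
    conv.append_message(conv.roles[1], None)     # empty assistant slot to complete
    prompt = conv.get_prompt()

    input_ids = tokenizer(
        prompt, return_tensors="pt", max_length=max_tokens, truncation=True
    ).input_ids.to(device)

    output_ids = model.generate(
        input_ids,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        max_new_tokens=max_tokens,
    )
    return tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]

For image turns, LLaVA-style models usually receive the processed tensors through an images= keyword argument to generate; the hunk above prepares image_tensor but does not yet show it being passed in.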