LanHarmony committed on
Commit
b863a7a
1 Parent(s): 5147e50

add Infinity and Chinese support

Browse files
Files changed (1) hide show
  1. app.py +8 -4
app.py CHANGED
@@ -152,7 +152,7 @@ class ConversationBot:
152
  f"Current Memory: {self.agent.memory.buffer}")
153
  return state, state
154
 
155
- def run_image(self, image, state, txt):
156
  image_filename = os.path.join('image', f"{str(uuid.uuid4())[:8]}.png")
157
  print("======>Auto Resize Image...")
158
  img = Image.open(image.name)
@@ -166,8 +166,12 @@ class ConversationBot:
166
  img.save(image_filename, "PNG")
167
  print(f"Resize image form {width}x{height} to {width_new}x{height_new}")
168
  description = self.models['ImageCaptioning'].inference(image_filename)
169
- Human_prompt = f'\nHuman: provide a figure named {image_filename}. The description is: {description}. This information helps you to understand this image, but you should use tools to finish following tasks, rather than directly imagine from my description. If you understand, say \"Received\". \n'
170
- AI_prompt = "Received. "
 
 
 
 
171
  self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + 'AI: ' + AI_prompt
172
  state = state + [(f"![](/file={image_filename})*{image_filename}*", AI_prompt)]
173
  print(f"\nProcessed run_image, Input image: {image_filename}\nCurrent state: {state}\n"
@@ -261,7 +265,7 @@ with gr.Blocks(css="#chatbot {overflow:auto; height:500px;}") as demo:
261
  txt.submit(lambda: "", None, txt)
262
  run.click(bot.run_text, [txt, state], [chatbot, state])
263
  run.click(lambda: "", None, txt)
264
- btn.upload(bot.run_image, [btn, state, txt], [chatbot, state, txt])
265
  clear.click(bot.memory.clear)
266
  clear.click(lambda: [], None, chatbot)
267
  clear.click(lambda: [], None, state)
 
152
  f"Current Memory: {self.agent.memory.buffer}")
153
  return state, state
154
 
155
+ def run_image(self, image, state, txt, lang):
156
  image_filename = os.path.join('image', f"{str(uuid.uuid4())[:8]}.png")
157
  print("======>Auto Resize Image...")
158
  img = Image.open(image.name)
 
166
  img.save(image_filename, "PNG")
167
  print(f"Resize image form {width}x{height} to {width_new}x{height_new}")
168
  description = self.models['ImageCaptioning'].inference(image_filename)
169
+ if lang == 'Chinese':
170
+ Human_prompt = f'\nHuman: 提供一张名为 {image_filename}的图片。它的描述是: {description}。 这些信息帮助你理解这个图像,但是你应该使用工具来完成下面的任务,而不是直接从我的描述中想象。 如果你明白了, 说 \"收到\". \n'
171
+ AI_prompt = "收到。 "
172
+ else:
173
+ Human_prompt = f'\nHuman: provide a figure named {image_filename}. The description is: {description}. This information helps you to understand this image, but you should use tools to finish following tasks, rather than directly imagine from my description. If you understand, say \"Received\". \n'
174
+ AI_prompt = "Received. "
175
  self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + 'AI: ' + AI_prompt
176
  state = state + [(f"![](/file={image_filename})*{image_filename}*", AI_prompt)]
177
  print(f"\nProcessed run_image, Input image: {image_filename}\nCurrent state: {state}\n"
 
265
  txt.submit(lambda: "", None, txt)
266
  run.click(bot.run_text, [txt, state], [chatbot, state])
267
  run.click(lambda: "", None, txt)
268
+ btn.upload(bot.run_image, [btn, state, txt, lang], [chatbot, state, txt])
269
  clear.click(bot.memory.clear)
270
  clear.click(lambda: [], None, chatbot)
271
  clear.click(lambda: [], None, state)