JJteam committed on
Commit
1aba1d6
1 Parent(s): 1a16c10
Files changed (1) hide show
  1. MM-REACT/app.py +19 -125
MM-REACT/app.py CHANGED
@@ -1,10 +1,6 @@
1
- # example call script
2
- # https://dev.azure.com/visionbio/objectdetection/_git/objectdetection?path=/verify/langimg.py&version=GBehazar/langchain&_a=contents
3
-
4
  import re
5
  import io
6
  import os
7
- import ssl
8
  from typing import Optional, Tuple
9
  import datetime
10
  import sys
@@ -23,52 +19,17 @@ from langchain.utilities import ImunAPIWrapper, ImunMultiAPIWrapper
23
  from openai.error import AuthenticationError, InvalidRequestError, RateLimitError
24
  import argparse
25
 
26
- # header_key = os.environ.get("CVFIAHMED_KEY")
27
  OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
28
- TOOLS_LIST = ['pal-math', 'imun'] #'google-search','news-api','tmdb-api','open-meteo-api'
29
- TOOLS_DEFAULT_LIST = ['pal-math', 'imun']
30
- BUG_FOUND_MSG = "Congratulations, you've found a bug in this application!"
31
- AUTH_ERR_MSG = "Please paste your OpenAI key from openai.com to use this application. "
32
  MAX_TOKENS = 512
33
 
34
 
35
- ############ GLOBAL CHAIN ###########
36
- # chain = None
37
- # memory = None
38
- #####################################
39
- ############ GLOBAL IMAGE_COUNT #####
40
- IMAGE_COUNT=0
41
- #####################################
42
  ############## ARGS #################
43
  AGRS = None
44
  #####################################
45
 
46
-
47
- # Temporarily address Wolfram Alpha SSL certificate issue
48
- ssl._create_default_https_context = ssl._create_unverified_context
49
-
50
-
51
- def get_caption_onnx_api(imgf):
52
-
53
- headers = {
54
- 'Content-Type': 'application/octet-stream',
55
- 'Ocp-Apim-Subscription-Key': header_key,
56
- }
57
-
58
- params = {
59
- 'features': 'description',
60
- 'model-version': 'latest',
61
- 'language': 'en',
62
- 'descriptionExclude': 'Celebrities,Landmarks',
63
- }
64
-
65
- with open(imgf, 'rb') as f:
66
- data = f.read()
67
-
68
- response = requests.post('https://cvfiahmed.cognitiveservices.azure.com/vision/v2022-07-31-preview/operations/imageanalysis:analyze', params=params, headers=headers, data=data)
69
-
70
- return json.loads(response.content)['descriptionResult']['values'][0]['text']
71
-
72
  def reset_memory(history):
73
  # global memory
74
  # memory.clear()
@@ -77,34 +38,28 @@ def reset_memory(history):
77
  history = []
78
  return history, history
79
 
80
-
81
  def load_chain(history):
82
  global ARGS
83
- # global chain
84
- # global memory
85
- # memory = None
86
 
87
  if ARGS.openAIModel == 'openAIGPT35':
88
  # openAI GPT 3.5
89
  llm = OpenAI(temperature=0, max_tokens=MAX_TOKENS)
90
  elif ARGS.openAIModel == 'azureChatGPT':
91
- # for Azure OpenAI ChatGPT
92
- # Azure OpenAI param name 'deployment_name': 'text-davinci-002', 'model_name': 'text-davinci-002', 'temperature': 0.7, 'max_tokens': 256, 'top_p': 1, 'frequency_penalty': 0, 'presence_penalty': 0, 'n': 1, 'best_of': 1
93
- # llm = AzureOpenAI(deployment_name="text-chat-davinci-002", model_name="text-chat-davinci-002", temperature=1, top_p=0.9, max_tokens=MAX_TOKENS)
94
  llm = AzureOpenAI(deployment_name="text-chat-davinci-002", model_name="text-chat-davinci-002", temperature=0, max_tokens=MAX_TOKENS)
95
  elif ARGS.openAIModel == 'azureGPT35turbo':
 
96
  llm = AzureOpenAI(deployment_name="gpt-35-turbo-version-0301", model_name="gpt-35-turbo (version 0301)", temperature=0, max_tokens=MAX_TOKENS)
97
  elif ARGS.openAIModel == 'azureTextDavinci003':
98
- # for Azure OpenAI ChatGPT
99
- # Azure OpenAI param name 'deployment_name': 'text-davinci-002', 'model_name': 'text-davinci-002', 'temperature': 0.7, 'max_tokens': 256, 'top_p': 1, 'frequency_penalty': 0, 'presence_penalty': 0, 'n': 1, 'best_of': 1
100
  llm = AzureOpenAI(deployment_name="text-davinci-003", model_name="text-davinci-003", temperature=0, max_tokens=MAX_TOKENS)
101
 
102
- # tool_names = TOOLS_DEFAULT_LIST
103
- # tools = load_tools(tool_names, llm=llm)
104
  memory = ConversationBufferMemory(memory_key="chat_history")
105
 
 
106
  #############################
107
- # loading tools
108
 
109
  imun_dense = ImunAPIWrapper(
110
  imun_url="https://ehazarwestus.cognitiveservices.azure.com/computervision/imageanalysis:analyze",
@@ -141,7 +96,7 @@ def load_chain(history):
141
  bing = BingSearchAPIWrapper(k=2)
142
 
143
  def edit_photo(query: str) -> str:
144
- endpoint = "http://10.123.124.92:7863/"
145
  query = query.strip()
146
  url_idx = query.rfind(" ")
147
  img_url = query[url_idx + 1:].strip()
@@ -149,7 +104,7 @@ def load_chain(history):
149
  img_url = img_url[:-1]
150
  if not img_url.startswith(("http://", "https://")):
151
  return "Invalid image URL"
152
- img_url = img_url.replace("0.0.0.0", "10.123.124.92")
153
  instruction = query[:url_idx]
154
  # This should be some internal IP to wherever the server runs
155
  job = {"image_path": img_url, "instruction": instruction}
@@ -240,8 +195,6 @@ def load_chain(history):
240
  ),
241
  ]
242
 
243
- # chain = initialize_agent(tools, llm, agent="conversational-react-description", verbose=True, memory=memory)
244
- # chain = initialize_agent(tools, llm, agent="conversational-assistant", verbose=True, memory=memory, return_intermediate_steps=True)
245
  chain = initialize_agent(tools, llm, agent="conversational-assistant", verbose=True, memory=memory, return_intermediate_steps=True, max_iterations=4)
246
  print("langchain reloaded")
247
  history = []
@@ -249,6 +202,7 @@ def load_chain(history):
249
  return history, history, chain, gr.Textbox.update(visible=True), gr.Button.update(visible=True), gr.UploadButton.update(visible=True)
250
 
251
 
 
252
  def run_chain(chain, inp):
253
  # global chain
254
 
@@ -270,7 +224,7 @@ def run_chain(chain, inp):
270
 
271
  return output
272
 
273
-
274
  class ChatWrapper:
275
 
276
  def __init__(self):
@@ -312,54 +266,17 @@ class ChatWrapper:
312
 
313
  # upload image
314
  def add_image(state, chain, image):
315
- global IMAGE_COUNT
316
  global ARGS
317
- IMAGE_COUNT = IMAGE_COUNT + 1
318
  state = state or []
319
 
320
- # cap_onnx = get_caption_onnx_api(image.name)
321
- # cap_onnx = "The image shows " + cap_onnx
322
- # state = state + [(f"![](/file={image.name})", cap_onnx)]
323
-
324
- # : f"Image {N} http://0.0.0.0:7860/file={image.name}"
325
- # Image_N
326
- # wget http://0.0.0.0:7860/file=/tmp/bananabdzk2eqi.jpg
327
- # url_input_for_chain = "Image_{} http://0.0.0.0:7860/file={}".format(IMAGE_COUNT, image.name)
328
-
329
-
330
- # ############################################
331
- # # move the file name to uuid based instead of real name
332
- # image_path = image.name
333
- # file_dir = os.path.dirname(image_path)
334
- # split_tup = os.path.splitext(image_path)
335
- # fileExtension = split_tup[1]
336
- # new_file_name = str(uuid.uuid1())[:10] + fileExtension
337
-
338
-
339
- # # make dir at app level if not exist
340
- # app_level_folder = 'static/'
341
- # if not os.path.exists(app_level_folder):
342
- # os.makedirs(app_level_folder + file_dir)
343
- # new_file_path = app_level_folder + file_dir + "/" + new_file_name
344
-
345
- # shutil.copyfile(image_path, new_file_path)
346
- # os.remove(image_path)
347
- # ######################################
348
-
349
-
350
  url_input_for_chain = "http://0.0.0.0:{}/file={}".format(ARGS.port, image.name)
351
 
352
- # !!!!!! quick HACK to refer to image in this server for image editing pruprose
353
- # url_input_for_chain = url_input_for_chain.replace("0.0.0.0", "10.123.124.92")
354
-
355
-
356
- ########################
357
- # multi line
358
  outputs = run_chain(chain, url_input_for_chain)
359
 
 
 
360
  outputs = process_chain_output(outputs)
361
 
362
- print (" len(outputs) {}".format(len(outputs)))
363
  for i, output in enumerate(outputs):
364
  if i==0:
365
  # state.append((f"![](/file={image.name})", output))
@@ -368,10 +285,10 @@ def add_image(state, chain, image):
368
  state.append((None, output))
369
 
370
 
371
-
372
  print (state)
373
  return state, state
374
 
 
375
  def replace_with_image_markup(text):
376
  img_url = None
377
  text= text.strip()
@@ -384,6 +301,7 @@ def replace_with_image_markup(text):
384
  # img_url = f"![](/file={img_url})"
385
  return img_url
386
 
 
387
  def process_chain_output(outputs):
388
  global ARGS
389
  # print("outputs {}".format(outputs))
@@ -407,8 +325,6 @@ def process_chain_output(outputs):
407
  # cleanOutputs = cleanOutputs + output+ "."
408
  outputs = cleanOutputs
409
 
410
- # make it bold
411
- # outputs = "<b>{}</b>".format(outputs)
412
  return outputs
413
 
414
 
@@ -417,14 +333,10 @@ def init_and_kick_off():
417
  # initalize chatWrapper
418
  chat = ChatWrapper()
419
 
420
- # with gr.Blocks(css=".gradio-container {background-color: lightgray}") as block:
421
- # with gr.Blocks(css="#resetbtn {background-color: #4CAF50; color: red;} #chatbot {height: 700px; overflow: auto;}") as block:
422
  with gr.Blocks() as block:
423
  llm_state = gr.State()
424
  history_state = gr.State()
425
- chain_state = gr.State()
426
-
427
-
428
 
429
  reset_btn = gr.Button(value="!!!CLICK to wake up the AI!!!", variant="secondary", elem_id="resetbtn").style(full_width=True)
430
 
@@ -440,16 +352,7 @@ def init_and_kick_off():
440
  submit = gr.Button(value="Send", variant="secondary", visible=False).style(full_width=True)
441
  with gr.Column(scale=0.10, min_width=0):
442
  btn = gr.UploadButton("📁", file_types=["image"], visible=False).style(full_width=True)
443
- # btn = gr.UploadButton("📁", file_types=["image", "video", "audio"])
444
 
445
- # with gr.Row():
446
- # with gr.Column(scale=0.90):
447
- # gr.HTML("""
448
- # <p>This application, developed by Cognitive Service Team Microsoft, demonstrates all cognitive service APIs in a conversational agent
449
- # </p>""")
450
- # # with gr.Column(scale=0.10):
451
- # # reset_btn = gr.Button(value="Initiate Chat", variant="secondary", elem_id="resetbtn").style(full_width=False)
452
-
453
  message.submit(chat, inputs=[message, history_state, chain_state],
454
  outputs=[chatbot, history_state, message])
455
 
@@ -457,19 +360,10 @@ def init_and_kick_off():
457
  outputs=[chatbot, history_state, message])
458
 
459
  btn.upload(add_image, inputs=[history_state, chain_state, btn], outputs=[history_state, chatbot])
460
- # reset_btn.click(reset_memory, inputs=[history_state], outputs=[chatbot, history_state])
461
-
462
- # openai_api_key_textbox.change(set_openai_api_key,
463
- # inputs=[openai_api_key_textbox],
464
- # outputs=[chain_state])
465
  # load the chain
466
  reset_btn.click(load_chain, inputs=[history_state], outputs=[chatbot, history_state, chain_state, message, submit, btn])
467
 
468
-
469
-
470
- # # load the chain
471
- # load_chain()
472
-
473
  # launch the app
474
  block.launch(server_name="0.0.0.0", server_port = ARGS.port)
475
 
 
 
 
 
1
  import re
2
  import io
3
  import os
 
4
  from typing import Optional, Tuple
5
  import datetime
6
  import sys
 
19
  from openai.error import AuthenticationError, InvalidRequestError, RateLimitError
20
  import argparse
21
 
 
22
  OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
23
+ BUG_FOUND_MSG = "There is a bug in the application!"
24
+ AUTH_ERR_MSG = "OpenAI key needed"
 
 
25
  MAX_TOKENS = 512
26
 
27
 
 
 
 
 
 
 
 
28
  ############## ARGS #################
29
  AGRS = None
30
  #####################################
31
 
32
+ # resets memory
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  def reset_memory(history):
34
  # global memory
35
  # memory.clear()
 
38
  history = []
39
  return history, history
40
 
41
+ # load chain
42
  def load_chain(history):
43
  global ARGS
 
 
 
44
 
45
  if ARGS.openAIModel == 'openAIGPT35':
46
  # openAI GPT 3.5
47
  llm = OpenAI(temperature=0, max_tokens=MAX_TOKENS)
48
  elif ARGS.openAIModel == 'azureChatGPT':
49
+ # for Azure OpenAI ChatGPT
 
 
50
  llm = AzureOpenAI(deployment_name="text-chat-davinci-002", model_name="text-chat-davinci-002", temperature=0, max_tokens=MAX_TOKENS)
51
  elif ARGS.openAIModel == 'azureGPT35turbo':
52
+ # for Azure OpenAI gpt3.5 turbo
53
  llm = AzureOpenAI(deployment_name="gpt-35-turbo-version-0301", model_name="gpt-35-turbo (version 0301)", temperature=0, max_tokens=MAX_TOKENS)
54
  elif ARGS.openAIModel == 'azureTextDavinci003':
55
+ # for Azure OpenAI text davinci
 
56
  llm = AzureOpenAI(deployment_name="text-davinci-003", model_name="text-davinci-003", temperature=0, max_tokens=MAX_TOKENS)
57
 
 
 
58
  memory = ConversationBufferMemory(memory_key="chat_history")
59
 
60
+
61
  #############################
62
+ # loading all tools
63
 
64
  imun_dense = ImunAPIWrapper(
65
  imun_url="https://ehazarwestus.cognitiveservices.azure.com/computervision/imageanalysis:analyze",
 
96
  bing = BingSearchAPIWrapper(k=2)
97
 
98
  def edit_photo(query: str) -> str:
99
+ endpoint = os.environ.get("PHOTO_EDIT_ENDPOINT_URL")
100
  query = query.strip()
101
  url_idx = query.rfind(" ")
102
  img_url = query[url_idx + 1:].strip()
 
104
  img_url = img_url[:-1]
105
  if not img_url.startswith(("http://", "https://")):
106
  return "Invalid image URL"
107
+ img_url = img_url.replace("0.0.0.0", os.environ.get("PHOTO_EDIT_ENDPOINT_URL_SHORT"))
108
  instruction = query[:url_idx]
109
  # This should be some internal IP to wherever the server runs
110
  job = {"image_path": img_url, "instruction": instruction}
 
195
  ),
196
  ]
197
 
 
 
198
  chain = initialize_agent(tools, llm, agent="conversational-assistant", verbose=True, memory=memory, return_intermediate_steps=True, max_iterations=4)
199
  print("langchain reloaded")
200
  history = []
 
202
  return history, history, chain, gr.Textbox.update(visible=True), gr.Button.update(visible=True), gr.UploadButton.update(visible=True)
203
 
204
 
205
+ # executes input typed by human
206
  def run_chain(chain, inp):
207
  # global chain
208
 
 
224
 
225
  return output
226
 
227
+ # simple chat function wrapper
228
  class ChatWrapper:
229
 
230
  def __init__(self):
 
266
 
267
  # upload image
268
  def add_image(state, chain, image):
 
269
  global ARGS
 
270
  state = state or []
271
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
272
  url_input_for_chain = "http://0.0.0.0:{}/file={}".format(ARGS.port, image.name)
273
 
 
 
 
 
 
 
274
  outputs = run_chain(chain, url_input_for_chain)
275
 
276
+ ########################
277
+ # multi line response handling
278
  outputs = process_chain_output(outputs)
279
 
 
280
  for i, output in enumerate(outputs):
281
  if i==0:
282
  # state.append((f"![](/file={image.name})", output))
 
285
  state.append((None, output))
286
 
287
 
 
288
  print (state)
289
  return state, state
290
 
291
+ # extract image url from response and process differently
292
  def replace_with_image_markup(text):
293
  img_url = None
294
  text= text.strip()
 
301
  # img_url = f"![](/file={img_url})"
302
  return img_url
303
 
304
+ # multi line response handling
305
  def process_chain_output(outputs):
306
  global ARGS
307
  # print("outputs {}".format(outputs))
 
325
  # cleanOutputs = cleanOutputs + output+ "."
326
  outputs = cleanOutputs
327
 
 
 
328
  return outputs
329
 
330
 
 
333
  # initalize chatWrapper
334
  chat = ChatWrapper()
335
 
 
 
336
  with gr.Blocks() as block:
337
  llm_state = gr.State()
338
  history_state = gr.State()
339
+ chain_state = gr.State()
 
 
340
 
341
  reset_btn = gr.Button(value="!!!CLICK to wake up the AI!!!", variant="secondary", elem_id="resetbtn").style(full_width=True)
342
 
 
352
  submit = gr.Button(value="Send", variant="secondary", visible=False).style(full_width=True)
353
  with gr.Column(scale=0.10, min_width=0):
354
  btn = gr.UploadButton("📁", file_types=["image"], visible=False).style(full_width=True)
 
355
 
 
 
 
 
 
 
 
 
356
  message.submit(chat, inputs=[message, history_state, chain_state],
357
  outputs=[chatbot, history_state, message])
358
 
 
360
  outputs=[chatbot, history_state, message])
361
 
362
  btn.upload(add_image, inputs=[history_state, chain_state, btn], outputs=[history_state, chatbot])
363
+
 
 
 
 
364
  # load the chain
365
  reset_btn.click(load_chain, inputs=[history_state], outputs=[chatbot, history_state, chain_state, message, submit, btn])
366
 
 
 
 
 
 
367
  # launch the app
368
  block.launch(server_name="0.0.0.0", server_port = ARGS.port)
369