JJteam committed on
Commit
05d5a3e
1 Parent(s): b7c01de

updating with new langchain

Browse files
AllInOneApp/Chat-GPT-LangChain/app.py CHANGED
@@ -246,7 +246,7 @@ def load_chain(history):
246
  print("langchain reloaded")
247
  history = []
248
  history.append(("Show me what you got!", "Hi Human, I am ready to serve!"))
249
- return history, history, chain
250
 
251
 
252
  def run_chain(chain, inp):
@@ -325,10 +325,32 @@ def add_image(state, chain, image):
325
  # Image_N
326
  # wget http://0.0.0.0:7860/file=/tmp/bananabdzk2eqi.jpg
327
  # url_input_for_chain = "Image_{} http://0.0.0.0:7860/file={}".format(IMAGE_COUNT, image.name)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
328
  url_input_for_chain = "http://0.0.0.0:{}/file={}".format(ARGS.port, image.name)
329
 
330
  # !!!!!! quick HACK to refer to image in this server for image editing purpose
331
- # url_input_for_chain = url_input_for_chain.replace("0.0.0.0", "10.123.124.92")
332
 
333
 
334
  ########################
@@ -378,9 +400,10 @@ def process_chain_output(outputs):
378
  if "assistant: here is the edited image " in output.lower():
379
  img_url = replace_with_image_markup(output)
380
  cleanOutputs.append("Assistant: Here is the edited image")
381
- if img_url is not None:
382
- cleanOutputs.append((img_url,))
383
- cleanOutputs.append(output)
 
384
  # cleanOutputs = cleanOutputs + output+ "."
385
  outputs = cleanOutputs
386
 
@@ -403,7 +426,7 @@ def init_and_kick_off():
403
 
404
 
405
 
406
- reset_btn = gr.Button(value="!!!CLICK to wake up the AI!!!", variant="secondary", elem_id="resetbtn").style(full_width=False)
407
 
408
  with gr.Row():
409
  chatbot = gr.Chatbot(elem_id="chatbot").style(height=620)
@@ -412,20 +435,20 @@ def init_and_kick_off():
412
  with gr.Column(scale=0.75):
413
  message = gr.Textbox(label="What's on your mind??",
414
  placeholder="What's the answer to life, the universe, and everything?",
415
- lines=1)
416
  with gr.Column(scale=0.15):
417
- submit = gr.Button(value="Send", variant="secondary").style(full_width=False)
418
  with gr.Column(scale=0.10, min_width=0):
419
- btn = gr.UploadButton("📁", file_types=["image"])
420
  # btn = gr.UploadButton("📁", file_types=["image", "video", "audio"])
421
 
422
- with gr.Row():
423
- with gr.Column(scale=0.90):
424
- gr.HTML("""
425
- <p>This application, developed by Cognitive Service Team Microsoft, demonstrates all cognitive service APIs in a conversational agent
426
- </p>""")
427
- # with gr.Column(scale=0.10):
428
- # reset_btn = gr.Button(value="Initiate Chat", variant="secondary", elem_id="resetbtn").style(full_width=False)
429
 
430
  message.submit(chat, inputs=[message, history_state, chain_state],
431
  outputs=[chatbot, history_state, message])
@@ -440,7 +463,7 @@ def init_and_kick_off():
440
  # inputs=[openai_api_key_textbox],
441
  # outputs=[chain_state])
442
  # load the chain
443
- reset_btn.click(load_chain, inputs=[history_state], outputs=[chatbot, history_state, chain_state])
444
 
445
 
446
 
 
246
  print("langchain reloaded")
247
  history = []
248
  history.append(("Show me what you got!", "Hi Human, I am ready to serve!"))
249
+ return history, history, chain, gr.Textbox.update(visible=True), gr.Button.update(visible=True), gr.UploadButton.update(visible=True)
250
 
251
 
252
  def run_chain(chain, inp):
 
325
  # Image_N
326
  # wget http://0.0.0.0:7860/file=/tmp/bananabdzk2eqi.jpg
327
  # url_input_for_chain = "Image_{} http://0.0.0.0:7860/file={}".format(IMAGE_COUNT, image.name)
328
+
329
+
330
+ # ############################################
331
+ # # move the file name to uuid based instead of real name
332
+ # image_path = image.name
333
+ # file_dir = os.path.dirname(image_path)
334
+ # split_tup = os.path.splitext(image_path)
335
+ # fileExtension = split_tup[1]
336
+ # new_file_name = str(uuid.uuid1())[:10] + fileExtension
337
+
338
+
339
+ # # make dir at app level if not exist
340
+ # app_level_folder = 'static/'
341
+ # if not os.path.exists(app_level_folder):
342
+ # os.makedirs(app_level_folder + file_dir)
343
+ # new_file_path = app_level_folder + file_dir + "/" + new_file_name
344
+
345
+ # shutil.copyfile(image_path, new_file_path)
346
+ # os.remove(image_path)
347
+ # ######################################
348
+
349
+
350
  url_input_for_chain = "http://0.0.0.0:{}/file={}".format(ARGS.port, image.name)
351
 
352
  # !!!!!! quick HACK to refer to image in this server for image editing purpose
353
+ url_input_for_chain = url_input_for_chain.replace("0.0.0.0", "10.123.124.92")
354
 
355
 
356
  ########################
 
400
  if "assistant: here is the edited image " in output.lower():
401
  img_url = replace_with_image_markup(output)
402
  cleanOutputs.append("Assistant: Here is the edited image")
403
+ if img_url is not None:
404
+ cleanOutputs.append((img_url,))
405
+ else:
406
+ cleanOutputs.append(output)
407
  # cleanOutputs = cleanOutputs + output+ "."
408
  outputs = cleanOutputs
409
 
 
426
 
427
 
428
 
429
+ reset_btn = gr.Button(value="!!!CLICK to wake up the AI!!!", variant="secondary", elem_id="resetbtn").style(full_width=True)
430
 
431
  with gr.Row():
432
  chatbot = gr.Chatbot(elem_id="chatbot").style(height=620)
 
435
  with gr.Column(scale=0.75):
436
  message = gr.Textbox(label="What's on your mind??",
437
  placeholder="What's the answer to life, the universe, and everything?",
438
+ lines=1, visible=False)
439
  with gr.Column(scale=0.15):
440
+ submit = gr.Button(value="Send", variant="secondary", visible=False).style(full_width=True)
441
  with gr.Column(scale=0.10, min_width=0):
442
+ btn = gr.UploadButton("📁", file_types=["image"], visible=False).style(full_width=True)
443
  # btn = gr.UploadButton("📁", file_types=["image", "video", "audio"])
444
 
445
+ # with gr.Row():
446
+ # with gr.Column(scale=0.90):
447
+ # gr.HTML("""
448
+ # <p>This application, developed by Cognitive Service Team Microsoft, demonstrates all cognitive service APIs in a conversational agent
449
+ # </p>""")
450
+ # # with gr.Column(scale=0.10):
451
+ # # reset_btn = gr.Button(value="Initiate Chat", variant="secondary", elem_id="resetbtn").style(full_width=False)
452
 
453
  message.submit(chat, inputs=[message, history_state, chain_state],
454
  outputs=[chatbot, history_state, message])
 
463
  # inputs=[openai_api_key_textbox],
464
  # outputs=[chain_state])
465
  # load the chain
466
+ reset_btn.click(load_chain, inputs=[history_state], outputs=[chatbot, history_state, chain_state, message, submit, btn])
467
 
468
 
469
 
AllInOneApp/langchain/.github/workflows/linkcheck.yml DELETED
@@ -1,36 +0,0 @@
1
- name: linkcheck
2
-
3
- on:
4
- push:
5
- branches: [master]
6
- pull_request:
7
-
8
- env:
9
- POETRY_VERSION: "1.3.1"
10
-
11
- jobs:
12
- build:
13
- runs-on: ubuntu-latest
14
- strategy:
15
- matrix:
16
- python-version:
17
- - "3.11"
18
- steps:
19
- - uses: actions/checkout@v3
20
- - name: Install poetry
21
- run: |
22
- pipx install poetry==$POETRY_VERSION
23
- - name: Set up Python ${{ matrix.python-version }}
24
- uses: actions/setup-python@v4
25
- with:
26
- python-version: ${{ matrix.python-version }}
27
- cache: poetry
28
- - name: Install dependencies
29
- run: |
30
- poetry install --with docs
31
- - name: Build the docs
32
- run: |
33
- make docs_build
34
- - name: Analyzing the docs with linkcheck
35
- run: |
36
- make docs_linkcheck
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
AllInOneApp/langchain/.github/workflows/lint.yml DELETED
@@ -1,36 +0,0 @@
1
- name: lint
2
-
3
- on:
4
- push:
5
- branches: [master]
6
- pull_request:
7
-
8
- env:
9
- POETRY_VERSION: "1.3.1"
10
-
11
- jobs:
12
- build:
13
- runs-on: ubuntu-latest
14
- strategy:
15
- matrix:
16
- python-version:
17
- - "3.8"
18
- - "3.9"
19
- - "3.10"
20
- - "3.11"
21
- steps:
22
- - uses: actions/checkout@v3
23
- - name: Install poetry
24
- run: |
25
- pipx install poetry==$POETRY_VERSION
26
- - name: Set up Python ${{ matrix.python-version }}
27
- uses: actions/setup-python@v4
28
- with:
29
- python-version: ${{ matrix.python-version }}
30
- cache: poetry
31
- - name: Install dependencies
32
- run: |
33
- poetry install
34
- - name: Analysing the code with our lint
35
- run: |
36
- make lint
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
AllInOneApp/langchain/.github/workflows/release.yml DELETED
@@ -1,49 +0,0 @@
1
- name: release
2
-
3
- on:
4
- pull_request:
5
- types:
6
- - closed
7
- branches:
8
- - master
9
- paths:
10
- - 'pyproject.toml'
11
-
12
- env:
13
- POETRY_VERSION: "1.3.1"
14
-
15
- jobs:
16
- if_release:
17
- if: |
18
- ${{ github.event.pull_request.merged == true }}
19
- && ${{ contains(github.event.pull_request.labels.*.name, 'release') }}
20
- runs-on: ubuntu-latest
21
- steps:
22
- - uses: actions/checkout@v3
23
- - name: Install poetry
24
- run: pipx install poetry==$POETRY_VERSION
25
- - name: Set up Python 3.10
26
- uses: actions/setup-python@v4
27
- with:
28
- python-version: "3.10"
29
- cache: "poetry"
30
- - name: Build project for distribution
31
- run: poetry build
32
- - name: Check Version
33
- id: check-version
34
- run: |
35
- echo version=$(poetry version --short) >> $GITHUB_OUTPUT
36
- - name: Create Release
37
- uses: ncipollo/release-action@v1
38
- with:
39
- artifacts: "dist/*"
40
- token: ${{ secrets.GITHUB_TOKEN }}
41
- draft: false
42
- generateReleaseNotes: true
43
- tag: v${{ steps.check-version.outputs.version }}
44
- commit: master
45
- - name: Publish to PyPI
46
- env:
47
- POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
48
- run: |
49
- poetry publish
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
AllInOneApp/langchain/.github/workflows/test.yml DELETED
@@ -1,34 +0,0 @@
1
- name: test
2
-
3
- on:
4
- push:
5
- branches: [master]
6
- pull_request:
7
-
8
- env:
9
- POETRY_VERSION: "1.3.1"
10
-
11
- jobs:
12
- build:
13
- runs-on: ubuntu-latest
14
- strategy:
15
- matrix:
16
- python-version:
17
- - "3.8"
18
- - "3.9"
19
- - "3.10"
20
- - "3.11"
21
- steps:
22
- - uses: actions/checkout@v3
23
- - name: Install poetry
24
- run: pipx install poetry==$POETRY_VERSION
25
- - name: Set up Python ${{ matrix.python-version }}
26
- uses: actions/setup-python@v4
27
- with:
28
- python-version: ${{ matrix.python-version }}
29
- cache: "poetry"
30
- - name: Install dependencies
31
- run: poetry install
32
- - name: Run unit tests
33
- run: |
34
- make test
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
AllInOneApp/langchain/langchain/agents/assistant/__pycache__/base.cpython-38.pyc DELETED
Binary file (5.76 kB)
 
AllInOneApp/langchain/langchain/agents/assistant/__pycache__/prompt.cpython-38.pyc DELETED
Binary file (5.82 kB)
 
AllInOneApp/langchain/langchain/agents/assistant/base.py CHANGED
@@ -115,7 +115,7 @@ class AssistantAgent(Agent):
115
  if action_input.endswith((".", "?")):
116
  action_input = action_input[:-1]
117
  if "/" not in action_input and "http" not in action_input:
118
- return "Final Answer", ""
119
  cmd = cmd[:cmd_idx + 1].lower()
120
  if "receipt" in cmd:
121
  action = "Receipt Understanding"
 
115
  if action_input.endswith((".", "?")):
116
  action_input = action_input[:-1]
117
  if "/" not in action_input and "http" not in action_input:
118
+ return "Final Answer", "There is no image url. This "
119
  cmd = cmd[:cmd_idx + 1].lower()
120
  if "receipt" in cmd:
121
  action = "Receipt Understanding"
AllInOneApp/langchain/langchain/agents/assistant/prompt.py CHANGED
@@ -3,7 +3,7 @@ PREFIX = """Human: Hey {ai_prefix}!
3
  My name is Human.
4
  Now let me introduce you to Assistant. He is great at understanding what is going on in any image.
5
  Any time there is an image in our conversation that you want to know about objects description, texts, OCR (optical character recognition), people, celebrities inside of the image you could ask Assistant by addressing him.
6
- Make sure to provide Assistant with the best concise task that Assistant can handle.
7
 
8
  EXAMPLE
9
  {ai_prefix}: This is a business card image. Assistant, what OCR text do you see in this business card? https://i.ibb.co/tsQ0Myn/00.jpg
@@ -101,7 +101,7 @@ prescription instructions
101
 
102
  {ai_prefix}: This image is likely a pill bottle with labels.
103
  {ai_prefix}: Assistant, what is the OCR texts in this image? /tmp/path/to/x_d_0(2).jpg
104
- Assistant: This is an image (1100 x 800) with description a bottle of medicine.
105
  This image contains text
106
 
107
  List of texts (words) seen in this image:
@@ -111,7 +111,8 @@ SUPPLEMENT
111
 
112
  {ai_prefix}: This is medicine supplement pills by SPRING VALLEY
113
  Human: where can I buy this medicine? and how much is the price in Euros?
114
- {ai_prefix}: this question requires Bing search. Assistant, Bing search where can I buy SPRING VALLEY supplement pills? and how much is the price in Euros?
 
115
 
116
 
117
  EXAMPLE
@@ -125,10 +126,14 @@ List of object tags seen in this image:
125
  text
126
  {ai_prefix}: This image is likely a receipt or ticket. Let me ask for more information about the text.
127
  {ai_prefix}: Assistant, what are the OCR texts in this receipt? /a/c0%5/XX99096.jpg
 
 
128
  """
129
  SUFFIX = """
130
 
131
- Human: Previous conversation history up to NEW INPUT
 
 
132
  {chat_history}
133
 
134
  NEW INPUT:
 
3
  My name is Human.
4
  Now let me introduce you to Assistant. He is great at understanding what is going on in any image.
5
  Any time there is an image in our conversation that you want to know about objects description, texts, OCR (optical character recognition), people, celebrities inside of the image you could ask Assistant by addressing him.
6
+ Make sure to provide Assistant with the best concise task that Assistant can handle.
7
 
8
  EXAMPLE
9
  {ai_prefix}: This is a business card image. Assistant, what OCR text do you see in this business card? https://i.ibb.co/tsQ0Myn/00.jpg
 
101
 
102
  {ai_prefix}: This image is likely a pill bottle with labels.
103
  {ai_prefix}: Assistant, what is the OCR texts in this image? /tmp/path/to/x_d_0(2).jpg
104
+ Assistant: This is an image (1100 x 800)
105
  This image contains text
106
 
107
  List of texts (words) seen in this image:
 
111
 
112
  {ai_prefix}: This is medicine supplement pills by SPRING VALLEY
113
  Human: where can I buy this medicine? and how much is the price in Euros?
114
+ {ai_prefix}: I do not have that information.
115
+ {ai_prefix}: This question requires Bing search. Assistant, Bing search where can I buy SPRING VALLEY supplement pills? and how much is the price in Euros?
116
 
117
 
118
  EXAMPLE
 
126
  text
127
  {ai_prefix}: This image is likely a receipt or ticket. Let me ask for more information about the text.
128
  {ai_prefix}: Assistant, what are the OCR texts in this receipt? /a/c0%5/XX99096.jpg
129
+
130
+ EXAMPLE_END
131
  """
132
  SUFFIX = """
133
 
134
+ Human: Answer my question after NEW INPUT.
135
+ Human: Answer my question after NEW INPUT. Ask Assistant when needed
136
+ Human: Use conversation history up to NEW INPUT if needed for the answer.
137
  {chat_history}
138
 
139
  NEW INPUT:
AllInOneApp/langchain/langchain/chains/conversation/__pycache__/memory.cpython-38.pyc DELETED
Binary file (16.8 kB)
 
AllInOneApp/langchain/langchain/chains/conversation/memory.py CHANGED
@@ -117,7 +117,10 @@ class ConversationBufferMemory(Memory, BaseModel):
117
  new_input = inputs[prompt_input_key]
118
  # if new_input.startswith("http://0.0.0.0"):
119
  # self.clear()
120
- human = f"{self.human_prefix}: " + new_input
 
 
 
121
  ai = f"{self.ai_prefix}: " + outputs[output_key]
122
  assistant = ""
123
  intermediate = outputs.get(self.output_intermediate) or []
 
117
  new_input = inputs[prompt_input_key]
118
  # if new_input.startswith("http://0.0.0.0"):
119
  # self.clear()
120
+ human = new_input
121
+ prefix = f"{self.human_prefix}: "
122
+ if not human.startswith(prefix):
123
+ human = prefix + new_input
124
  ai = f"{self.ai_prefix}: " + outputs[output_key]
125
  assistant = ""
126
  intermediate = outputs.get(self.output_intermediate) or []
AllInOneApp/langchain/langchain/utilities/__pycache__/imun.cpython-38.pyc DELETED
Binary file (15.2 kB)
 
AllInOneApp/langchain/langchain/utilities/imun.py CHANGED
@@ -305,7 +305,9 @@ def create_prompt(results: Dict) -> str:
305
  if words:
306
  answer += IMUN_PROMPT_WORDS.format(words="\n".join(words))
307
  if languages:
308
- answer += IMUN_PROMPT_LANGUAGES.format(languages="\n".join(languages))
 
 
309
  if faces:
310
  answer += IMUN_PROMPT_FACES.format(faces=_concat_objects(faces))
311
  if celebrities:
 
305
  if words:
306
  answer += IMUN_PROMPT_WORDS.format(words="\n".join(words))
307
  if languages:
308
+ langs = set(languages)
309
+ if len(langs) > 1 or languages[0] != "en":
310
+ answer += IMUN_PROMPT_LANGUAGES.format(languages="\n".join(languages))
311
  if faces:
312
  answer += IMUN_PROMPT_FACES.format(faces=_concat_objects(faces))
313
  if celebrities: