JJteam committed on
Commit 3809d83, 1 Parent(s): 97aacc6

removing unnecessary files and replacing the big Docker setup with a simple one

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. AllInOneApp/Chat-GPT-LangChain/README.md +0 -13
  2. AllInOneApp/Chat-GPT-LangChain/__pycache__/azure_utils.cpython-310.pyc +0 -0
  3. AllInOneApp/Chat-GPT-LangChain/__pycache__/azure_utils.cpython-38.pyc +0 -0
  4. AllInOneApp/Chat-GPT-LangChain/__pycache__/polly_utils.cpython-310.pyc +0 -0
  5. AllInOneApp/Chat-GPT-LangChain/__pycache__/polly_utils.cpython-38.pyc +0 -0
  6. AllInOneApp/Chat-GPT-LangChain/app_bak.py +0 -877
  7. AllInOneApp/Chat-GPT-LangChain/app_test.py +0 -463
  8. AllInOneApp/Chat-GPT-LangChain/audios/tempfile.mp3 +0 -0
  9. AllInOneApp/Chat-GPT-LangChain/azure_utils.py +0 -155
  10. AllInOneApp/Chat-GPT-LangChain/polly_utils.py +0 -635
  11. AllInOneApp/Chat-GPT-LangChain/videos/tempfile.mp4 +0 -0
  12. AllInOneApp/bananabdzk2eqi.jpg.1 +0 -0
  13. AllInOneApp/build/lib/langchain/langimg.py +0 -37
  14. AllInOneApp/chatbot.py +0 -17
  15. AllInOneApp/chatbot1.py +0 -40
  16. AllInOneApp/chatbotVideoDemo.py +0 -94
  17. AllInOneApp/chatbotimage.py +0 -59
  18. AllInOneApp/chatbotvideoupload.py +0 -40
  19. AllInOneApp/flaskTest.py +0 -9
  20. AllInOneApp/gradioTest.py +0 -40
  21. AllInOneApp/langchain/.flake8 +0 -12
  22. AllInOneApp/langchain/CITATION.cff +0 -8
  23. AllInOneApp/langchain/CONTRIBUTING.md +0 -182
  24. AllInOneApp/langchain/LICENSE +0 -21
  25. AllInOneApp/langchain/Makefile +0 -54
  26. AllInOneApp/langchain/README.md +0 -82
  27. AllInOneApp/langchain/docs/Makefile +0 -21
  28. AllInOneApp/langchain/docs/_static/HeliconeDashboard.png +0 -0
  29. AllInOneApp/langchain/docs/_static/HeliconeKeys.png +0 -0
  30. AllInOneApp/langchain/docs/_static/css/custom.css +0 -13
  31. AllInOneApp/langchain/docs/conf.py +0 -105
  32. AllInOneApp/langchain/docs/deployments.md +0 -39
  33. AllInOneApp/langchain/docs/ecosystem.rst +0 -10
  34. AllInOneApp/langchain/docs/ecosystem/ai21.md +0 -16
  35. AllInOneApp/langchain/docs/ecosystem/bananadev.md +0 -74
  36. AllInOneApp/langchain/docs/ecosystem/cerebriumai.md +0 -17
  37. AllInOneApp/langchain/docs/ecosystem/chroma.md +0 -20
  38. AllInOneApp/langchain/docs/ecosystem/cohere.md +0 -25
  39. AllInOneApp/langchain/docs/ecosystem/deepinfra.md +0 -17
  40. AllInOneApp/langchain/docs/ecosystem/forefrontai.md +0 -16
  41. AllInOneApp/langchain/docs/ecosystem/google_search.md +0 -32
  42. AllInOneApp/langchain/docs/ecosystem/google_serper.md +0 -71
  43. AllInOneApp/langchain/docs/ecosystem/gooseai.md +0 -23
  44. AllInOneApp/langchain/docs/ecosystem/graphsignal.md +0 -38
  45. AllInOneApp/langchain/docs/ecosystem/hazy_research.md +0 -19
  46. AllInOneApp/langchain/docs/ecosystem/helicone.md +0 -53
  47. AllInOneApp/langchain/docs/ecosystem/huggingface.md +0 -69
  48. AllInOneApp/langchain/docs/ecosystem/modal.md +0 -66
  49. AllInOneApp/langchain/docs/ecosystem/nlpcloud.md +0 -17
  50. AllInOneApp/langchain/docs/ecosystem/openai.md +0 -55
AllInOneApp/Chat-GPT-LangChain/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: GPT+WolframAlpha+Whisper
3
- emoji: 👀
4
- colorFrom: red
5
- colorTo: gray
6
- sdk: gradio
7
- sdk_version: 3.16.1
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
AllInOneApp/Chat-GPT-LangChain/__pycache__/azure_utils.cpython-310.pyc DELETED
Binary file (3.05 kB)
 
AllInOneApp/Chat-GPT-LangChain/__pycache__/azure_utils.cpython-38.pyc DELETED
Binary file (2.9 kB)
 
AllInOneApp/Chat-GPT-LangChain/__pycache__/polly_utils.cpython-310.pyc DELETED
Binary file (6.93 kB)
 
AllInOneApp/Chat-GPT-LangChain/__pycache__/polly_utils.cpython-38.pyc DELETED
Binary file (6.06 kB)
 
AllInOneApp/Chat-GPT-LangChain/app_bak.py DELETED
@@ -1,877 +0,0 @@
1
- import io
2
- import os
3
- import ssl
4
- from contextlib import closing
5
- from typing import Optional, Tuple
6
- import datetime
7
-
8
- import boto3
9
- import gradio as gr
10
- import requests
11
-
12
- # UNCOMMENT TO USE WHISPER
13
- import warnings
14
- import whisper
15
-
16
- from langchain import ConversationChain, LLMChain
17
-
18
- from langchain.agents import load_tools, initialize_agent
19
- from langchain.chains.conversation.memory import ConversationBufferMemory
20
- from langchain.llms import OpenAI
21
- from threading import Lock
22
-
23
- # Console to variable
24
- from io import StringIO
25
- import sys
26
- import re
27
-
28
- from openai.error import AuthenticationError, InvalidRequestError, RateLimitError
29
-
30
- # Pertains to Express-inator functionality
31
- from langchain.prompts import PromptTemplate
32
-
33
- from polly_utils import PollyVoiceData, NEURAL_ENGINE
34
- from azure_utils import AzureVoiceData
35
-
36
- # Pertains to question answering functionality
37
- from langchain.embeddings.openai import OpenAIEmbeddings
38
- from langchain.text_splitter import CharacterTextSplitter
39
- from langchain.vectorstores.faiss import FAISS
40
- from langchain.docstore.document import Document
41
- from langchain.chains.question_answering import load_qa_chain
42
-
43
- news_api_key = os.environ["NEWS_API_KEY"]
44
- tmdb_bearer_token = os.environ["TMDB_BEARER_TOKEN"]
45
-
46
- TOOLS_LIST = ['serpapi', 'wolfram-alpha', 'pal-math', 'pal-colored-objects'] #'google-search','news-api','tmdb-api','open-meteo-api'
47
- TOOLS_DEFAULT_LIST = ['serpapi', 'pal-math']
48
- BUG_FOUND_MSG = "Congratulations, you've found a bug in this application!"
49
- # AUTH_ERR_MSG = "Please paste your OpenAI key from openai.com to use this application. It is not necessary to hit a button or key after pasting it."
50
- AUTH_ERR_MSG = "Please paste your OpenAI key from openai.com to use this application. "
51
- MAX_TOKENS = 512
52
-
53
- LOOPING_TALKING_HEAD = "videos/Masahiro.mp4"
54
- TALKING_HEAD_WIDTH = "192"
55
- MAX_TALKING_HEAD_TEXT_LENGTH = 155
56
-
57
- # Pertains to Express-inator functionality
58
- NUM_WORDS_DEFAULT = 0
59
- MAX_WORDS = 400
60
- FORMALITY_DEFAULT = "N/A"
61
- TEMPERATURE_DEFAULT = 0.5
62
- EMOTION_DEFAULT = "N/A"
63
- LANG_LEVEL_DEFAULT = "N/A"
64
- TRANSLATE_TO_DEFAULT = "N/A"
65
- LITERARY_STYLE_DEFAULT = "N/A"
66
- PROMPT_TEMPLATE = PromptTemplate(
67
- input_variables=["original_words", "num_words", "formality", "emotions", "lang_level", "translate_to",
68
- "literary_style"],
69
- template="Restate {num_words}{formality}{emotions}{lang_level}{translate_to}{literary_style}the following: \n{original_words}\n",
70
- )
71
-
72
- POLLY_VOICE_DATA = PollyVoiceData()
73
- AZURE_VOICE_DATA = AzureVoiceData()
74
-
75
- # Pertains to WHISPER functionality
76
- WHISPER_DETECT_LANG = "Detect language"
77
-
78
-
79
- # UNCOMMENT TO USE WHISPER
80
- warnings.filterwarnings("ignore")
81
- WHISPER_MODEL = whisper.load_model("tiny")
82
- print("WHISPER_MODEL", WHISPER_MODEL)
83
-
84
-
85
- # UNCOMMENT TO USE WHISPER
86
- def transcribe(aud_inp, whisper_lang):
87
- if aud_inp is None:
88
- return ""
89
- aud = whisper.load_audio(aud_inp)
90
- aud = whisper.pad_or_trim(aud)
91
- mel = whisper.log_mel_spectrogram(aud).to(WHISPER_MODEL.device)
92
- _, probs = WHISPER_MODEL.detect_language(mel)
93
- options = whisper.DecodingOptions()
94
- if whisper_lang != WHISPER_DETECT_LANG:
95
- whisper_lang_code = POLLY_VOICE_DATA.get_whisper_lang_code(whisper_lang)
96
- options = whisper.DecodingOptions(language=whisper_lang_code)
97
- result = whisper.decode(WHISPER_MODEL, mel, options)
98
- print("result.text", result.text)
99
- result_text = ""
100
- if result and result.text:
101
- result_text = result.text
102
- return result_text
103
-
104
-
105
- # Temporarily address Wolfram Alpha SSL certificate issue
106
- ssl._create_default_https_context = ssl._create_unverified_context
107
-
108
-
109
- # TEMPORARY FOR TESTING
110
- def transcribe_dummy(aud_inp_tb, whisper_lang):
111
- if aud_inp_tb is None:
112
- return ""
113
- # aud = whisper.load_audio(aud_inp)
114
- # aud = whisper.pad_or_trim(aud)
115
- # mel = whisper.log_mel_spectrogram(aud).to(WHISPER_MODEL.device)
116
- # _, probs = WHISPER_MODEL.detect_language(mel)
117
- # options = whisper.DecodingOptions()
118
- # options = whisper.DecodingOptions(language="ja")
119
- # result = whisper.decode(WHISPER_MODEL, mel, options)
120
- result_text = "Whisper will detect language"
121
- if whisper_lang != WHISPER_DETECT_LANG:
122
- whisper_lang_code = POLLY_VOICE_DATA.get_whisper_lang_code(whisper_lang)
123
- result_text = f"Whisper will use lang code: {whisper_lang_code}"
124
- print("result_text", result_text)
125
- return aud_inp_tb
126
-
127
-
128
- # Pertains to Express-inator functionality
129
- def transform_text(desc, express_chain, num_words, formality,
130
- anticipation_level, joy_level, trust_level,
131
- fear_level, surprise_level, sadness_level, disgust_level, anger_level,
132
- lang_level, translate_to, literary_style):
133
- num_words_prompt = ""
134
- if num_words and int(num_words) != 0:
135
- num_words_prompt = "using up to " + str(num_words) + " words, "
136
-
137
- # Change some arguments to lower case
138
- formality = formality.lower()
139
- anticipation_level = anticipation_level.lower()
140
- joy_level = joy_level.lower()
141
- trust_level = trust_level.lower()
142
- fear_level = fear_level.lower()
143
- surprise_level = surprise_level.lower()
144
- sadness_level = sadness_level.lower()
145
- disgust_level = disgust_level.lower()
146
- anger_level = anger_level.lower()
147
-
148
- formality_str = ""
149
- if formality != "n/a":
150
- formality_str = "in a " + formality + " manner, "
151
-
152
- # put all emotions into a list
153
- emotions = []
154
- if anticipation_level != "n/a":
155
- emotions.append(anticipation_level)
156
- if joy_level != "n/a":
157
- emotions.append(joy_level)
158
- if trust_level != "n/a":
159
- emotions.append(trust_level)
160
- if fear_level != "n/a":
161
- emotions.append(fear_level)
162
- if surprise_level != "n/a":
163
- emotions.append(surprise_level)
164
- if sadness_level != "n/a":
165
- emotions.append(sadness_level)
166
- if disgust_level != "n/a":
167
- emotions.append(disgust_level)
168
- if anger_level != "n/a":
169
- emotions.append(anger_level)
170
-
171
- emotions_str = ""
172
- if len(emotions) > 0:
173
- if len(emotions) == 1:
174
- emotions_str = "with emotion of " + emotions[0] + ", "
175
- else:
176
- emotions_str = "with emotions of " + ", ".join(emotions[:-1]) + " and " + emotions[-1] + ", "
177
-
178
- lang_level_str = ""
179
- if lang_level != LANG_LEVEL_DEFAULT:
180
- lang_level_str = "at a " + lang_level + " level, " if translate_to == TRANSLATE_TO_DEFAULT else ""
181
-
182
- translate_to_str = ""
183
- if translate_to != TRANSLATE_TO_DEFAULT:
184
- translate_to_str = "translated to " + (
185
- "" if lang_level == TRANSLATE_TO_DEFAULT else lang_level + " level ") + translate_to + ", "
186
-
187
- literary_style_str = ""
188
- if literary_style != LITERARY_STYLE_DEFAULT:
189
- if literary_style == "Prose":
190
- literary_style_str = "as prose, "
191
- if literary_style == "Story":
192
- literary_style_str = "as a story, "
193
- elif literary_style == "Summary":
194
- literary_style_str = "as a summary, "
195
- elif literary_style == "Outline":
196
- literary_style_str = "as an outline numbers and lower case letters, "
197
- elif literary_style == "Bullets":
198
- literary_style_str = "as bullet points using bullets, "
199
- elif literary_style == "Poetry":
200
- literary_style_str = "as a poem, "
201
- elif literary_style == "Haiku":
202
- literary_style_str = "as a haiku, "
203
- elif literary_style == "Limerick":
204
- literary_style_str = "as a limerick, "
205
- elif literary_style == "Rap":
206
- literary_style_str = "as a rap, "
207
- elif literary_style == "Joke":
208
- literary_style_str = "as a very funny joke with a setup and punchline, "
209
- elif literary_style == "Knock-knock":
210
- literary_style_str = "as a very funny knock-knock joke, "
211
- elif literary_style == "FAQ":
212
- literary_style_str = "as a FAQ with several questions and answers, "
213
-
214
- formatted_prompt = PROMPT_TEMPLATE.format(
215
- original_words=desc,
216
- num_words=num_words_prompt,
217
- formality=formality_str,
218
- emotions=emotions_str,
219
- lang_level=lang_level_str,
220
- translate_to=translate_to_str,
221
- literary_style=literary_style_str
222
- )
223
-
224
- trans_instr = num_words_prompt + formality_str + emotions_str + lang_level_str + translate_to_str + literary_style_str
225
- if express_chain and len(trans_instr.strip()) > 0:
226
- generated_text = express_chain.run(
227
- {'original_words': desc, 'num_words': num_words_prompt, 'formality': formality_str,
228
- 'emotions': emotions_str, 'lang_level': lang_level_str, 'translate_to': translate_to_str,
229
- 'literary_style': literary_style_str}).strip()
230
- else:
231
- print("Not transforming text")
232
- generated_text = desc
233
-
234
- # replace all newlines with <br> in generated_text
235
- generated_text = generated_text.replace("\n", "\n\n")
236
-
237
- prompt_plus_generated = "GPT prompt: " + formatted_prompt + "\n\n" + generated_text
238
-
239
- print("\n==== date/time: " + str(datetime.datetime.now() - datetime.timedelta(hours=5)) + " ====")
240
- print("prompt_plus_generated: " + prompt_plus_generated)
241
-
242
- return generated_text
243
-
244
-
245
- def load_chain(tools_list, llm):
246
- chain = None
247
- express_chain = None
248
- memory = None
249
- if llm:
250
- print("\ntools_list", tools_list)
251
- tool_names = tools_list
252
- tools = load_tools(tool_names, llm=llm, news_api_key=news_api_key, tmdb_bearer_token=tmdb_bearer_token)
253
-
254
- memory = ConversationBufferMemory(memory_key="chat_history")
255
-
256
- chain = initialize_agent(tools, llm, agent="conversational-react-description", verbose=True, memory=memory)
257
- express_chain = LLMChain(llm=llm, prompt=PROMPT_TEMPLATE, verbose=True)
258
- return chain, express_chain, memory
259
-
260
-
261
- def set_openai_api_key(api_key):
262
- """Set the api key and return chain.
263
- If no api_key, then None is returned.
264
- """
265
- if api_key and api_key.startswith("sk-") and len(api_key) > 50:
266
- os.environ["OPENAI_API_KEY"] = api_key
267
- print("\n\n ++++++++++++++ Setting OpenAI API key ++++++++++++++ \n\n")
268
- print(str(datetime.datetime.now()) + ": Before OpenAI, OPENAI_API_KEY length: " + str(
269
- len(os.environ["OPENAI_API_KEY"])))
270
- llm = OpenAI(temperature=0, max_tokens=MAX_TOKENS)
271
- print(str(datetime.datetime.now()) + ": After OpenAI, OPENAI_API_KEY length: " + str(
272
- len(os.environ["OPENAI_API_KEY"])))
273
- chain, express_chain, memory = load_chain(TOOLS_DEFAULT_LIST, llm)
274
-
275
- # Pertains to question answering functionality
276
- embeddings = OpenAIEmbeddings()
277
- qa_chain = load_qa_chain(OpenAI(temperature=0), chain_type="stuff")
278
-
279
- print(str(datetime.datetime.now()) + ": After load_chain, OPENAI_API_KEY length: " + str(
280
- len(os.environ["OPENAI_API_KEY"])))
281
- os.environ["OPENAI_API_KEY"] = ""
282
- return chain, express_chain, llm, embeddings, qa_chain, memory
283
- return None, None, None, None, None, None
284
-
285
-
286
- def run_chain(chain, inp, capture_hidden_text):
287
- output = ""
288
- hidden_text = None
289
- if capture_hidden_text:
290
- error_msg = None
291
- tmp = sys.stdout
292
- hidden_text_io = StringIO()
293
- sys.stdout = hidden_text_io
294
-
295
- try:
296
- output = chain.run(input=inp)
297
- except AuthenticationError as ae:
298
- error_msg = AUTH_ERR_MSG + str(datetime.datetime.now()) + ". " + str(ae)
299
- print("error_msg", error_msg)
300
- except RateLimitError as rle:
301
- error_msg = "\n\nRateLimitError: " + str(rle)
302
- except ValueError as ve:
303
- error_msg = "\n\nValueError: " + str(ve)
304
- except InvalidRequestError as ire:
305
- error_msg = "\n\nInvalidRequestError: " + str(ire)
306
- except Exception as e:
307
- error_msg = "\n\n" + BUG_FOUND_MSG + ":\n\n" + str(e)
308
-
309
- sys.stdout = tmp
310
- hidden_text = hidden_text_io.getvalue()
311
-
312
- # remove escape characters from hidden_text
313
- hidden_text = re.sub(r'\x1b[^m]*m', '', hidden_text)
314
-
315
- # remove "Entering new AgentExecutor chain..." from hidden_text
316
- hidden_text = re.sub(r"Entering new AgentExecutor chain...\n", "", hidden_text)
317
-
318
- # remove "Finished chain." from hidden_text
319
- hidden_text = re.sub(r"Finished chain.", "", hidden_text)
320
-
321
- # Add newline after "Thought:" "Action:" "Observation:" "Input:" and "AI:"
322
- hidden_text = re.sub(r"Thought:", "\n\nThought:", hidden_text)
323
- hidden_text = re.sub(r"Action:", "\n\nAction:", hidden_text)
324
- hidden_text = re.sub(r"Observation:", "\n\nObservation:", hidden_text)
325
- hidden_text = re.sub(r"Input:", "\n\nInput:", hidden_text)
326
- hidden_text = re.sub(r"AI:", "\n\nAI:", hidden_text)
327
-
328
- if error_msg:
329
- hidden_text += error_msg
330
-
331
- print("hidden_text: ", hidden_text)
332
- else:
333
- try:
334
- output = chain.run(input=inp)
335
- except AuthenticationError as ae:
336
- output = AUTH_ERR_MSG + str(datetime.datetime.now()) + ". " + str(ae)
337
- print("output", output)
338
- except RateLimitError as rle:
339
- output = "\n\nRateLimitError: " + str(rle)
340
- except ValueError as ve:
341
- output = "\n\nValueError: " + str(ve)
342
- except InvalidRequestError as ire:
343
- output = "\n\nInvalidRequestError: " + str(ire)
344
- except Exception as e:
345
- output = "\n\n" + BUG_FOUND_MSG + ":\n\n" + str(e)
346
-
347
- return output, hidden_text
348
-
349
-
350
- def reset_memory(history, memory):
351
- memory.clear()
352
- history = []
353
- return history, history, memory
354
-
355
-
356
- class ChatWrapper:
357
-
358
- def __init__(self):
359
- self.lock = Lock()
360
-
361
- def __call__(
362
- self, api_key: str, inp: str, history: Optional[Tuple[str, str]], chain: Optional[ConversationChain],
363
- trace_chain: bool, speak_text: bool, talking_head: bool, monologue: bool, express_chain: Optional[LLMChain],
364
- num_words, formality, anticipation_level, joy_level, trust_level,
365
- fear_level, surprise_level, sadness_level, disgust_level, anger_level,
366
- lang_level, translate_to, literary_style, qa_chain, docsearch, use_embeddings
367
- ):
368
- """Execute the chat functionality."""
369
- self.lock.acquire()
370
- try:
371
- print("\n==== date/time: " + str(datetime.datetime.now()) + " ====")
372
- print("inp: " + inp)
373
- print("trace_chain: ", trace_chain)
374
- print("speak_text: ", speak_text)
375
- print("talking_head: ", talking_head)
376
- print("monologue: ", monologue)
377
- history = history or []
378
- # If chain is None, that is because no API key was provided.
379
- output = "Please paste your OpenAI key from openai.com to use this app. " + str(datetime.datetime.now())
380
- hidden_text = output
381
-
382
- if chain:
383
- # Set OpenAI key
384
- import openai
385
- openai.api_key = api_key
386
- if not monologue:
387
- if use_embeddings:
388
- if inp and inp.strip() != "":
389
- if docsearch:
390
- docs = docsearch.similarity_search(inp)
391
- output = str(qa_chain.run(input_documents=docs, question=inp))
392
- else:
393
- output, hidden_text = "Please supply some text in the the Embeddings tab.", None
394
- else:
395
- output, hidden_text = "What's on your mind?", None
396
- else:
397
- output, hidden_text = run_chain(chain, inp, capture_hidden_text=trace_chain)
398
- else:
399
- output, hidden_text = inp, None
400
-
401
- output = transform_text(output, express_chain, num_words, formality, anticipation_level, joy_level,
402
- trust_level,
403
- fear_level, surprise_level, sadness_level, disgust_level, anger_level,
404
- lang_level, translate_to, literary_style)
405
-
406
- text_to_display = output
407
- if trace_chain:
408
- text_to_display = hidden_text + "\n\n" + output
409
- history.append((inp, text_to_display))
410
-
411
- html_video, temp_file, html_audio, temp_aud_file = None, None, None, None
412
- if speak_text:
413
- if talking_head:
414
- if len(output) <= MAX_TALKING_HEAD_TEXT_LENGTH:
415
- html_video, temp_file = do_html_video_speak(output, translate_to)
416
- else:
417
- temp_file = LOOPING_TALKING_HEAD
418
- html_video = create_html_video(temp_file, TALKING_HEAD_WIDTH)
419
- html_audio, temp_aud_file = do_html_audio_speak(output, translate_to)
420
- else:
421
- html_audio, temp_aud_file = do_html_audio_speak(output, translate_to)
422
- else:
423
- if talking_head:
424
- temp_file = LOOPING_TALKING_HEAD
425
- html_video = create_html_video(temp_file, TALKING_HEAD_WIDTH)
426
- else:
427
- # html_audio, temp_aud_file = do_html_audio_speak(output, translate_to)
428
- # html_video = create_html_video(temp_file, "128")
429
- pass
430
-
431
- except Exception as e:
432
- raise e
433
- finally:
434
- self.lock.release()
435
- return history, history, html_video, temp_file, html_audio, temp_aud_file, ""
436
- # return history, history, html_audio, temp_aud_file, ""
437
-
438
-
439
- chat = ChatWrapper()
440
-
441
-
442
- def do_html_audio_speak(words_to_speak, polly_language):
443
- polly_client = boto3.Session(
444
- aws_access_key_id=os.environ["AWS_ACCESS_KEY_ID"],
445
- aws_secret_access_key=os.environ["AWS_SECRET_ACCESS_KEY"],
446
- region_name=os.environ["AWS_DEFAULT_REGION"]
447
- ).client('polly')
448
-
449
- # voice_id, language_code, engine = POLLY_VOICE_DATA.get_voice(polly_language, "Female")
450
- voice_id, language_code, engine = POLLY_VOICE_DATA.get_voice(polly_language, "Male")
451
- if not voice_id:
452
- # voice_id = "Joanna"
453
- voice_id = "Matthew"
454
- language_code = "en-US"
455
- engine = NEURAL_ENGINE
456
- response = polly_client.synthesize_speech(
457
- Text=words_to_speak,
458
- OutputFormat='mp3',
459
- VoiceId=voice_id,
460
- LanguageCode=language_code,
461
- Engine=engine
462
- )
463
-
464
- html_audio = '<pre>no audio</pre>'
465
-
466
- # Save the audio stream returned by Amazon Polly on Lambda's temp directory
467
- if "AudioStream" in response:
468
- with closing(response["AudioStream"]) as stream:
469
- # output = os.path.join("/tmp/", "speech.mp3")
470
-
471
- try:
472
- with open('audios/tempfile.mp3', 'wb') as f:
473
- f.write(stream.read())
474
- temp_aud_file = gr.File("audios/tempfile.mp3")
475
- temp_aud_file_url = "/file=" + temp_aud_file.value['name']
476
- html_audio = f'<audio autoplay><source src={temp_aud_file_url} type="audio/mp3"></audio>'
477
- except IOError as error:
478
- # Could not write to file, exit gracefully
479
- print(error)
480
- return None, None
481
- else:
482
- # The response didn't contain audio data, exit gracefully
483
- print("Could not stream audio")
484
- return None, None
485
-
486
- return html_audio, "audios/tempfile.mp3"
487
-
488
-
489
- def create_html_video(file_name, width):
490
- temp_file_url = "/file=" + tmp_file.value['name']
491
- html_video = f'<video width={width} height={width} autoplay muted loop><source src={temp_file_url} type="video/mp4" poster="Masahiro.png"></video>'
492
- return html_video
493
-
494
-
495
- def do_html_video_speak(words_to_speak, azure_language):
496
- azure_voice = AZURE_VOICE_DATA.get_voice(azure_language, "Male")
497
- if not azure_voice:
498
- azure_voice = "en-US-ChristopherNeural"
499
-
500
- headers = {"Authorization": f"Bearer {os.environ['EXHUMAN_API_KEY']}"}
501
- body = {
502
- 'bot_name': 'Masahiro',
503
- 'bot_response': words_to_speak,
504
- 'azure_voice': azure_voice,
505
- 'azure_style': 'friendly',
506
- 'animation_pipeline': 'high_speed',
507
- }
508
- api_endpoint = "https://api.exh.ai/animations/v1/generate_lipsync"
509
- res = requests.post(api_endpoint, json=body, headers=headers)
510
- print("res.status_code: ", res.status_code)
511
-
512
- html_video = '<pre>no video</pre>'
513
- if isinstance(res.content, bytes):
514
- response_stream = io.BytesIO(res.content)
515
- print("len(res.content)): ", len(res.content))
516
-
517
- with open('videos/tempfile.mp4', 'wb') as f:
518
- f.write(response_stream.read())
519
- temp_file = gr.File("videos/tempfile.mp4")
520
- temp_file_url = "/file=" + temp_file.value['name']
521
- html_video = f'<video width={TALKING_HEAD_WIDTH} height={TALKING_HEAD_WIDTH} autoplay><source src={temp_file_url} type="video/mp4" poster="Masahiro.png"></video>'
522
- else:
523
- print('video url unknown')
524
- return html_video, "videos/tempfile.mp4"
525
-
526
-
527
- def update_selected_tools(widget, state, llm):
528
- if widget:
529
- state = widget
530
- chain, express_chain, memory = load_chain(state, llm)
531
- return state, llm, chain, express_chain
532
-
533
-
534
- def update_talking_head(widget, state):
535
- if widget:
536
- state = widget
537
-
538
- video_html_talking_head = create_html_video(LOOPING_TALKING_HEAD, TALKING_HEAD_WIDTH)
539
- return state, video_html_talking_head
540
- else:
541
- # return state, create_html_video(LOOPING_TALKING_HEAD, "32")
542
- return None, "<pre></pre>"
543
-
544
-
545
- def update_foo(widget, state):
546
- if widget:
547
- state = widget
548
- return state
549
-
550
-
551
- # Pertains to question answering functionality
552
- def update_embeddings(embeddings_text, embeddings, qa_chain):
553
- if embeddings_text:
554
- text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
555
- texts = text_splitter.split_text(embeddings_text)
556
-
557
- docsearch = FAISS.from_texts(texts, embeddings)
558
- print("Embeddings updated")
559
- return docsearch
560
-
561
-
562
- # Pertains to question answering functionality
563
- def update_use_embeddings(widget, state):
564
- if widget:
565
- state = widget
566
- return state
567
-
568
-
569
- with gr.Blocks(css=".gradio-container {background-color: lightgray}") as block:
570
- llm_state = gr.State()
571
- history_state = gr.State()
572
- chain_state = gr.State()
573
- express_chain_state = gr.State()
574
- tools_list_state = gr.State(TOOLS_DEFAULT_LIST)
575
- trace_chain_state = gr.State(False)
576
- speak_text_state = gr.State(False)
577
- talking_head_state = gr.State(True)
578
- monologue_state = gr.State(False) # Takes the input and repeats it back to the user, optionally transforming it.
579
- memory_state = gr.State()
580
-
581
- # Pertains to Express-inator functionality
582
- num_words_state = gr.State(NUM_WORDS_DEFAULT)
583
- formality_state = gr.State(FORMALITY_DEFAULT)
584
- anticipation_level_state = gr.State(EMOTION_DEFAULT)
585
- joy_level_state = gr.State(EMOTION_DEFAULT)
586
- trust_level_state = gr.State(EMOTION_DEFAULT)
587
- fear_level_state = gr.State(EMOTION_DEFAULT)
588
- surprise_level_state = gr.State(EMOTION_DEFAULT)
589
- sadness_level_state = gr.State(EMOTION_DEFAULT)
590
- disgust_level_state = gr.State(EMOTION_DEFAULT)
591
- anger_level_state = gr.State(EMOTION_DEFAULT)
592
- lang_level_state = gr.State(LANG_LEVEL_DEFAULT)
593
- translate_to_state = gr.State(TRANSLATE_TO_DEFAULT)
594
- literary_style_state = gr.State(LITERARY_STYLE_DEFAULT)
595
-
596
- # Pertains to WHISPER functionality
597
- whisper_lang_state = gr.State(WHISPER_DETECT_LANG)
598
-
599
- # Pertains to question answering functionality
600
- embeddings_state = gr.State()
601
- qa_chain_state = gr.State()
602
- docsearch_state = gr.State()
603
- use_embeddings_state = gr.State(False)
604
-
605
- with gr.Tab("Chat"):
606
- with gr.Row():
607
- with gr.Column():
608
- gr.HTML(
609
- """<b><center>GPT + WolframAlpha + Whisper</center></b>
610
- <p><center>New feature: <b>Embeddings</b></center></p>""")
611
-
612
- openai_api_key_textbox = gr.Textbox(placeholder="Paste your OpenAI API key (sk-...)",
613
- show_label=False, lines=1, type='password')
614
-
615
- with gr.Row():
616
- with gr.Column(scale=1, min_width=TALKING_HEAD_WIDTH, visible=True):
617
- speak_text_cb = gr.Checkbox(label="Enable speech", value=False)
618
- speak_text_cb.change(update_foo, inputs=[speak_text_cb, speak_text_state],
619
- outputs=[speak_text_state])
620
-
621
- my_file = gr.File(label="Upload a file", type="file", visible=False)
622
- tmp_file = gr.File(LOOPING_TALKING_HEAD, visible=False)
623
- # tmp_file_url = "/file=" + tmp_file.value['name']
624
- htm_video = create_html_video(LOOPING_TALKING_HEAD, TALKING_HEAD_WIDTH)
625
- video_html = gr.HTML(htm_video)
626
-
627
- # my_aud_file = gr.File(label="Audio file", type="file", visible=True)
628
- tmp_aud_file = gr.File("audios/tempfile.mp3", visible=False)
629
- tmp_aud_file_url = "/file=" + tmp_aud_file.value['name']
630
- htm_audio = f'<audio><source src={tmp_aud_file_url} type="audio/mp3"></audio>'
631
- audio_html = gr.HTML(htm_audio)
632
-
633
- with gr.Column(scale=7):
634
- chatbot = gr.Chatbot()
635
-
636
- with gr.Row():
637
- message = gr.Textbox(label="What's on your mind??",
638
- placeholder="What's the answer to life, the universe, and everything?",
639
- lines=1)
640
- submit = gr.Button(value="Send", variant="secondary").style(full_width=False)
641
-
642
- # UNCOMMENT TO USE WHISPER
643
- with gr.Row():
644
- audio_comp = gr.Microphone(source="microphone", type="filepath", label="Just say it!",
645
- interactive=True, streaming=False)
646
- audio_comp.change(transcribe, inputs=[audio_comp, whisper_lang_state], outputs=[message])
647
-
648
- # TEMPORARY FOR TESTING
649
- # with gr.Row():
650
- # audio_comp_tb = gr.Textbox(label="Just say it!", lines=1)
651
- # audio_comp_tb.submit(transcribe_dummy, inputs=[audio_comp_tb, whisper_lang_state], outputs=[message])
652
-
653
- gr.Examples(
654
- examples=["How many people live in Canada?",
655
- "What is 2 to the 30th power?",
656
- "If x+y=10 and x-y=4, what are x and y?",
657
- "How much did it rain in SF today?",
658
- "Get me information about the movie 'Avatar'",
659
- "What are the top tech headlines in the US?",
660
- "On the desk, you see two blue booklets, two purple booklets, and two yellow pairs of sunglasses - "
661
- "if I remove all the pairs of sunglasses from the desk, how many purple items remain on it?"],
662
- inputs=message
663
- )
664
-
665
- with gr.Tab("Settings"):
666
- tools_cb_group = gr.CheckboxGroup(label="Tools:", choices=TOOLS_LIST,
667
- value=TOOLS_DEFAULT_LIST)
668
- tools_cb_group.change(update_selected_tools,
669
- inputs=[tools_cb_group, tools_list_state, llm_state],
670
- outputs=[tools_list_state, llm_state, chain_state, express_chain_state])
671
-
672
- trace_chain_cb = gr.Checkbox(label="Show reasoning chain in chat bubble", value=False)
673
- trace_chain_cb.change(update_foo, inputs=[trace_chain_cb, trace_chain_state],
674
- outputs=[trace_chain_state])
675
-
676
- # speak_text_cb = gr.Checkbox(label="Speak text from agent", value=False)
677
- # speak_text_cb.change(update_foo, inputs=[speak_text_cb, speak_text_state],
678
- # outputs=[speak_text_state])
679
-
680
- talking_head_cb = gr.Checkbox(label="Show talking head", value=True)
681
- talking_head_cb.change(update_talking_head, inputs=[talking_head_cb, talking_head_state],
682
- outputs=[talking_head_state, video_html])
683
-
684
- monologue_cb = gr.Checkbox(label="Babel fish mode (translate/restate what you enter, no conversational agent)",
685
- value=False)
686
- monologue_cb.change(update_foo, inputs=[monologue_cb, monologue_state],
687
- outputs=[monologue_state])
688
-
689
- reset_btn = gr.Button(value="Reset chat", variant="secondary").style(full_width=False)
690
- reset_btn.click(reset_memory, inputs=[history_state, memory_state], outputs=[chatbot, history_state, memory_state])
691
-
692
- with gr.Tab("Whisper STT"):
693
- whisper_lang_radio = gr.Radio(label="Whisper speech-to-text language:", choices=[
694
- WHISPER_DETECT_LANG, "Arabic", "Arabic (Gulf)", "Catalan", "Chinese (Cantonese)", "Chinese (Mandarin)",
695
- "Danish", "Dutch", "English (Australian)", "English (British)", "English (Indian)", "English (New Zealand)",
696
- "English (South African)", "English (US)", "English (Welsh)", "Finnish", "French", "French (Canadian)",
697
- "German", "German (Austrian)", "Georgian", "Hindi", "Icelandic", "Indonesian", "Italian", "Japanese",
698
- "Korean", "Norwegian", "Polish",
699
- "Portuguese (Brazilian)", "Portuguese (European)", "Romanian", "Russian", "Spanish (European)",
700
- "Spanish (Mexican)", "Spanish (US)", "Swedish", "Turkish", "Ukrainian", "Welsh"],
701
- value=WHISPER_DETECT_LANG)
702
-
703
- whisper_lang_radio.change(update_foo,
704
- inputs=[whisper_lang_radio, whisper_lang_state],
705
- outputs=[whisper_lang_state])
706
-
707
- with gr.Tab("Translate to"):
708
- lang_level_radio = gr.Radio(label="Language level:", choices=[
709
- LANG_LEVEL_DEFAULT, "1st grade", "2nd grade", "3rd grade", "4th grade", "5th grade", "6th grade",
710
- "7th grade", "8th grade", "9th grade", "10th grade", "11th grade", "12th grade", "University"],
711
- value=LANG_LEVEL_DEFAULT)
712
- lang_level_radio.change(update_foo, inputs=[lang_level_radio, lang_level_state],
713
- outputs=[lang_level_state])
714
-
715
- translate_to_radio = gr.Radio(label="Language:", choices=[
716
- TRANSLATE_TO_DEFAULT, "Arabic", "Arabic (Gulf)", "Catalan", "Chinese (Cantonese)", "Chinese (Mandarin)",
717
- "Danish", "Dutch", "English (Australian)", "English (British)", "English (Indian)", "English (New Zealand)",
718
- "English (South African)", "English (US)", "English (Welsh)", "Finnish", "French", "French (Canadian)",
719
- "German", "German (Austrian)", "Georgian", "Hindi", "Icelandic", "Indonesian", "Italian", "Japanese",
720
- "Korean", "Norwegian", "Polish",
721
- "Portuguese (Brazilian)", "Portuguese (European)", "Romanian", "Russian", "Spanish (European)",
722
- "Spanish (Mexican)", "Spanish (US)", "Swedish", "Turkish", "Ukrainian", "Welsh",
723
- "emojis", "Gen Z slang", "how the stereotypical Karen would say it", "Klingon", "Neanderthal",
724
- "Pirate", "Strange Planet expospeak technical talk", "Yoda"],
725
- value=TRANSLATE_TO_DEFAULT)
726
-
727
- translate_to_radio.change(update_foo,
728
- inputs=[translate_to_radio, translate_to_state],
729
- outputs=[translate_to_state])
730
-
731
- with gr.Tab("Formality"):
732
- formality_radio = gr.Radio(label="Formality:",
733
- choices=[FORMALITY_DEFAULT, "Casual", "Polite", "Honorific"],
734
- value=FORMALITY_DEFAULT)
735
- formality_radio.change(update_foo,
736
- inputs=[formality_radio, formality_state],
737
- outputs=[formality_state])
738
-
739
- with gr.Tab("Lit style"):
740
- literary_style_radio = gr.Radio(label="Literary style:", choices=[
741
- LITERARY_STYLE_DEFAULT, "Prose", "Story", "Summary", "Outline", "Bullets", "Poetry", "Haiku", "Limerick", "Rap",
742
- "Joke", "Knock-knock", "FAQ"],
743
- value=LITERARY_STYLE_DEFAULT)
744
-
745
- literary_style_radio.change(update_foo,
746
- inputs=[literary_style_radio, literary_style_state],
747
- outputs=[literary_style_state])
748
-
749
- with gr.Tab("Emotions"):
750
- anticipation_level_radio = gr.Radio(label="Anticipation level:",
751
- choices=[EMOTION_DEFAULT, "Interest", "Anticipation", "Vigilance"],
752
- value=EMOTION_DEFAULT)
753
- anticipation_level_radio.change(update_foo,
754
- inputs=[anticipation_level_radio, anticipation_level_state],
755
- outputs=[anticipation_level_state])
756
-
757
- joy_level_radio = gr.Radio(label="Joy level:",
758
- choices=[EMOTION_DEFAULT, "Serenity", "Joy", "Ecstasy"],
759
- value=EMOTION_DEFAULT)
760
- joy_level_radio.change(update_foo,
761
- inputs=[joy_level_radio, joy_level_state],
762
- outputs=[joy_level_state])
763
-
764
- trust_level_radio = gr.Radio(label="Trust level:",
765
- choices=[EMOTION_DEFAULT, "Acceptance", "Trust", "Admiration"],
766
- value=EMOTION_DEFAULT)
767
- trust_level_radio.change(update_foo,
768
- inputs=[trust_level_radio, trust_level_state],
769
- outputs=[trust_level_state])
770
-
771
- fear_level_radio = gr.Radio(label="Fear level:",
772
- choices=[EMOTION_DEFAULT, "Apprehension", "Fear", "Terror"],
773
- value=EMOTION_DEFAULT)
774
- fear_level_radio.change(update_foo,
775
- inputs=[fear_level_radio, fear_level_state],
776
- outputs=[fear_level_state])
777
-
778
- surprise_level_radio = gr.Radio(label="Surprise level:",
779
- choices=[EMOTION_DEFAULT, "Distraction", "Surprise", "Amazement"],
780
- value=EMOTION_DEFAULT)
781
- surprise_level_radio.change(update_foo,
782
- inputs=[surprise_level_radio, surprise_level_state],
783
- outputs=[surprise_level_state])
784
-
785
- sadness_level_radio = gr.Radio(label="Sadness level:",
786
- choices=[EMOTION_DEFAULT, "Pensiveness", "Sadness", "Grief"],
787
- value=EMOTION_DEFAULT)
788
- sadness_level_radio.change(update_foo,
789
- inputs=[sadness_level_radio, sadness_level_state],
790
- outputs=[sadness_level_state])
791
-
792
- disgust_level_radio = gr.Radio(label="Disgust level:",
793
- choices=[EMOTION_DEFAULT, "Boredom", "Disgust", "Loathing"],
794
- value=EMOTION_DEFAULT)
795
- disgust_level_radio.change(update_foo,
796
- inputs=[disgust_level_radio, disgust_level_state],
797
- outputs=[disgust_level_state])
798
-
799
- anger_level_radio = gr.Radio(label="Anger level:",
800
- choices=[EMOTION_DEFAULT, "Annoyance", "Anger", "Rage"],
801
- value=EMOTION_DEFAULT)
802
- anger_level_radio.change(update_foo,
803
- inputs=[anger_level_radio, anger_level_state],
804
- outputs=[anger_level_state])
805
-
806
- with gr.Tab("Max words"):
807
- num_words_slider = gr.Slider(label="Max number of words to generate (0 for don't care)",
808
- value=NUM_WORDS_DEFAULT, minimum=0, maximum=MAX_WORDS, step=10)
809
- num_words_slider.change(update_foo,
810
- inputs=[num_words_slider, num_words_state],
811
- outputs=[num_words_state])
812
-
813
- with gr.Tab("Embeddings"):
814
- embeddings_text_box = gr.Textbox(label="Enter text for embeddings and hit Create:",
815
- lines=20)
816
-
817
- with gr.Row():
818
- use_embeddings_cb = gr.Checkbox(label="Use embeddings", value=False)
819
- use_embeddings_cb.change(update_use_embeddings, inputs=[use_embeddings_cb, use_embeddings_state],
820
- outputs=[use_embeddings_state])
821
-
822
- embeddings_text_submit = gr.Button(value="Create", variant="secondary").style(full_width=False)
823
- embeddings_text_submit.click(update_embeddings,
824
- inputs=[embeddings_text_box, embeddings_state, qa_chain_state],
825
- outputs=[docsearch_state])
826
-
827
- gr.HTML("""
828
- <p>This application, developed by <a href='https://www.linkedin.com/in/javafxpert/'>James L. Weaver</a>,
829
- demonstrates a conversational agent implemented with OpenAI GPT-3.5 and LangChain.
830
- When necessary, it leverages tools for complex math, searching the internet, and accessing news and weather.
831
- Uses talking heads from <a href='https://exh.ai/'>Ex-Human</a>.
832
- For faster inference without waiting in queue, you may duplicate the space.
833
- </p>""")
834
-
835
- gr.HTML("""
836
- <form action="https://www.paypal.com/donate" method="post" target="_blank">
837
- <input type="hidden" name="business" value="AK8BVNALBXSPQ" />
838
- <input type="hidden" name="no_recurring" value="0" />
839
- <input type="hidden" name="item_name" value="Please consider helping to defray the cost of APIs such as SerpAPI and WolframAlpha that this app uses." />
840
- <input type="hidden" name="currency_code" value="USD" />
841
- <input type="image" src="https://www.paypalobjects.com/en_US/i/btn/btn_donate_LG.gif" border="0" name="submit" title="PayPal - The safer, easier way to pay online!" alt="Donate with PayPal button" />
842
- <img alt="" border="0" src="https://www.paypal.com/en_US/i/scr/pixel.gif" width="1" height="1" />
843
- </form>
844
- """)
845
-
846
- gr.HTML("""<center>
847
- <a href="https://huggingface.co/spaces/JavaFXpert/Chat-GPT-LangChain?duplicate=true">
848
- <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
849
- Powered by <a href='https://github.com/hwchase17/langchain'>LangChain 🦜️🔗</a>
850
- </center>""")
851
-
852
- message.submit(chat, inputs=[openai_api_key_textbox, message, history_state, chain_state, trace_chain_state,
853
- speak_text_state, talking_head_state, monologue_state,
854
- express_chain_state, num_words_state, formality_state,
855
- anticipation_level_state, joy_level_state, trust_level_state, fear_level_state,
856
- surprise_level_state, sadness_level_state, disgust_level_state, anger_level_state,
857
- lang_level_state, translate_to_state, literary_style_state,
858
- qa_chain_state, docsearch_state, use_embeddings_state],
859
- outputs=[chatbot, history_state, video_html, my_file, audio_html, tmp_aud_file, message])
860
- # outputs=[chatbot, history_state, audio_html, tmp_aud_file, message])
861
-
862
- submit.click(chat, inputs=[openai_api_key_textbox, message, history_state, chain_state, trace_chain_state,
863
- speak_text_state, talking_head_state, monologue_state,
864
- express_chain_state, num_words_state, formality_state,
865
- anticipation_level_state, joy_level_state, trust_level_state, fear_level_state,
866
- surprise_level_state, sadness_level_state, disgust_level_state, anger_level_state,
867
- lang_level_state, translate_to_state, literary_style_state,
868
- qa_chain_state, docsearch_state, use_embeddings_state],
869
- outputs=[chatbot, history_state, video_html, my_file, audio_html, tmp_aud_file, message])
870
- # outputs=[chatbot, history_state, audio_html, tmp_aud_file, message])
871
-
872
- openai_api_key_textbox.change(set_openai_api_key,
873
- inputs=[openai_api_key_textbox],
874
- outputs=[chain_state, express_chain_state, llm_state, embeddings_state,
875
- qa_chain_state, memory_state])
876
-
877
- block.launch(debug=True)
 
AllInOneApp/Chat-GPT-LangChain/app_test.py DELETED
@@ -1,463 +0,0 @@
1
- # example call script
2
- # https://dev.azure.com/visionbio/objectdetection/_git/objectdetection?path=/verify/langimg.py&version=GBehazar/langchain&_a=contents
3
-
4
- import re
5
- import io
6
- import os
7
- import ssl
8
- from typing import Optional, Tuple
9
- import datetime
10
- import sys
11
- import gradio as gr
12
- import requests
13
- import json
14
- from threading import Lock
15
- from langchain import ConversationChain, LLMChain
16
- from langchain.agents import load_tools, initialize_agent, Tool
17
- from langchain.tools.bing_search.tool import BingSearchRun, BingSearchAPIWrapper
18
- from langchain.chains.conversation.memory import ConversationBufferMemory
19
- from langchain.llms import OpenAI
20
- from langchain.chains import PALChain
21
- from langchain.llms import AzureOpenAI
22
- from langchain.utilities import ImunAPIWrapper, ImunMultiAPIWrapper
23
- from openai.error import AuthenticationError, InvalidRequestError, RateLimitError
24
- import argparse
25
-
26
- # header_key = os.environ["CVFIAHMED_KEY"]
27
- OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
28
- TOOLS_LIST = ['pal-math', 'imun'] #'google-search','news-api','tmdb-api','open-meteo-api'
29
- TOOLS_DEFAULT_LIST = ['pal-math', 'imun']
30
- BUG_FOUND_MSG = "Congratulations, you've found a bug in this application!"
31
- AUTH_ERR_MSG = "Please paste your OpenAI key from openai.com to use this application. "
32
- MAX_TOKENS = 512
33
-
34
-
35
- ############ GLOBAL CHAIN ###########
36
- # chain = None
37
- # memory = None
38
- #####################################
39
- ############ GLOBAL IMAGE_COUNT #####
40
- IMAGE_COUNT=0
41
- #####################################
42
- ############## ARGS #################
43
- AGRS = None
44
- #####################################
45
-
46
-
47
- # Temporarily address Wolfram Alpha SSL certificate issue
48
- ssl._create_default_https_context = ssl._create_unverified_context
49
-
50
-
51
- def get_caption_onnx_api(imgf):
52
-
53
- headers = {
54
- 'Content-Type': 'application/octet-stream',
55
- 'Ocp-Apim-Subscription-Key': header_key,
56
- }
57
-
58
- params = {
59
- 'features': 'description',
60
- 'model-version': 'latest',
61
- 'language': 'en',
62
- 'descriptionExclude': 'Celebrities,Landmarks',
63
- }
64
-
65
- with open(imgf, 'rb') as f:
66
- data = f.read()
67
-
68
- response = requests.post('https://cvfiahmed.cognitiveservices.azure.com/vision/v2022-07-31-preview/operations/imageanalysis:analyze', params=params, headers=headers, data=data)
69
-
70
- return json.loads(response.content)['descriptionResult']['values'][0]['text']
71
-
72
- def reset_memory(history):
73
- # global memory
74
- # memory.clear()
75
- print ("clearning memory, loading langchain...")
76
- load_chain()
77
- history = []
78
- return history, history
79
-
80
-
81
- def load_chain(history):
82
- global ARGS
83
- # global chain
84
- # global memory
85
- # memory = None
86
-
87
- if ARGS.openAIModel == 'openAIGPT35':
88
- # openAI GPT 3.5
89
- llm = OpenAI(temperature=0, max_tokens=MAX_TOKENS)
90
- elif ARGS.openAIModel == 'azureChatGPT':
91
- # for Azure OpenAI ChatGPT
92
- # Azure OpenAI param name 'deployment_name': 'text-davinci-002', 'model_name': 'text-davinci-002', 'temperature': 0.7, 'max_tokens': 256, 'top_p': 1, 'frequency_penalty': 0, 'presence_penalty': 0, 'n': 1, 'best_of': 1
93
- # llm = AzureOpenAI(deployment_name="text-chat-davinci-002", model_name="text-chat-davinci-002", temperature=1, top_p=0.9, max_tokens=MAX_TOKENS)
94
- llm = AzureOpenAI(deployment_name="text-chat-davinci-002", model_name="text-chat-davinci-002", temperature=0, max_tokens=MAX_TOKENS)
95
- # llm = AzureOpenAI(deployment_name="gpt-35-turbo-version-0301", model_name="gpt-35-turbo (version 0301)", temperature=0, max_tokens=MAX_TOKENS)
96
- elif ARGS.openAIModel == 'azureTextDavinci003':
97
- # for Azure OpenAI ChatGPT
98
- # Azure OpenAI param name 'deployment_name': 'text-davinci-002', 'model_name': 'text-davinci-002', 'temperature': 0.7, 'max_tokens': 256, 'top_p': 1, 'frequency_penalty': 0, 'presence_penalty': 0, 'n': 1, 'best_of': 1
99
- llm = AzureOpenAI(deployment_name="text-davinci-003", model_name="text-davinci-003", temperature=0, max_tokens=MAX_TOKENS)
100
-
101
- # tool_names = TOOLS_DEFAULT_LIST
102
- # tools = load_tools(tool_names, llm=llm)
103
- memory = ConversationBufferMemory(memory_key="chat_history")
104
-
105
- #############################
106
- # loading tools
107
-
108
- imun_dense = ImunAPIWrapper(
109
- imun_url="https://ehazarwestus.cognitiveservices.azure.com/computervision/imageanalysis:analyze",
110
- params="api-version=2023-02-01-preview&model-version=latest&features=denseCaptions",
111
- imun_subscription_key=os.environ["IMUN_SUBSCRIPTION_KEY2"])
112
-
113
- imun = ImunAPIWrapper()
114
- imun = ImunMultiAPIWrapper(imuns=[imun, imun_dense])
115
-
116
- imun_celeb = ImunAPIWrapper(
117
- imun_url="https://cvfiahmed.cognitiveservices.azure.com/vision/v3.2/models/celebrities/analyze",
118
- params="")
119
-
120
- imun_read = ImunAPIWrapper(
121
- imun_url="https://vigehazar.cognitiveservices.azure.com/formrecognizer/documentModels/prebuilt-read:analyze",
122
- params="api-version=2022-08-31",
123
- imun_subscription_key=os.environ["IMUN_OCR_SUBSCRIPTION_KEY"])
124
-
125
- imun_receipt = ImunAPIWrapper(
126
- imun_url="https://vigehazar.cognitiveservices.azure.com/formrecognizer/documentModels/prebuilt-receipt:analyze",
127
- params="api-version=2022-08-31",
128
- imun_subscription_key=os.environ["IMUN_OCR_SUBSCRIPTION_KEY"])
129
-
130
- imun_businesscard = ImunAPIWrapper(
131
- imun_url="https://vigehazar.cognitiveservices.azure.com/formrecognizer/documentModels/prebuilt-businessCard:analyze",
132
- params="api-version=2022-08-31",
133
- imun_subscription_key=os.environ["IMUN_OCR_SUBSCRIPTION_KEY"])
134
-
135
- imun_layout = ImunAPIWrapper(
136
- imun_url="https://vigehazar.cognitiveservices.azure.com/formrecognizer/documentModels/prebuilt-layout:analyze",
137
- params="api-version=2022-08-31",
138
- imun_subscription_key=os.environ["IMUN_OCR_SUBSCRIPTION_KEY"])
139
-
140
- bing = BingSearchAPIWrapper(k=2)
141
-
142
- def edit_photo(query: str) -> str:
143
- endpoint = "http://10.123.124.92:7863/"
144
- query = query.strip()
145
- url_idx = query.rfind(" ")
146
- img_url = query[url_idx + 1:].strip()
147
- if img_url.endswith((".", "?")):
148
- img_url = img_url[:-1]
149
- if not img_url.startswith(("http://", "https://")):
150
- return "Invalid image URL"
151
- img_url = img_url.replace("0.0.0.0", "10.123.124.92")
152
- instruction = query[:url_idx]
153
- # This should be some internal IP to wherever the server runs
154
- job = {"image_path": img_url, "instruction": instruction}
155
- response = requests.post(endpoint, json=job)
156
- if response.status_code != 200:
157
- return "Could not finish the task try again later!"
158
- return "Here is the edited image " + endpoint + response.json()["edited_image"]
159
-
160
- # these tools should not step on each other's toes
161
- tools = [
162
- Tool(
163
- name="PAL-MATH",
164
- func=PALChain.from_math_prompt(llm).run,
165
- description=(
166
- "A wrapper around calculator. "
167
- "A language model that is really good at solving complex word math problems."
168
- "Input should be a fully worded hard word math problem."
169
- )
170
- ),
171
- Tool(
172
- name = "Image Understanding",
173
- func=imun.run,
174
- description=(
175
- "A wrapper around Image Understanding. "
176
- "Useful for when you need to understand what is inside an image (objects, texts, people)."
177
- "Input should be an image url, or path to an image file (e.g. .jpg, .png)."
178
- )
179
- ),
180
- Tool(
181
- name = "OCR Understanding",
182
- func=imun_read.run,
183
- description=(
184
- "A wrapper around OCR Understanding (Optical Character Recognition). "
185
- "Useful after Image Understanding tool has found text or handwriting is present in the image tags."
186
- "This tool can find the actual text, written name, or product name in the image."
187
- "Input should be an image url, or path to an image file (e.g. .jpg, .png)."
188
- )
189
- ),
190
- Tool(
191
- name = "Receipt Understanding",
192
- func=imun_receipt.run,
193
- description=(
194
- "A wrapper receipt understanding. "
195
- "Useful after Image Understanding tool has recognized a receipt in the image tags."
196
- "This tool can find the actual receipt text, prices and detailed items."
197
- "Input should be an image url, or path to an image file (e.g. .jpg, .png)."
198
- )
199
- ),
200
- Tool(
201
- name = "Business Card Understanding",
202
- func=imun_businesscard.run,
203
- description=(
204
- "A wrapper around business card understanding. "
205
- "Useful after Image Understanding tool has recognized businesscard in the image tags."
206
- "This tool can find the actual business card text, name, address, email, website on the card."
207
- "Input should be an image url, or path to an image file (e.g. .jpg, .png)."
208
- )
209
- ),
210
- Tool(
211
- name = "Layout Understanding",
212
- func=imun_layout.run,
213
- description=(
214
- "A wrapper around layout and table understanding. "
215
- "Useful after Image Understanding tool has recognized businesscard in the image tags."
216
- "This tool can find the actual business card text, name, address, email, website on the card."
217
- "Input should be an image url, or path to an image file (e.g. .jpg, .png)."
218
- )
219
- ),
220
- Tool(
221
- name = "Celebrity Understanding",
222
- func=imun_celeb.run,
223
- description=(
224
- "A wrapper around celebrity understanding. "
225
- "Useful after Image Understanding tool has recognized people in the image tags that could be celebrities."
226
- "This tool can find the name of celebrities in the image."
227
- "Input should be an image url, or path to an image file (e.g. .jpg, .png)."
228
- )
229
- ),
230
- BingSearchRun(api_wrapper=bing),
231
- Tool(
232
- name = "Photo Editing",
233
- func=edit_photo,
234
- description=(
235
- "A wrapper around photo editing. "
236
- "Useful to edit an image with a given instruction."
237
- "Input should be an image url, or path to an image file (e.g. .jpg, .png)."
238
- )
239
- ),
240
- ]
241
-
242
- # chain = initialize_agent(tools, llm, agent="conversational-react-description", verbose=True, memory=memory)
243
- # chain = initialize_agent(tools, llm, agent="conversational-assistant", verbose=True, memory=memory, return_intermediate_steps=True)
244
- chain = initialize_agent(tools, llm, agent="conversational-assistant", verbose=True, memory=memory, return_intermediate_steps=True, max_iterations=4)
245
- print("langchain reloaded")
246
- history = []
247
- history.append(("Show me what you got!", "Hi Human, I am ready to serve!"))
248
- return history, history, chain
249
-
250
-
251
- def run_chain(chain, inp):
252
- # global chain
253
-
254
- output = ""
255
- try:
256
- output = chain.conversation(input=inp, keep_short=ARGS.noIntermediateConv)
257
- # output = chain.run(input=inp)
258
- except AuthenticationError as ae:
259
- output = AUTH_ERR_MSG + str(datetime.datetime.now()) + ". " + str(ae)
260
- print("output", output)
261
- except RateLimitError as rle:
262
- output = "\n\nRateLimitError: " + str(rle)
263
- except ValueError as ve:
264
- output = "\n\nValueError: " + str(ve)
265
- except InvalidRequestError as ire:
266
- output = "\n\nInvalidRequestError: " + str(ire)
267
- except Exception as e:
268
- output = "\n\n" + BUG_FOUND_MSG + ":\n\n" + str(e)
269
-
270
- return output
271
-
272
-
273
- class ChatWrapper:
274
-
275
- def __init__(self):
276
- self.lock = Lock()
277
-
278
- def __call__(
279
- self, inp: str, history: Optional[Tuple[str, str]], chain: Optional[ConversationChain]
280
- ):
281
- """Execute the chat functionality."""
282
- self.lock.acquire()
283
- try:
284
- print("\n==== date/time: " + str(datetime.datetime.now()) + " ====")
285
- print("inp: " + inp)
286
- history = history or []
287
- # If chain is None, that is because no API key was provided.
288
- output = "Please paste your OpenAI key from openai.com to use this app. " + str(datetime.datetime.now())
289
-
290
- ########################
291
- # multi line
292
- outputs = run_chain(chain, inp)
293
-
294
- outputs = process_chain_output(outputs)
295
-
296
- print (" len(outputs) {}".format(len(outputs)))
297
- for i, output in enumerate(outputs):
298
- if i==0:
299
- history.append((inp, output))
300
- else:
301
- history.append((None, output))
302
-
303
-
304
- except Exception as e:
305
- raise e
306
- finally:
307
- self.lock.release()
308
-
309
- print (history)
310
- return history, history, ""
311
-
312
- # upload image
313
- def add_image(state, chain, image):
314
- global IMAGE_COUNT
315
- global ARGS
316
- IMAGE_COUNT = IMAGE_COUNT + 1
317
- state = state or []
318
-
319
- # cap_onnx = get_caption_onnx_api(image.name)
320
- # cap_onnx = "The image shows " + cap_onnx
321
- # state = state + [(f"![](/file={image.name})", cap_onnx)]
322
-
323
- # : f"Image {N} http://0.0.0.0:7860/file={image.name}"
324
- # Image_N
325
- # wget http://0.0.0.0:7860/file=/tmp/bananabdzk2eqi.jpg
326
- # url_input_for_chain = "Image_{} http://0.0.0.0:7860/file={}".format(IMAGE_COUNT, image.name)
327
- url_input_for_chain = "http://0.0.0.0:{}/file={}".format(ARGS.port, image.name)
328
-
329
- # !!!!!! quick HACK to refer to image in this server for image editing pruprose
330
- url_input_for_chain = url_input_for_chain.replace("0.0.0.0", "10.123.124.92")
331
-
332
-
333
- ########################
334
- # multi line
335
- outputs = run_chain(chain, url_input_for_chain)
336
-
337
- outputs = process_chain_output(outputs)
338
-
339
- print (" len(outputs) {}".format(len(outputs)))
340
- for i, output in enumerate(outputs):
341
- if i==0:
342
- # state.append((f"![](/file={image.name})", output))
343
- state.append(((image.name,), output))
344
- else:
345
- state.append((None, output))
346
-
347
-
348
-
349
- print (state)
350
- return state, state
351
-
352
- def replace_with_image_markup(text):
353
- img_url = None
354
- text= text.strip()
355
- url_idx = text.rfind(" ")
356
- img_url = text[url_idx + 1:].strip()
357
- if img_url.endswith((".", "?")):
358
- img_url = img_url[:-1]
359
-
360
- # if img_url is not None:
361
- # img_url = f"![](/file={img_url})"
362
- return img_url
363
-
364
- def process_chain_output(outputs):
365
- global ARGS
366
- # print("outputs {}".format(outputs))
367
- if isinstance(outputs, str): # single line output
368
- outputs = [outputs]
369
- elif isinstance(outputs, list): # multi line output
370
- if ARGS.noIntermediateConv: # remove the items with assistant in it.
371
- cleanOutputs = []
372
- for output in outputs:
373
- # print("inside loop outputs {}".format(output))
374
- # found an edited image url to embed
375
- img_url = None
376
- # print ("type list: {}".format(output))
377
- if "assistant: here is the edited image " in output.lower():
378
- img_url = replace_with_image_markup(output)
379
- cleanOutputs.append("Assistant: Here is the edited image")
380
- if img_url is not None:
381
- cleanOutputs.append((img_url,))
382
- cleanOutputs.append(output)
383
- # cleanOutputs = cleanOutputs + output+ "."
384
- outputs = cleanOutputs
385
-
386
- # make it bold
387
- # outputs = "<b>{}</b>".format(outputs)
388
- return outputs
389
-
390
-
391
- def init_and_kick_off():
392
- global ARGS
393
- # initalize chatWrapper
394
- chat = ChatWrapper()
395
-
396
- # with gr.Blocks(css=".gradio-container {background-color: lightgray}") as block:
397
- # with gr.Blocks(css="#resetbtn {background-color: #4CAF50; color: red;} #chatbot {height: 700px; overflow: auto;}") as block:
398
- with gr.Blocks() as block:
399
- llm_state = gr.State()
400
- history_state = gr.State()
401
- chain_state = gr.State()
402
-
403
-
404
-
405
- reset_btn = gr.Button(value="!!!CLICK to wake up the AI!!!", variant="secondary", elem_id="resetbtn").style(full_width=False)
406
-
407
- with gr.Row():
408
- chatbot = gr.Chatbot(elem_id="chatbot").style(height=620)
409
-
410
- with gr.Row():
411
- with gr.Column(scale=0.75):
412
- message = gr.Textbox(label="What's on your mind??",
413
- placeholder="What's the answer to life, the universe, and everything?",
414
- lines=1)
415
- with gr.Column(scale=0.15):
416
- submit = gr.Button(value="Send", variant="secondary").style(full_width=False)
417
- with gr.Column(scale=0.10, min_width=0):
418
- btn = gr.UploadButton("📁", file_types=["image", "video", "audio"])
419
-
420
- with gr.Row():
421
- with gr.Column(scale=0.90):
422
- gr.HTML("""
423
- <p>This application, developed by Cognitive Service Team Microsoft, demonstrates all cognitive service APIs in a conversational agent
424
- </p>""")
425
- # with gr.Column(scale=0.10):
426
- # reset_btn = gr.Button(value="Initiate Chat", variant="secondary", elem_id="resetbtn").style(full_width=False)
427
-
428
- message.submit(chat, inputs=[message, history_state, chain_state],
429
- outputs=[chatbot, history_state, message])
430
-
431
- submit.click(chat, inputs=[message, history_state, chain_state],
432
- outputs=[chatbot, history_state, message])
433
-
434
- btn.upload(add_image, inputs=[history_state, chain_state, btn], outputs=[history_state, chatbot])
435
- # reset_btn.click(reset_memory, inputs=[history_state], outputs=[chatbot, history_state])
436
-
437
- # openai_api_key_textbox.change(set_openai_api_key,
438
- # inputs=[openai_api_key_textbox],
439
- # outputs=[chain_state])
440
- # load the chain
441
- reset_btn.click(load_chain, inputs=[history_state], outputs=[chatbot, history_state, chain_state])
442
-
443
-
444
-
445
- # # load the chain
446
- # load_chain()
447
-
448
- # launch the app
449
- block.launch(server_name="0.0.0.0", server_port = ARGS.port)
450
-
451
- if __name__ == '__main__':
452
- parser = argparse.ArgumentParser()
453
-
454
- parser.add_argument('--port', type=int, required=False, default=7860)
455
- parser.add_argument('--openAIModel', type=str, required=False, default='openAIGPT35')
456
- parser.add_argument('--noIntermediateConv', default=False, action='store_true', help='if this flag is turned on no intermediate conversation should be shown')
457
-
458
- global ARGS
459
- ARGS = parser.parse_args()
460
-
461
- init_and_kick_off()
462
-
463
- # python app_test.py --port 7862 --openAIModel 'azureChatGPT'
 
AllInOneApp/Chat-GPT-LangChain/audios/tempfile.mp3 DELETED
Binary file (785 kB)
 
AllInOneApp/Chat-GPT-LangChain/azure_utils.py DELETED
@@ -1,155 +0,0 @@
1
- # This class stores Azure voice data. Specifically, the class stores several records containing
2
- # language, lang_code, gender, voice_id and engine. The class also has a method to return the
3
- # voice_id, lang_code and engine given a language and gender.
4
-
5
- NEURAL_ENGINE = "neural"
6
- STANDARD_ENGINE = "standard"
7
-
8
-
9
- class AzureVoiceData:
10
- def get_voice(self, language, gender):
11
- for voice in self.voice_data:
12
- if voice['language'] == language and voice['gender'] == gender:
13
- return voice['azure_voice']
14
- return None
15
-
16
- def __init__(self):
17
- self.voice_data = [
18
- {'language': 'Arabic',
19
- 'azure_voice': 'ar-EG-ShakirNeural',
20
- 'gender': 'Male'},
21
- {'language': 'Arabic (Gulf)',
22
- 'azure_voice': 'ar-KW-FahedNeural',
23
- 'gender': 'Male'},
24
- {'language': 'Catalan',
25
- 'azure_voice': 'ca-ES-EnricNeural',
26
- 'gender': 'Male'},
27
- {'language': 'Chinese (Cantonese)',
28
- 'azure_voice': 'yue-CN-YunSongNeural',
29
- 'gender': 'Male'},
30
- {'language': 'Chinese (Mandarin)',
31
- 'azure_voice': 'zh-CN-YunxiNeural',
32
- 'gender': 'Male'},
33
- {'language': 'Danish',
34
- 'azure_voice': 'da-DK-JeppeNeural',
35
- 'gender': 'Male'},
36
- {'language': 'Dutch',
37
- 'azure_voice': 'nl-NL-MaartenNeural',
38
- 'gender': 'Male'},
39
- {'language': 'English (Australian)',
40
- 'azure_voice': 'en-AU-KenNeural',
41
- 'gender': 'Male'},
42
- {'language': 'English (British)',
43
- 'azure_voice': 'en-GB-RyanNeural',
44
- 'gender': 'Male'},
45
- {'language': 'English (Indian)',
46
- 'azure_voice': 'en-IN-PrabhatNeural',
47
- 'gender': 'Male'},
48
- {'language': 'English (New Zealand)',
49
- 'azure_voice': 'en-NZ-MitchellNeural',
50
- 'gender': 'Male'},
51
- {'language': 'English (South African)',
52
- 'azure_voice': 'en-ZA-LukeNeural',
53
- 'gender': 'Male'},
54
- {'language': 'English (US)',
55
- 'azure_voice': 'en-US-ChristopherNeural',
56
- 'gender': 'Male'},
57
- {'language': 'English (Welsh)',
58
- 'azure_voice': 'cy-GB-AledNeural',
59
- 'gender': 'Male'},
60
- {'language': 'Finnish',
61
- 'azure_voice': 'fi-FI-HarriNeural',
62
- 'gender': 'Male'},
63
- {'language': 'French',
64
- 'azure_voice': 'fr-FR-HenriNeural',
65
- 'gender': 'Male'},
66
- {'language': 'French (Canadian)',
67
- 'azure_voice': 'fr-CA-AntoineNeural',
68
- 'gender': 'Male'},
69
- {'language': 'German',
70
- 'azure_voice': 'de-DE-KlausNeural',
71
- 'gender': 'Male'},
72
- {'language': 'German (Austrian)',
73
- 'azure_voice': 'de-AT-JonasNeural',
74
- 'gender': 'Male'},
75
- {'language': 'Hindi',
76
- 'azure_voice': 'hi-IN-MadhurNeural',
77
- 'gender': 'Male'},
78
- {'language': 'Icelandic',
79
- 'azure_voice': 'is-IS-GunnarNeural',
80
- 'gender': 'Male'},
81
- {'language': 'Italian',
82
- 'azure_voice': 'it-IT-GianniNeural',
83
- 'gender': 'Male'},
84
- {'language': 'Japanese',
85
- 'azure_voice': 'ja-JP-KeitaNeural',
86
- 'gender': 'Male'},
87
- {'language': 'Korean',
88
- 'azure_voice': 'ko-KR-GookMinNeural',
89
- 'gender': 'Male'},
90
- {'language': 'Norwegian',
91
- 'azure_voice': 'nb-NO-FinnNeural',
92
- 'gender': 'Male'},
93
- {'language': 'Polish',
94
- 'azure_voice': 'pl-PL-MarekNeural',
95
- 'gender': 'Male'},
96
- {'language': 'Portuguese (Brazilian)',
97
- 'azure_voice': 'pt-BR-NicolauNeural',
98
- 'gender': 'Male'},
99
- {'language': 'Portuguese (European)',
100
- 'azure_voice': 'pt-PT-DuarteNeural',
101
- 'gender': 'Male'},
102
- {'language': 'Romanian',
103
- 'azure_voice': 'ro-RO-EmilNeural',
104
- 'gender': 'Male'},
105
- {'language': 'Russian',
106
- 'azure_voice': 'ru-RU-DmitryNeural',
107
- 'gender': 'Male'},
108
- {'language': 'Spanish (European)',
109
- 'azure_voice': 'es-ES-TeoNeural',
110
- 'gender': 'Male'},
111
- {'language': 'Spanish (Mexican)',
112
- 'azure_voice': 'es-MX-LibertoNeural',
113
- 'gender': 'Male'},
114
- {'language': 'Spanish (US)',
115
- 'azure_voice': 'es-US-AlonsoNeural"',
116
- 'gender': 'Male'},
117
- {'language': 'Swedish',
118
- 'azure_voice': 'sv-SE-MattiasNeural',
119
- 'gender': 'Male'},
120
- {'language': 'Turkish',
121
- 'azure_voice': 'tr-TR-AhmetNeural',
122
- 'gender': 'Male'},
123
- {'language': 'Welsh',
124
- 'azure_voice': 'cy-GB-AledNeural',
125
- 'gender': 'Male'},
126
- ]
127
-
128
-
129
- # Run from the command-line
130
- if __name__ == '__main__':
131
- azure_voice_data = AzureVoiceData()
132
-
133
- azure_voice = azure_voice_data.get_voice('English (US)', 'Male')
134
- print('English (US)', 'Male', azure_voice)
135
-
136
- azure_voice = azure_voice_data.get_voice('English (US)', 'Female')
137
- print('English (US)', 'Female', azure_voice)
138
-
139
- azure_voice = azure_voice_data.get_voice('French', 'Female')
140
- print('French', 'Female', azure_voice)
141
-
142
- azure_voice = azure_voice_data.get_voice('French', 'Male')
143
- print('French', 'Male', azure_voice)
144
-
145
- azure_voice = azure_voice_data.get_voice('Japanese', 'Female')
146
- print('Japanese', 'Female', azure_voice)
147
-
148
- azure_voice = azure_voice_data.get_voice('Japanese', 'Male')
149
- print('Japanese', 'Male', azure_voice)
150
-
151
- azure_voice = azure_voice_data.get_voice('Hindi', 'Female')
152
- print('Hindi', 'Female', azure_voice)
153
-
154
- azure_voice = azure_voice_data.get_voice('Hindi', 'Male')
155
- print('Hindi', 'Male', azure_voice)
 
AllInOneApp/Chat-GPT-LangChain/polly_utils.py DELETED
@@ -1,635 +0,0 @@
1
- # This class stores Polly voice data. Specifically, the class stores several records containing
2
- # language, lang_code, gender, voice_id and engine. The class also has a method to return the
3
- # voice_id, lang_code and engine given a language and gender.
4
-
5
- NEURAL_ENGINE = "neural"
6
- STANDARD_ENGINE = "standard"
7
-
8
-
9
- class PollyVoiceData:
10
- def get_voice(self, language, gender):
11
- for voice in self.voice_data:
12
- if voice['language'] == language and voice['gender'] == gender:
13
- if voice['neural'] == 'Yes':
14
- return voice['voice_id'], voice['lang_code'], NEURAL_ENGINE
15
- for voice in self.voice_data:
16
- if voice['language'] == language and voice['gender'] == gender:
17
- if voice['standard'] == 'Yes':
18
- return voice['voice_id'], voice['lang_code'], STANDARD_ENGINE
19
- return None, None, None
20
-
21
- def get_whisper_lang_code(self, language):
22
- for voice in self.voice_data:
23
- if voice['language'] == language:
24
- return voice['whisper_lang_code']
25
- return "en"
26
-
27
- def __init__(self):
28
- self.voice_data = [
29
- {'language': 'Arabic',
30
- 'lang_code': 'arb',
31
- 'whisper_lang_code': 'ar',
32
- 'voice_id': 'Zeina',
33
- 'gender': 'Female',
34
- 'neural': 'No',
35
- 'standard': 'Yes'},
36
- {'language': 'Arabic (Gulf)',
37
- 'lang_code': 'ar-AE',
38
- 'whisper_lang_code': 'ar',
39
- 'voice_id': 'Hala',
40
- 'gender': 'Female',
41
- 'neural': 'Yes',
42
- 'standard': 'No'},
43
- {'language': 'Catalan',
44
- 'lang_code': 'ca-ES',
45
- 'whisper_lang_code': 'ca',
46
- 'voice_id': 'Arlet',
47
- 'gender': 'Female',
48
- 'neural': 'Yes',
49
- 'standard': 'No'},
50
- {'language': 'Chinese (Cantonese)',
51
- 'lang_code': 'yue-CN',
52
- 'whisper_lang_code': 'zh',
53
- 'voice_id': 'Hiujin',
54
- 'gender': 'Female',
55
- 'neural': 'Yes',
56
- 'standard': 'No'},
57
- {'language': 'Chinese (Mandarin)',
58
- 'lang_code': 'cmn-CN',
59
- 'whisper_lang_code': 'zh',
60
- 'voice_id': 'Zhiyu',
61
- 'gender': 'Female',
62
- 'neural': 'Yes',
63
- 'standard': 'No'},
64
- {'language': 'Danish',
65
- 'lang_code': 'da-DK',
66
- 'whisper_lang_code': 'da',
67
- 'voice_id': 'Naja',
68
- 'gender': 'Female',
69
- 'neural': 'No',
70
- 'standard': 'Yes'},
71
- {'language': 'Danish',
72
- 'lang_code': 'da-DK',
73
- 'whisper_lang_code': 'da',
74
- 'voice_id': 'Mads',
75
- 'gender': 'Male',
76
- 'neural': 'No',
77
- 'standard': 'Yes'},
78
- {'language': 'Dutch',
79
- 'lang_code': 'nl-NL',
80
- 'whisper_lang_code': 'nl',
81
- 'voice_id': 'Laura',
82
- 'gender': 'Female',
83
- 'neural': 'Yes',
84
- 'standard': 'No'},
85
- {'language': 'Dutch',
86
- 'lang_code': 'nl-NL',
87
- 'whisper_lang_code': 'nl',
88
- 'voice_id': 'Lotte',
89
- 'gender': 'Female',
90
- 'neural': 'No',
91
- 'standard': 'Yes'},
92
- {'language': 'Dutch',
93
- 'lang_code': 'nl-NL',
94
- 'whisper_lang_code': 'nl',
95
- 'voice_id': 'Ruben',
96
- 'gender': 'Male',
97
- 'neural': 'No',
98
- 'standard': 'Yes'},
99
- {'language': 'English (Australian)',
100
- 'lang_code': 'en-AU',
101
- 'whisper_lang_code': 'en',
102
- 'voice_id': 'Nicole',
103
- 'gender': 'Female',
104
- 'neural': 'No',
105
- 'standard': 'Yes'},
106
- {'language': 'English (Australian)',
107
- 'lang_code': 'en-AU',
108
- 'whisper_lang_code': 'en',
109
- 'voice_id': 'Olivia',
110
- 'gender': 'Female',
111
- 'neural': 'Yes',
112
- 'standard': 'No'},
113
- {'language': 'English (Australian)',
114
- 'lang_code': 'en-AU',
115
- 'whisper_lang_code': 'en',
116
- 'voice_id': 'Russell',
117
- 'gender': 'Male',
118
- 'neural': 'No',
119
- 'standard': 'Yes'},
120
- {'language': 'English (British)',
121
- 'lang_code': 'en-GB',
122
- 'whisper_lang_code': 'en',
123
- 'voice_id': 'Amy',
124
- 'gender': 'Female',
125
- 'neural': 'Yes',
126
- 'standard': 'Yes'},
127
- {'language': 'English (British)',
128
- 'lang_code': 'en-GB',
129
- 'whisper_lang_code': 'en',
130
- 'voice_id': 'Emma',
131
- 'gender': 'Female',
132
- 'neural': 'Yes',
133
- 'standard': 'Yes'},
134
- {'language': 'English (British)',
135
- 'lang_code': 'en-GB',
136
- 'whisper_lang_code': 'en',
137
- 'voice_id': 'Brian',
138
- 'gender': 'Male',
139
- 'neural': 'Yes',
140
- 'standard': 'Yes'},
141
- {'language': 'English (British)',
142
- 'lang_code': 'en-GB',
143
- 'whisper_lang_code': 'en',
144
- 'voice_id': 'Arthur',
145
- 'gender': 'Male',
146
- 'neural': 'Yes',
147
- 'standard': 'No'},
148
- {'language': 'English (Indian)',
149
- 'lang_code': 'en-IN',
150
- 'whisper_lang_code': 'en',
151
- 'voice_id': 'Aditi',
152
- 'gender': 'Female',
153
- 'neural': 'No',
154
- 'standard': 'Yes'},
155
- {'language': 'English (Indian)',
156
- 'lang_code': 'en-IN',
157
- 'whisper_lang_code': 'en',
158
- 'voice_id': 'Raveena',
159
- 'gender': 'Female',
160
- 'neural': 'No',
161
- 'standard': 'Yes'},
162
- {'language': 'English (Indian)',
163
- 'lang_code': 'en-IN',
164
- 'whisper_lang_code': 'en',
165
- 'voice_id': 'Kajal',
166
- 'gender': 'Female',
167
- 'neural': 'Yes',
168
- 'standard': 'No'},
169
- {'language': 'English (New Zealand)',
170
- 'lang_code': 'en-NZ',
171
- 'whisper_lang_code': 'en',
172
- 'voice_id': 'Aria',
173
- 'gender': 'Female',
174
- 'neural': 'Yes',
175
- 'standard': 'No'},
176
- {'language': 'English (South African)',
177
- 'lang_code': 'en-ZA',
178
- 'whisper_lang_code': 'en',
179
- 'voice_id': 'Ayanda',
180
- 'gender': 'Female',
181
- 'neural': 'Yes',
182
- 'standard': 'No'},
183
- {'language': 'English (US)',
184
- 'lang_code': 'en-US',
185
- 'whisper_lang_code': 'en',
186
- 'voice_id': 'Ivy',
187
- 'gender': 'Female (child)',
188
- 'neural': 'Yes',
189
- 'standard': 'Yes'},
190
- {'language': 'English (US)',
191
- 'lang_code': 'en-US',
192
- 'whisper_lang_code': 'en',
193
- 'voice_id': 'Joanna',
194
- 'gender': 'Female',
195
- 'neural': 'Yes',
196
- 'standard': 'Yes'},
197
- {'language': 'English (US)',
198
- 'lang_code': 'en-US',
199
- 'whisper_lang_code': 'en',
200
- 'voice_id': 'Kendra',
201
- 'gender': 'Female',
202
- 'neural': 'Yes',
203
- 'standard': 'Yes'},
204
- {'language': 'English (US)',
205
- 'lang_code': 'en-US',
206
- 'whisper_lang_code': 'en',
207
- 'voice_id': 'Kimberly',
208
- 'gender': 'Female',
209
- 'neural': 'Yes',
210
- 'standard': 'Yes'},
211
- {'language': 'English (US)',
212
- 'lang_code': 'en-US',
213
- 'whisper_lang_code': 'en',
214
- 'voice_id': 'Salli',
215
- 'gender': 'Female',
216
- 'neural': 'Yes',
217
- 'standard': 'Yes'},
218
- {'language': 'English (US)',
219
- 'lang_code': 'en-US',
220
- 'whisper_lang_code': 'en',
221
- 'voice_id': 'Joey',
222
- 'gender': 'Male',
223
- 'neural': 'Yes',
224
- 'standard': 'Yes'},
225
- {'language': 'English (US)',
226
- 'lang_code': 'en-US',
227
- 'whisper_lang_code': 'en',
228
- 'voice_id': 'Justin',
229
- 'gender': 'Male (child)',
230
- 'neural': 'Yes',
231
- 'standard': 'Yes'},
232
- {'language': 'English (US)',
233
- 'lang_code': 'en-US',
234
- 'whisper_lang_code': 'en',
235
- 'voice_id': 'Kevin',
236
- 'gender': 'Male (child)',
237
- 'neural': 'Yes',
238
- 'standard': 'No'},
239
- {'language': 'English (US)',
240
- 'lang_code': 'en-US',
241
- 'whisper_lang_code': 'en',
242
- 'voice_id': 'Matthew',
243
- 'gender': 'Male',
244
- 'neural': 'Yes',
245
- 'standard': 'Yes'},
246
- {'language': 'English (Welsh)',
247
- 'lang_code': 'en-GB-WLS',
248
- 'whisper_lang_code': 'en',
249
- 'voice_id': 'Geraint',
250
- 'gender': 'Male',
251
- 'neural': 'No',
252
- 'standard': 'Yes'},
253
- {'language': 'Finnish',
254
- 'lang_code': 'fi-FI',
255
- 'whisper_lang_code': 'fi',
256
- 'voice_id': 'Suvi',
257
- 'gender': 'Female',
258
- 'neural': 'Yes',
259
- 'standard': 'No'},
260
- {'language': 'French',
261
- 'lang_code': 'fr-FR',
262
- 'whisper_lang_code': 'fr',
263
- 'voice_id': 'Celine',
264
- 'gender': 'Female',
265
- 'neural': 'No',
266
- 'standard': 'Yes'},
267
- {'language': 'French',
268
- 'lang_code': 'fr-FR',
269
- 'whisper_lang_code': 'fr',
270
- 'voice_id': 'Lea',
271
- 'gender': 'Female',
272
- 'neural': 'Yes',
273
- 'standard': 'Yes'},
274
- {'language': 'French',
275
- 'lang_code': 'fr-FR',
276
- 'whisper_lang_code': 'fr',
277
- 'voice_id': 'Mathieu',
278
- 'gender': 'Male',
279
- 'neural': 'No',
280
- 'standard': 'Yes'},
281
- {'language': 'French (Canadian)',
282
- 'lang_code': 'fr-CA',
283
- 'whisper_lang_code': 'fr',
284
- 'voice_id': 'Chantal',
285
- 'gender': 'Female',
286
- 'neural': 'No',
287
- 'standard': 'Yes'},
288
- {'language': 'French (Canadian)',
289
- 'lang_code': 'fr-CA',
290
- 'whisper_lang_code': 'fr',
291
- 'voice_id': 'Gabrielle',
292
- 'gender': 'Female',
293
- 'neural': 'Yes',
294
- 'standard': 'No'},
295
- {'language': 'French (Canadian)',
296
- 'lang_code': 'fr-CA',
297
- 'whisper_lang_code': 'fr',
298
- 'voice_id': 'Liam',
299
- 'gender': 'Male',
300
- 'neural': 'Yes',
301
- 'standard': 'No'},
302
- {'language': 'German',
303
- 'lang_code': 'de-DE',
304
- 'whisper_lang_code': 'de',
305
- 'voice_id': 'Marlene',
306
- 'gender': 'Female',
307
- 'neural': 'No',
308
- 'standard': 'Yes'},
309
- {'language': 'German',
310
- 'lang_code': 'de-DE',
311
- 'whisper_lang_code': 'de',
312
- 'voice_id': 'Vicki',
313
- 'gender': 'Female',
314
- 'neural': 'Yes',
315
- 'standard': 'Yes'},
316
- {'language': 'German',
317
- 'lang_code': 'de-DE',
318
- 'whisper_lang_code': 'de',
319
- 'voice_id': 'Hans',
320
- 'gender': 'Male',
321
- 'neural': 'No',
322
- 'standard': 'Yes'},
323
- {'language': 'German',
324
- 'lang_code': 'de-DE',
325
- 'whisper_lang_code': 'de',
326
- 'voice_id': 'Daniel',
327
- 'gender': 'Male',
328
- 'neural': 'Yes',
329
- 'standard': 'No'},
330
- {'language': 'German (Austrian)',
331
- 'lang_code': 'de-AT',
332
- 'whisper_lang_code': 'de',
333
- 'voice_id': 'Hannah',
334
- 'gender': 'Female',
335
- 'neural': 'Yes',
336
- 'standard': 'No'},
337
- {'language': 'Hindi',
338
- 'lang_code': 'hi-IN',
339
- 'whisper_lang_code': 'hi',
340
- 'voice_id': 'Aditi',
341
- 'gender': 'Female',
342
- 'neural': 'No',
343
- 'standard': 'Yes'},
344
- {'language': 'Hindi',
345
- 'lang_code': 'hi-IN',
346
- 'whisper_lang_code': 'hi',
347
- 'voice_id': 'Kajal',
348
- 'gender': 'Female',
349
- 'neural': 'Yes',
350
- 'standard': 'No'},
351
- {'language': 'Icelandic',
352
- 'lang_code': 'is-IS',
353
- 'whisper_lang_code': 'is',
354
- 'voice_id': 'Dora',
355
- 'gender': 'Female',
356
- 'neural': 'No',
357
- 'standard': 'Yes'},
358
- {'language': 'Icelandic',
359
- 'lang_code': 'is-IS',
360
- 'whisper_lang_code': 'is',
361
- 'voice_id': 'Karl',
362
- 'gender': 'Male',
363
- 'neural': 'No',
364
- 'standard': 'Yes'},
365
- {'language': 'Italian',
366
- 'lang_code': 'it-IT',
367
- 'whisper_lang_code': 'it',
368
- 'voice_id': 'Carla',
369
- 'gender': 'Female',
370
- 'neural': 'No',
371
- 'standard': 'Yes'},
372
- {'language': 'Italian',
373
- 'lang_code': 'it-IT',
374
- 'whisper_lang_code': 'it',
375
- 'voice_id': 'Bianca',
376
- 'gender': 'Female',
377
- 'neural': 'Yes',
378
- 'standard': 'Yes'},
379
- {'language': 'Japanese',
380
- 'lang_code': 'ja-JP',
381
- 'whisper_lang_code': 'ja',
382
- 'voice_id': 'Mizuki',
383
- 'gender': 'Female',
384
- 'neural': 'No',
385
- 'standard': 'Yes'},
386
- {'language': 'Japanese',
387
- 'lang_code': 'ja-JP',
388
- 'whisper_lang_code': 'ja',
389
- 'voice_id': 'Takumi',
390
- 'gender': 'Male',
391
- 'neural': 'Yes',
392
- 'standard': 'Yes'},
393
- {'language': 'Korean',
394
- 'lang_code': 'ko-KR',
395
- 'whisper_lang_code': 'ko',
396
- 'voice_id': 'Seoyeon',
397
- 'gender': 'Female',
398
- 'neural': 'Yes',
399
- 'standard': 'Yes'},
400
- {'language': 'Norwegian',
401
- 'lang_code': 'nb-NO',
402
- 'whisper_lang_code': 'no',
403
- 'voice_id': 'Liv',
404
- 'gender': 'Female',
405
- 'neural': 'No',
406
- 'standard': 'Yes'},
407
- {'language': 'Norwegian',
408
- 'lang_code': 'nb-NO',
409
- 'whisper_lang_code': 'no',
410
- 'voice_id': 'Ida',
411
- 'gender': 'Female',
412
- 'neural': 'Yes',
413
- 'standard': 'No'},
414
- {'language': 'Polish',
415
- 'lang_code': 'pl-PL',
416
- 'whisper_lang_code': 'pl',
417
- 'voice_id': 'Ewa',
418
- 'gender': 'Female',
419
- 'neural': 'No',
420
- 'standard': 'Yes'},
421
- {'language': 'Polish',
422
- 'lang_code': 'pl-PL',
423
- 'whisper_lang_code': 'pl',
424
- 'voice_id': 'Maja',
425
- 'gender': 'Female',
426
- 'neural': 'No',
427
- 'standard': 'Yes'},
428
- {'language': 'Polish',
429
- 'lang_code': 'pl-PL',
430
- 'whisper_lang_code': 'pl',
431
- 'voice_id': 'Jacek',
432
- 'gender': 'Male',
433
- 'neural': 'No',
434
- 'standard': 'Yes'},
435
- {'language': 'Polish',
436
- 'lang_code': 'pl-PL',
437
- 'whisper_lang_code': 'pl',
438
- 'voice_id': 'Jan',
439
- 'gender': 'Male',
440
- 'neural': 'No',
441
- 'standard': 'Yes'},
442
- {'language': 'Polish',
443
- 'lang_code': 'pl-PL',
444
- 'whisper_lang_code': 'pl',
445
- 'voice_id': 'Ola',
446
- 'gender': 'Female',
447
- 'neural': 'Yes',
448
- 'standard': 'No'},
449
- {'language': 'Portuguese (Brazilian)',
450
- 'lang_code': 'pt-BR',
451
- 'whisper_lang_code': 'pt',
452
- 'voice_id': 'Camila',
453
- 'gender': 'Female',
454
- 'neural': 'Yes',
455
- 'standard': 'Yes'},
456
- {'language': 'Portuguese (Brazilian)',
457
- 'lang_code': 'pt-BR',
458
- 'whisper_lang_code': 'pt',
459
- 'voice_id': 'Vitoria',
460
- 'gender': 'Female',
461
- 'neural': 'Yes',
462
- 'standard': 'Yes'},
463
- {'language': 'Portuguese (Brazilian)',
464
- 'lang_code': 'pt-BR',
465
- 'whisper_lang_code': 'pt',
466
- 'voice_id': 'Ricardo',
467
- 'gender': 'Male',
468
- 'neural': 'No',
469
- 'standard': 'Yes'},
470
- {'language': 'Portuguese (European)',
471
- 'lang_code': 'pt-PT',
472
- 'whisper_lang_code': 'pt',
473
- 'voice_id': 'Ines',
474
- 'gender': 'Female',
475
- 'neural': 'Yes',
476
- 'standard': 'Yes'},
477
- {'language': 'Portuguese (European)',
478
- 'lang_code': 'pt-PT',
479
- 'whisper_lang_code': 'pt',
480
- 'voice_id': 'Cristiano',
481
- 'gender': 'Male',
482
- 'neural': 'No',
483
- 'standard': 'Yes'},
484
- {'language': 'Romanian',
485
- 'lang_code': 'ro-RO',
486
- 'whisper_lang_code': 'ro',
487
- 'voice_id': 'Carmen',
488
- 'gender': 'Female',
489
- 'neural': 'No',
490
- 'standard': 'Yes'},
491
- {'language': 'Russian',
492
- 'lang_code': 'ru-RU',
493
- 'whisper_lang_code': 'ru',
494
- 'voice_id': 'Tatyana',
495
- 'gender': 'Female',
496
- 'neural': 'No',
497
- 'standard': 'Yes'},
498
- {'language': 'Russian',
499
- 'lang_code': 'ru-RU',
500
- 'whisper_lang_code': 'ru',
501
- 'voice_id': 'Maxim',
502
- 'gender': 'Male',
503
- 'neural': 'No',
504
- 'standard': 'Yes'},
505
- {'language': 'Spanish (European)',
506
- 'lang_code': 'es-ES',
507
- 'whisper_lang_code': 'es',
508
- 'voice_id': 'Conchita',
509
- 'gender': 'Female',
510
- 'neural': 'No',
511
- 'standard': 'Yes'},
512
- {'language': 'Spanish (European)',
513
- 'lang_code': 'es-ES',
514
- 'whisper_lang_code': 'es',
515
- 'voice_id': 'Lucia',
516
- 'gender': 'Female',
517
- 'neural': 'Yes',
518
- 'standard': 'Yes'},
519
- {'language': 'Spanish (European)',
520
- 'lang_code': 'es-ES',
521
- 'whisper_lang_code': 'es',
522
- 'voice_id': 'Enrique',
523
- 'gender': 'Male',
524
- 'neural': 'No',
525
- 'standard': 'Yes'},
526
- {'language': 'Spanish (Mexican)',
527
- 'lang_code': 'es-MX',
528
- 'whisper_lang_code': 'es',
529
- 'voice_id': 'Mia',
530
- 'gender': 'Female',
531
- 'neural': 'Yes',
532
- 'standard': 'Yes'},
533
- {'language': 'Spanish (US)',
534
- 'lang_code': 'es-US',
535
- 'whisper_lang_code': 'es',
536
- 'voice_id': 'Lupe',
537
- 'gender': 'Female',
538
- 'neural': 'Yes',
539
- 'standard': 'Yes'},
540
- {'language': 'Spanish (US)',
541
- 'lang_code': 'es-US',
542
- 'whisper_lang_code': 'es',
543
- 'voice_id': 'Penelope',
544
- 'gender': 'Female',
545
- 'neural': 'No',
546
- 'standard': 'Yes'},
547
- {'language': 'Spanish (US)',
548
- 'lang_code': 'es-US',
549
- 'whisper_lang_code': 'es',
550
- 'voice_id': 'Miguel',
551
- 'gender': 'Male',
552
- 'neural': 'No',
553
- 'standard': 'Yes'},
554
- {'language': 'Spanish (US)',
555
- 'lang_code': 'es-US',
556
- 'whisper_lang_code': 'es',
557
- 'voice_id': 'Pedro',
558
- 'gender': 'Male',
559
- 'neural': 'Yes',
560
- 'standard': 'No'},
561
- {'language': 'Swedish',
562
- 'lang_code': 'sv-SE',
563
- 'whisper_lang_code': 'sv',
564
- 'voice_id': 'Astrid',
565
- 'gender': 'Female',
566
- 'neural': 'No',
567
- 'standard': 'Yes'},
568
- {'language': 'Swedish',
569
- 'lang_code': 'sv-SE',
570
- 'whisper_lang_code': 'sv',
571
- 'voice_id': 'Elin',
572
- 'gender': 'Female',
573
- 'neural': 'Yes',
574
- 'standard': 'No'},
575
- {'language': 'Turkish',
576
- 'lang_code': 'tr-TR',
577
- 'whisper_lang_code': 'tr',
578
- 'voice_id': 'Filiz',
579
- 'gender': 'Female',
580
- 'neural': 'No',
581
- 'standard': 'Yes'},
582
- {'language': 'Welsh',
583
- 'lang_code': 'cy-GB',
584
- 'whisper_lang_code': 'cy',
585
- 'voice_id': 'Gwyneth',
586
- 'gender': 'Female',
587
- 'neural': 'No',
588
- 'standard': 'Yes'}
589
- ]
590
-
591
-
592
- # Run from the command-line
593
- if __name__ == '__main__':
594
- polly_voice_data = PollyVoiceData()
595
-
596
- voice_id, language_code, engine = polly_voice_data.get_voice('English (US)', 'Male')
597
- print('English (US)', 'Male', voice_id, language_code, engine)
598
-
599
- voice_id, language_code, engine = polly_voice_data.get_voice('English (US)', 'Female')
600
- print('English (US)', 'Female', voice_id, language_code, engine)
601
-
602
- voice_id, language_code, engine = polly_voice_data.get_voice('French', 'Female')
603
- print('French', 'Female', voice_id, language_code, engine)
604
-
605
- voice_id, language_code, engine = polly_voice_data.get_voice('French', 'Male')
606
- print('French', 'Male', voice_id, language_code, engine)
607
-
608
- voice_id, language_code, engine = polly_voice_data.get_voice('Japanese', 'Female')
609
- print('Japanese', 'Female', voice_id, language_code, engine)
610
-
611
- voice_id, language_code, engine = polly_voice_data.get_voice('Japanese', 'Male')
612
- print('Japanese', 'Male', voice_id, language_code, engine)
613
-
614
- voice_id, language_code, engine = polly_voice_data.get_voice('Hindi', 'Female')
615
- print('Hindi', 'Female', voice_id, language_code, engine)
616
-
617
- voice_id, language_code, engine = polly_voice_data.get_voice('Hindi', 'Male')
618
- print('Hindi', 'Male', voice_id, language_code, engine)
619
-
620
- whisper_lang_code = polly_voice_data.get_whisper_lang_code('English (US)')
621
- print('English (US) whisper_lang_code:', whisper_lang_code)
622
-
623
- whisper_lang_code = polly_voice_data.get_whisper_lang_code('Chinese (Mandarin)')
624
- print('Chinese (Mandarin) whisper_lang_code:', whisper_lang_code)
625
-
626
- whisper_lang_code = polly_voice_data.get_whisper_lang_code('Norwegian')
627
- print('Norwegian whisper_lang_code:', whisper_lang_code)
628
-
629
- whisper_lang_code = polly_voice_data.get_whisper_lang_code('Dutch')
630
- print('Dutch whisper_lang_code:', whisper_lang_code)
631
-
632
- whisper_lang_code = polly_voice_data.get_whisper_lang_code('Foo')
633
- print('Foo whisper_lang_code:', whisper_lang_code)
634
-
635
-
 
AllInOneApp/Chat-GPT-LangChain/videos/tempfile.mp4 DELETED
Binary file (103 kB)
 
AllInOneApp/bananabdzk2eqi.jpg.1 DELETED
Binary file (16.8 kB)
 
AllInOneApp/build/lib/langchain/langimg.py DELETED
@@ -1,37 +0,0 @@
1
- ### Set up environment variables:
2
- #
3
- # export IMUN_URL="https://cognitivewudev.azure-api.net/computervision/imageanalysis:analyze"
4
- # export IMUN_SUBSCRIPTION_KEY=a*
5
- # export OPENAI_API_KEY=sk-*
6
-
7
- from langchain import ConversationChain, LLMChain
8
-
9
- from langchain.agents import load_tools, initialize_agent
10
- from langchain.chains.conversation.memory import ConversationBufferMemory
11
- from langchain.llms import OpenAI
12
-
13
- MAX_TOKENS = 512
14
-
15
- llm = OpenAI(temperature=0, max_tokens=MAX_TOKENS)
16
-
17
- tool_names = ['pal-math', 'imun']
18
- tools = load_tools(tool_names, llm=llm)
19
-
20
- memory = ConversationBufferMemory(memory_key="chat_history")
21
-
22
- chain = initialize_agent(tools, llm, agent="conversational-react-description", verbose=True, memory=memory)
23
-
24
- text = "what is the meaning of life"
25
- output = chain.run(input=text)
26
-
27
- text = "if I have two red balls and a blue ball, with blue balls half as heavy as the red balls. How many more blue balls do I need to have equal weight blue and red balls"
28
- output = chain.run(input=text)
29
-
30
- text = "summarize what you see in this image https://upload.wikimedia.org/wikipedia/commons/thumb/a/ad/Football_in_Bloomington%2C_Indiana%2C_1996.jpg/1920px-Football_in_Bloomington%2C_Indiana%2C_1996.jpg"
31
- output = chain.run(input=text)
32
-
33
-
34
- # To run imun as a tool
35
- from langchain.utilities import ImunAPIWrapper
36
- imun = ImunAPIWrapper()
37
- print(imun.run("https://upload.wikimedia.org/wikipedia/commons/thumb/a/ad/Football_in_Bloomington%2C_Indiana%2C_1996.jpg/1920px-Football_in_Bloomington%2C_Indiana%2C_1996.jpg"))
 
AllInOneApp/chatbot.py DELETED
@@ -1,17 +0,0 @@
1
- import numpy as np
2
- import gradio as gr
3
-
4
- def add_text(text, state):
5
- state = state + [(text, text + "?")]
6
- return state, state
7
-
8
- with gr.Blocks(css="#chatbot {height: 600px; overflow: auto;}") as demo:
9
- chatbot = gr.Chatbot(elem_id = "chatbot")
10
- state = gr.State([])
11
-
12
- with gr.Row():
13
- txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter").style(container=False)
14
-
15
- txt.submit(add_text, [txt, state], [chatbot, state])
16
-
17
- demo.launch(server_name="0.0.0.0")
 
AllInOneApp/chatbot1.py DELETED
@@ -1,40 +0,0 @@
1
- import gradio as gr
2
- import time
3
- import os
4
-
5
- qaMap = {}
6
- qaMap['']
7
-
8
- def add_text(state, text):
9
- state = state + [(text, text + "?")]
10
- return state, state
11
-
12
- def add_image(state, image):
13
- time.sleep(2)
14
- localFilePath = image.name
15
- ext = localFilePath.split(".")[-1]
16
- rest = "".join(localFilePath.split(".")[:-1])
17
- removed_hash = "".join(rest.split("_")[:-1])
18
- original_filename = removed_hash + "." + ext
19
-
20
- state = state + [(f"![](/file={image.name})", f"This is an image edited![](/file={image.name})")]
21
- return state, state
22
-
23
- # with gr.Blocks(css="#chatbot .overflow-y-scroll{height:600px}") as demo:
24
- # with gr.Blocks(css="#chatbot {height:600px}") as demo:
25
- with gr.Blocks(css="#chatbot {height: 800px; overflow: auto;}") as demo:
26
- chatbot = gr.Chatbot(elem_id="chatbot").style(height=620)
27
- state = gr.State([])
28
-
29
-
30
- with gr.Row():
31
- with gr.Column(scale=0.85):
32
- txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter, or upload an image").style(container=False)
33
- with gr.Column(scale=0.15, min_width=0):
34
- btn = gr.UploadButton("🖼️", file_types=["image"])
35
-
36
- txt.submit(add_text, [state, txt], [state, chatbot])
37
- txt.submit(lambda :"", None, txt)
38
- btn.upload(add_image, [state, btn], [state, chatbot])
39
-
40
- demo.launch(debug=True, server_name="0.0.0.0")
 
AllInOneApp/chatbotVideoDemo.py DELETED
@@ -1,94 +0,0 @@
1
- import gradio as gr
2
- import time
3
- import os
4
-
5
- qaMap = {}
6
- qaMap['receipt1.png'] = "AI: This is an image..AI: This image is likely a receipt or ticket..AI: This is a receipt from BOARDWALK restaurant on 8/16/2019 at 15:57. The check number is 312086, table number is 100, and there are 8 guests. The server's name is Kris H? and the terminal number is 31. The items ordered are: 1 Iced Tea ($3.00), 1 Nachos ($9.00), 1 Parfait ($6.00), 1 HH Sea Rum Swiz ($7.00), 1 HH Sea Rum Swiz ($7.00), 1 HH RM Honey Bee ($7.00), 1 HH Sea Rum Swiz ($7.00), 1 HH C R Manhattan ($7.00), and 1 Glass Prosecco ($10.00).."
7
- qaMap['How much was the Parfait?'] = "AI: The Parfait was $6.00."
8
- qaMap['how much was Iced tea and parfait in total?'] = "AI: The total cost of Ice tea and parfait was $9.00.."
9
- qaMap['find the menu for this restaurant'] = "AI: This question requires Bing search..AI: This is the menu for BOARDWALK restaurant. They serve breakfast, continental breakfast, seafood brunch, and lunch. They have an all-day menu with island favorites and homemade items. They also serve a rich country and delicious lobster bisque in a bread bowl or regular bowl.."
10
- qaMap['Can you do a reservation?'] = "AI: This question requires Bing search..AI: You can make a reservation at BOARDWALK restaurant by calling (714) 845-8444. You can also make a reservation on OpenTable. BOARDWALK restaurant is open from 7:00 am to 10:00 pm from Monday to Thursday and Sunday, and from 7:00 am to 11:00 pm on Friday and Saturday. The dining style is casual elegant and the cuisine is unspecified.."
11
- qaMap['face4.jpg'] = "AI: This is an image..AI: There are faces detected in this image, let me find if I know the person..AI: This image contains Emilia Clarke, Margot Robbie, Angelina Jolie, Emma Watson, Anne Hathaway, Megan Fox, Emma Stone, Jennifer Lawrence, Gal Gadot, Scarlett Johansson. They are posing for a picture.."
12
- qaMap['Who is in the top left corner of the image?'] = "AI: The person in the top left corner of the image is Angelina Jolie."
13
- qaMap['What is her most popular movie?'] = "AI: This question requires Bing search..AI: Angelina Jolie's most popular movie is Girl, Interrupted. It is the movie that garnered her first Academy Award win for acting.."
14
- qaMap['Where can I watch this movie?'] = "AI: This question requires Bing search..AI: You can watch Girl, Interrupted on Netflix.."
15
- qaMap['Can you share some link to watch?'] = "AI: This question requires Bing search..AI: You can watch Girl, Interrupted on Netflix by following this link: https://www.netflix.com/title/60024077."
16
- qaMap['money6.jpg'] = "AI: This is an image..AI: This image is likely a banknote or money..AI: This is a banknote of 500 in Bangladesh. The banknote number is 5 48683171. The banknote is in the Bengali language.."
17
- qaMap['Which country is it used?'] = "AI: This banknote is used in Bangladesh.."
18
- qaMap['How much is it in US dollar?'] = "AI: This question requires Bing search..AI: 500 BDT is equal to 4.67 USD at the rate on 2023-02-25. The exchange rate increased by 0.09% (by +$0.000009) compared to yesterday.."
19
- qaMap['pillbottlefront.png'] = "AI: This is an image..AI: This image is likely a pill bottle with labels..AI: This is a dietary supplement of NATURE'S BOUNTY Co Q-10 100mg for cardiovascular health. It comes in 75 Rapid Release Softgels.."
20
- qaMap['Is this medicine good for heart?'] = "AI: This is a subjective question and depends on individual health conditions and doctor's advice. It is recommended to consult a doctor before taking any new medicine.."
21
- qaMap['where can I buy it?'] = "AI: This question requires Bing search..AI: You can buy NATURE'S BOUNTY Co Q-10 100mg dietary supplement at Walgreens and Nature's Bounty store. The price is $57.64 for a pack of 2.."
22
- qaMap['blue_white_bird.jpg'] = "AI: This is an image..AI: This image is likely a blue bird on a branch..AI: This image does not contain any celebrities.."
23
- qaMap['change the color of bird\'s feathers from blue to red'] = "AI: Here is the updated image.. ![](/file=temp_pictures/xdecoder_edit_image/blue_white_bird_red.jpg)"
24
- qaMap['Furniture_Gateway_02.jpg'] = "AI: This is an image..AI: This image is likely a couch in a room..AI: This image does not contain any celebrities.."
25
- qaMap['make the sofa to one with leather'] = "AI: Here is the updated image.. ![](/file=temp_pictures/xdecoder_edit_image/Furniture_Gateway_02_result.jpg)"
26
- qaMap['zebras.jpg'] = "AI: This is an image..AI: This image is likely zebras walking in water..AI: This image does not contain any celebrities.."
27
- qaMap['change sky to Seattle skyline'] = "AI: Here is the updated image.. ![](/file=temp_pictures/xdecoder_edit_image/zebras_result.jpg)"
28
- qaMap['safari_rhino.mp4'] = "AI: This is a video. AI: This video is about a rhino with a large horn is walking down a road. Another rhino is following behind it, eating grass.. A rhino is seen walking down a road in this video. The rhino has a small horn on its head. It is seen walking on the road and eating grass on the side of the road.. A rhino walks down a dirt road. It's large horn is visible on its head.. A rhino walks down a road, with other rhinos on either side. It has a large horn on its head.. A rhino is seen walking on a dirt road in this video. It has a large horn on its head and is walking on all four legs. The rhino appears to be alone in the video.. A rhino walks down a dirt road and stops to eat grass in a field. Another rhino watches as it passes by.. A rhino is walking on the road and eating grass in a field.. A rhino is walking down a road in Africa, with another rhino walking beside it. The scenery is of green grass and trees.. A rhino is walking on the road and the other one is looking at it. The second rhino then walks next to the first one and looks at the camera. The first rhino then walks with a baby on the side of it. The second rhino walks with a small horn on the side of the road.. Rhinos on the move! These massive animals are usually peaceful, but can be dangerous if provoked.. A rhino and its baby are walking down a road in Africa. The rhino has a small horn, and the baby is holding a bird.. The rhinos are out for a stroll with one of them carrying a ball. Along the way, they meet a small bird that joins them on their journey.. "
29
- qaMap['Can you give me a shorter summary?'] = "AI: A rhino walks down a dirt road with another rhino following closely behind. The second rhino has a large horn, while the first rhino has a small horn. They both eat grass on the side of the road as they walk.. A rhino is walking down a dirt road with a large horn. The other rhino is looking at it.. A rhino is seen walking on a dirt road and eating grass in a field in this video.. A mother and child rhinoceros walk down a dirt path in Africa. The mother looks around protectively as the child playfully follows behind.. Two rhinos are walking down the road. One is carrying a small bird on its head, and the other is holding a ball. They are followed by a smaller rhino.. "
30
- qaMap['find the baby rhino in the video'] = "AI: The baby Rhino is at 2:05 in the video.. ![](/file=temp_pictures/xdecoder_edit_image/baby_rhino_appear.jpg)"
31
- qaMap[''] = ""
32
- qaMap[''] = ""
33
- qaMap[''] = ""
34
-
35
-
36
-
37
- # qaMap['blue_white_bird.jpg'] =
38
- def lookup(text):
39
- for key in qaMap.keys():
40
- if text.lower().strip() == key.lower().strip():
41
- return qaMap[key]
42
-
43
- def lookupImage(text):
44
- # remove /tmp/
45
- replaced_tmp = text.replace('/tmp/', '').lower().strip()
46
-
47
- for key in qaMap.keys():
48
- splitKey = key.lower().strip().split('.')
49
- if replaced_tmp.startswith(splitKey[0]) and replaced_tmp.endswith(splitKey[1]):
50
- return qaMap[key]
51
-
52
- def add_text(state, text):
53
- time.sleep(10)
54
- state = state + [("<b>{}</b>".format(text), "<b>{}</b>".format(lookup(text)))]
55
- print (state)
56
- # state = state + [(text, text + "?")]
57
- return state, state
58
-
59
- def add_image(state, image):
60
- time.sleep(8)
61
- # localFilePath = image.name
62
- # ext = localFilePath.split(".")[-1]
63
- # rest = ".".join(localFilePath.split(".")[:-1])
64
- # removed_hash = "_".join(rest.split("_")[:-1])
65
- # filename_only = removed_hash.split("/")[-1]
66
- # original_filename = filename_only + "." + ext
67
- if image.name.endswith('.mp4'):
68
- state = state + [(f"{image.name}", "<b>{}</b>".format(lookupImage(image.name)))]
69
- else:
70
- state = state + [(f"![](/file={image.name})", "<b>{}</b>".format(lookupImage(image.name)))]
71
- # state = state + [(f"![](/file={image.name})", f"This is an image edited![](/file={image.name})")]
72
- # state = state + [(f"![](/file={image.name})", f"<font size=\"24\">This is an image edited</font>This is an image edited<b>This is an image edited</b>![](/file={image.name})")]
73
-
74
- print (state)
75
- return state, state
76
-
77
- # with gr.Blocks(css="#chatbot .overflow-y-scroll{height:600px}") as demo:
78
- # with gr.Blocks(css="#chatbot {height:600px}") as demo:
79
- # with gr.Blocks(css="#chatbot {height: 650px; overflow: auto;}") as demo:
80
- with gr.Blocks() as demo:
81
- chatbot = gr.Chatbot(elem_id="chatbot").style(height=620)
82
- state = gr.State([])
83
-
84
- with gr.Row():
85
- with gr.Column(scale=0.85):
86
- txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter, or upload an image").style(container=False)
87
- with gr.Column(scale=0.15, min_width=0):
88
- btn = gr.UploadButton("🖼️", file_types=["image", "video"])
89
-
90
- txt.submit(add_text, [state, txt], [state, chatbot])
91
- txt.submit(lambda :"", None, txt)
92
- btn.upload(add_image, [state, btn], [state, chatbot])
93
-
94
- demo.launch(debug=True, server_name="0.0.0.0")
 
AllInOneApp/chatbotimage.py DELETED
@@ -1,59 +0,0 @@
1
- import gradio as gr
2
- import numpy as np
3
- import json
4
- import os
5
- import requests
6
-
7
-
8
- header_key = os.environ["CVFIAHMED_KEY"]
9
-
10
- def get_caption_onnx_api(imgf):
11
-
12
- headers = {
13
- 'Content-Type': 'application/octet-stream',
14
- 'Ocp-Apim-Subscription-Key': header_key,
15
- }
16
-
17
- params = {
18
- 'features': 'description',
19
- 'model-version': 'latest',
20
- 'language': 'en',
21
- 'descriptionExclude': 'Celebrities,Landmarks',
22
- }
23
-
24
- with open(imgf, 'rb') as f:
25
- data = f.read()
26
-
27
- response = requests.post('https://cvfiahmed.cognitiveservices.azure.com/vision/v2022-07-31-preview/operations/imageanalysis:analyze', params=params, headers=headers, data=data)
28
-
29
- return json.loads(response.content)['descriptionResult']['values'][0]['text']
30
-
31
-
32
- def add_image(state, image):
33
- cap_onnx = get_caption_onnx_api(image.name)
34
- state = state + [(f"![](/file={image.name})", cap_onnx)]
35
-
36
- # print (image)
37
- # print( np.fliplr(image) )
38
- # print(state)
39
- return state, state
40
-
41
- def add_text(state, text):
42
- state = state + [(text, text + "?")]
43
- return state, state
44
-
45
- with gr.Blocks(css="#chatbot .overflow-y-auto{height:500px}") as demo:
46
- chatbot = gr.Chatbot(elem_id="chatbot")
47
- state = gr.State([])
48
-
49
- with gr.Row():
50
- with gr.Column(scale=0.85):
51
- txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter, or upload an image").style(container=False)
52
- with gr.Column(scale=0.15, min_width=0):
53
- btn = gr.UploadButton("Upload image here", file_types=["image", '.png', 'gif'])
54
-
55
- txt.submit(add_text, [state, txt], [state, chatbot])
56
- txt.submit(lambda :"", None, txt)
57
- btn.upload(add_image, [state, btn], [state, chatbot])
58
-
59
- demo.launch(share = False, server_name="0.0.0.0")
 
AllInOneApp/chatbotvideoupload.py DELETED
@@ -1,40 +0,0 @@
1
- import gradio as gr
2
-
3
- def add_text(history, text):
4
- history = history + [(text, "reply")]
5
- return history, ""
6
-
7
- def add_file(history, file):
8
- print(file.name)
9
- history = history + [((file.name,), None)]
10
- return history
11
-
12
- def bot(history):
13
- response = "**That's cool!**"
14
- history[-1][1] = response
15
- return history
16
-
17
- with gr.Blocks() as demo:
18
- chatbot = gr.Chatbot([], elem_id="chatbot").style(height=750)
19
-
20
- with gr.Row():
21
- with gr.Column(scale=0.85):
22
- txt = gr.Textbox(
23
- show_label=False,
24
- placeholder="Enter text and press enter, or upload an image",
25
- ).style(container=False)
26
- with gr.Column(scale=0.15, min_width=0):
27
- btn = gr.UploadButton("📁", file_types=["image", "video", "audio"])
28
-
29
- txt.submit(add_text, [chatbot, txt], [chatbot, txt])
30
- btn.upload(add_file, [chatbot, btn], [chatbot])
31
-
32
-
33
- # txt.submit(add_text, [chatbot, txt], [chatbot, txt]).then(
34
- # bot, chatbot, chatbot
35
- # )
36
- # btn.upload(add_file, [chatbot, btn], [chatbot]).then(
37
- # bot, chatbot, chatbot
38
- # )
39
-
40
- demo.launch(debug=True, server_name="0.0.0.0", server_port = 7862)
 
AllInOneApp/flaskTest.py DELETED
@@ -1,9 +0,0 @@
1
- from flask import Flask
2
-
3
- app = Flask(__name__)
4
-
5
- @app.route('/')
6
- def index():
7
- return 'Web App with Python Flask!'
8
-
9
- app.run(host='0.0.0.0', port=7860)
 
AllInOneApp/gradioTest.py DELETED
@@ -1,40 +0,0 @@
1
- # import gradio as gr
2
-
3
- # def greet(name):
4
- # return "Hello " + name + "!"
5
-
6
- # demo = gr.Interface(fn=greet, inputs="text", outputs="text")
7
-
8
- import numpy as np
9
- import gradio as gr
10
-
11
-
12
- def flip_text(x):
13
- return x[::-1]
14
-
15
-
16
- def flip_image(x):
17
- return np.fliplr(x)
18
-
19
-
20
- with gr.Blocks() as demo:
21
- gr.Markdown("Flip text or image files using this demo.")
22
- with gr.Tab("Flip Text"):
23
- text_input = gr.Textbox()
24
- text_output = gr.Textbox()
25
- text_button = gr.Button("Flip")
26
- with gr.Tab("Flip Image"):
27
- with gr.Row():
28
- image_input = gr.Image()
29
- image_output = gr.Image()
30
- image_button = gr.Button("Flip")
31
-
32
- with gr.Accordion("Open for More!"):
33
- gr.Markdown("Look at me...")
34
-
35
- text_button.click(flip_text, inputs=text_input, outputs=text_output)
36
- image_button.click(flip_image, inputs=image_input, outputs=image_output)
37
-
38
-
39
-
40
- demo.launch(share=False, server_name="0.0.0.0")
 
AllInOneApp/langchain/.flake8 DELETED
@@ -1,12 +0,0 @@
1
- [flake8]
2
- exclude =
3
- venv
4
- .venv
5
- __pycache__
6
- notebooks
7
- # Recommend matching the black line length (default 88),
8
- # rather than using the flake8 default of 79:
9
- max-line-length = 88
10
- extend-ignore =
11
- # See https://github.com/PyCQA/pycodestyle/issues/373
12
- E203,
 
AllInOneApp/langchain/CITATION.cff DELETED
@@ -1,8 +0,0 @@
1
- cff-version: 1.2.0
2
- message: "If you use this software, please cite it as below."
3
- authors:
4
- - family-names: "Chase"
5
- given-names: "Harrison"
6
- title: "LangChain"
7
- date-released: 2022-10-17
8
- url: "https://github.com/hwchase17/langchain"
 
AllInOneApp/langchain/CONTRIBUTING.md DELETED
@@ -1,182 +0,0 @@
1
- # Contributing to LangChain
2
-
3
- Hi there! Thank you for even being interested in contributing to LangChain.
4
- As an open source project in a rapidly developing field, we are extremely open
5
- to contributions, whether it be in the form of a new feature, improved infra, or better documentation.
6
-
7
- To contribute to this project, please follow a ["fork and pull request"](https://docs.github.com/en/get-started/quickstart/contributing-to-projects) workflow.
8
- Please do not try to push directly to this repo unless you are maintainer.
9
-
10
- ## 🗺️Contributing Guidelines
11
-
12
- ### 🚩GitHub Issues
13
-
14
- Our [issues](https://github.com/hwchase17/langchain/issues) page is kept up to date
15
- with bugs, improvements, and feature requests. There is a taxonomy of labels to help
16
- with sorting and discovery of issues of interest. These include:
17
-
18
- - prompts: related to prompt tooling/infra.
19
- - llms: related to LLM wrappers/tooling/infra.
20
- - chains
21
- - utilities: related to different types of utilities to integrate with (Python, SQL, etc.).
22
- - agents
23
- - memory
24
- - applications: related to example applications to build
25
-
26
- If you start working on an issue, please assign it to yourself.
27
-
28
- If you are adding an issue, please try to keep it focused on a single modular bug/improvement/feature.
29
- If the two issues are related, or blocking, please link them rather than keep them as one single one.
30
-
31
- We will try to keep these issues as up to date as possible, though
32
- with the rapid rate of develop in this field some may get out of date.
33
- If you notice this happening, please just let us know.
34
-
35
- ### 🙋Getting Help
36
-
37
- Although we try to have a developer setup to make it as easy as possible for others to contribute (see below)
- it is possible that some pain point may arise around environment setup, linting, documentation, or other.
- Should that occur, please contact a maintainer! Not only do we want to help get you unblocked,
- but we also want to make sure that the process is smooth for future contributors.
-
- In a similar vein, we do enforce certain linting, formatting, and documentation standards in the codebase.
- If you are finding these difficult (or even just annoying) to work with,
- feel free to contact a maintainer for help - we do not want these to get in the way of getting
- good code into the codebase.
-
- ### 🏭Release process
-
- As of now, LangChain has an ad hoc release process: releases are cut with high frequency by
- a developer and published to [PyPI](https://pypi.org/project/langchain/).
-
- LangChain follows the [semver](https://semver.org/) versioning standard. However, as pre-1.0 software,
- even patch releases may contain [non-backwards-compatible changes](https://semver.org/#spec-item-4).
-
- If your contribution has made its way into a release, we will want to give you credit on Twitter (only if you want though)!
- If you have a Twitter account you would like us to mention, please let us know in the PR or in another manner.
-
- ## 🚀Quick Start
-
- This project uses [Poetry](https://python-poetry.org/) as a dependency manager. Check out Poetry's [documentation on how to install it](https://python-poetry.org/docs/#installation) on your system before proceeding.
-
- ❗Note: If you use `Conda` or `Pyenv` as your environment / package manager, avoid dependency conflicts by doing the following first:
- 1. *Before installing Poetry*, create and activate a new Conda env (e.g. `conda create -n langchain python=3.9`)
- 2. Install Poetry (see above)
- 3. Tell Poetry to use the virtualenv python environment (`poetry config virtualenvs.prefer-active-python true`)
- 4. Continue with the following steps.
-
- To install requirements:
-
- ```bash
- poetry install -E all
- ```
-
- This will install all requirements for running the package, examples, linting, formatting, tests, and coverage. Note the `-E all` flag will install all optional dependencies necessary for integration testing.
-
- Now, you should be able to run the common tasks in the following section.
-
- ## ✅Common Tasks
-
- Type `make` for a list of common tasks.
-
- ### Code Formatting
-
- Formatting for this project is done via a combination of [Black](https://black.readthedocs.io/en/stable/) and [isort](https://pycqa.github.io/isort/).
-
- To run formatting for this project:
-
- ```bash
- make format
- ```
-
- ### Linting
-
- Linting for this project is done via a combination of [Black](https://black.readthedocs.io/en/stable/), [isort](https://pycqa.github.io/isort/), [flake8](https://flake8.pycqa.org/en/latest/), and [mypy](http://mypy-lang.org/).
-
- To run linting for this project:
-
- ```bash
- make lint
- ```
-
- We recognize linting can be annoying - if you do not want to do it, please contact a project maintainer, and they can help you with it. We do not want this to be a blocker for good code getting contributed.
-
- ### Coverage
-
- Code coverage (i.e. the amount of code that is covered by unit tests) helps identify areas of the code that are potentially more or less brittle.
-
- To get a report of current coverage, run the following:
-
- ```bash
- make coverage
- ```
-
- ### Testing
-
- Unit tests cover modular logic that does not require calls to outside APIs.
-
- To run unit tests:
-
- ```bash
- make test
- ```
-
- If you add new logic, please add a unit test.
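
For illustration, here is a minimal sketch of what such a unit test could look like (a hypothetical example written against the existing `CharacterTextSplitter`; real tests live under `tests/unit_tests/` and run via `make test`):

```python
from langchain.text_splitter import CharacterTextSplitter


def test_character_text_splitter_respects_chunk_size() -> None:
    """Chunks produced by the splitter should not exceed the configured size."""
    splitter = CharacterTextSplitter(separator=" ", chunk_size=7, chunk_overlap=0)
    chunks = splitter.split_text("foo bar baz")
    assert chunks  # something was produced
    assert all(len(chunk) <= 7 for chunk in chunks)
```
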
-
- Integration tests cover logic that requires making calls to outside APIs (often integration with other services).
-
- To run integration tests:
-
- ```bash
- make integration_tests
- ```
-
- If you add support for a new external API, please add a new integration test.
-
- ### Adding a Jupyter Notebook
-
- If you are adding a Jupyter notebook example, you'll want to install the optional `dev` dependencies.
-
- To install dev dependencies:
-
- ```bash
- poetry install --with dev
- ```
-
- Launch a notebook:
-
- ```bash
- poetry run jupyter notebook
- ```
-
- When you run `poetry install`, the `langchain` package is installed as editable in the virtualenv, so your new logic can be imported into the notebook.
-
- ## Documentation
-
- ### Contribute Documentation
-
- Docs are largely autogenerated by [sphinx](https://www.sphinx-doc.org/en/master/) from the code.
-
- For that reason, we ask that you add good documentation to all classes and methods.
-
- Similar to linting, we recognize documentation can be annoying. If you do not want to do it, please contact a project maintainer, and they can help you with it. We do not want this to be a blocker for good code getting contributed.
-
- ### Build Documentation Locally
-
- Before building the documentation, it is always a good idea to clean the build directory:
-
- ```bash
- make docs_clean
- ```
-
- Next, you can run the linkchecker to make sure all links are valid:
-
- ```bash
- make docs_linkcheck
- ```
-
- Finally, you can build the documentation as outlined below:
-
- ```bash
- make docs_build
- ```

AllInOneApp/langchain/LICENSE DELETED
@@ -1,21 +0,0 @@
- The MIT License
-
- Copyright (c) Harrison Chase
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in
- all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- THE SOFTWARE.

AllInOneApp/langchain/Makefile DELETED
@@ -1,54 +0,0 @@
- .PHONY: all clean format lint test tests test_watch integration_tests help
-
- all: help
-
- coverage:
- poetry run pytest --cov \
- --cov-config=.coveragerc \
- --cov-report xml \
- --cov-report term-missing:skip-covered
-
- clean: docs_clean
-
- docs_build:
- cd docs && poetry run make html
-
- docs_clean:
- cd docs && poetry run make clean
-
- docs_linkcheck:
- poetry run linkchecker docs/_build/html/index.html
-
- format:
- poetry run black .
- poetry run isort .
-
- lint:
- poetry run mypy .
- poetry run black . --check
- poetry run isort . --check
- poetry run flake8 .
-
- test:
- poetry run pytest tests/unit_tests
-
- tests:
- poetry run pytest tests/unit_tests
-
- test_watch:
- poetry run ptw --now . -- tests/unit_tests
-
- integration_tests:
- poetry run pytest tests/integration_tests
-
- help:
- @echo '----'
- @echo 'coverage - run unit tests and generate coverage report'
- @echo 'docs_build - build the documentation'
- @echo 'docs_clean - clean the documentation build artifacts'
- @echo 'docs_linkcheck - run linkchecker on the documentation'
- @echo 'format - run code formatters'
- @echo 'lint - run linters'
- @echo 'test - run unit tests'
- @echo 'test_watch - run unit tests in watch mode'
- @echo 'integration_tests - run integration tests'

AllInOneApp/langchain/README.md DELETED
@@ -1,82 +0,0 @@
- # 🦜️🔗 LangChain
-
- ⚡ Building applications with LLMs through composability ⚡
-
- [![lint](https://github.com/hwchase17/langchain/actions/workflows/lint.yml/badge.svg)](https://github.com/hwchase17/langchain/actions/workflows/lint.yml) [![test](https://github.com/hwchase17/langchain/actions/workflows/test.yml/badge.svg)](https://github.com/hwchase17/langchain/actions/workflows/test.yml) [![linkcheck](https://github.com/hwchase17/langchain/actions/workflows/linkcheck.yml/badge.svg)](https://github.com/hwchase17/langchain/actions/workflows/linkcheck.yml) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI)](https://twitter.com/langchainai) [![](https://dcbadge.vercel.app/api/server/6adMQxSpJS?compact=true&style=flat)](https://discord.gg/6adMQxSpJS)
-
- **Production Support:** As you move your LangChains into production, we'd love to offer more comprehensive support.
- Please fill out [this form](https://forms.gle/57d8AmXBYp8PP8tZA) and we'll set up a dedicated support Slack channel.
-
- ## Quick Install
-
- `pip install langchain`
-
- ## 🤔 What is this?
-
- Large language models (LLMs) are emerging as a transformative technology, enabling
- developers to build applications that they previously could not.
- But using these LLMs in isolation is often not enough to
- create a truly powerful app - the real power comes when you can combine them with other sources of computation or knowledge.
-
- This library is aimed at assisting in the development of those types of applications. Common examples of these types of applications include:
-
- **❓ Question Answering over specific documents**
-
- - [Documentation](https://langchain.readthedocs.io/en/latest/use_cases/question_answering.html)
- - End-to-end Example: [Question Answering over Notion Database](https://github.com/hwchase17/notion-qa)
-
- **💬 Chatbots**
-
- - [Documentation](https://langchain.readthedocs.io/en/latest/use_cases/chatbots.html)
- - End-to-end Example: [Chat-LangChain](https://github.com/hwchase17/chat-langchain)
-
- **🤖 Agents**
-
- - [Documentation](https://langchain.readthedocs.io/en/latest/use_cases/agents.html)
- - End-to-end Example: [GPT+WolframAlpha](https://huggingface.co/spaces/JavaFXpert/Chat-GPT-LangChain)
-
- ## 📖 Documentation
-
- Please see [here](https://langchain.readthedocs.io/en/latest/?) for full documentation on:
-
- - Getting started (installation, setting up the environment, simple examples)
- - How-To examples (demos, integrations, helper functions)
- - Reference (full API docs)
- - Resources (high-level explanation of core concepts)
-
- ## 🚀 What can this help with?
-
- There are six main areas that LangChain is designed to help with.
- These are, in increasing order of complexity:
-
- **📃 LLMs and Prompts:**
-
- This includes prompt management, prompt optimization, generic interface for all LLMs, and common utilities for working with LLMs.
-
- **🔗 Chains:**
-
- Chains go beyond just a single LLM call, and are sequences of calls (whether to an LLM or a different utility). LangChain provides a standard interface for chains, lots of integrations with other tools, and end-to-end chains for common applications.
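
For illustration, a minimal chain combining a prompt template with an LLM might look like this (assuming an OpenAI API key is configured; the prompt text is only an example):

```python
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate

# Assumes OPENAI_API_KEY is set in the environment.
llm = OpenAI(temperature=0.9)
prompt = PromptTemplate(
    input_variables=["product"],
    template="What is a good name for a company that makes {product}?",
)
chain = LLMChain(llm=llm, prompt=prompt)
print(chain.run("colorful socks"))
```
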
-
- **📚 Data Augmented Generation:**
-
- Data Augmented Generation involves specific types of chains that first interact with an external datasource to fetch data to use in the generation step. Examples of this include summarization of long pieces of text and question/answering over specific data sources.
-
- **🤖 Agents:**
-
- Agents involve an LLM making decisions about which Actions to take, taking that Action, seeing an Observation, and repeating that until done. LangChain provides a standard interface for agents, a selection of agents to choose from, and examples of end-to-end agents.
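
As a rough sketch (assuming OpenAI and SerpAPI keys are configured; the tool names and question are illustrative), an agent can be assembled like this:

```python
from langchain.agents import initialize_agent, load_tools
from langchain.llms import OpenAI

# Assumes OPENAI_API_KEY and SERPAPI_API_KEY are set in the environment.
llm = OpenAI(temperature=0)
tools = load_tools(["serpapi", "llm-math"], llm=llm)
agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)
agent.run("Who won the most recent FIFA World Cup? What is the year of that tournament raised to the 0.5 power?")
```
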
-
- **🧠 Memory:**
-
- Memory is the concept of persisting state between calls of a chain/agent. LangChain provides a standard interface for memory, a collection of memory implementations, and examples of chains/agents that use memory.
-
- **🧐 Evaluation:**
-
- [BETA] Generative models are notoriously hard to evaluate with traditional metrics. One new way of evaluating them is using language models themselves to do the evaluation. LangChain provides some prompts/chains for assisting in this.
-
- For more information on these concepts, please see our [full documentation](https://langchain.readthedocs.io/en/latest/?).
-
- ## 💁 Contributing
-
- As an open source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infra, or better documentation.
-
- For detailed information on how to contribute, see [here](CONTRIBUTING.md).

AllInOneApp/langchain/docs/Makefile DELETED
@@ -1,21 +0,0 @@
- # Minimal makefile for Sphinx documentation
- #
-
- # You can set these variables from the command line, and also
- # from the environment for the first two.
- SPHINXOPTS ?=
- SPHINXBUILD ?= sphinx-build
- SPHINXAUTOBUILD ?= sphinx-autobuild
- SOURCEDIR = .
- BUILDDIR = _build
-
- # Put it first so that "make" without argument is like "make help".
- help:
- @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
-
- .PHONY: help Makefile
-
- # Catch-all target: route all unknown targets to Sphinx using the new
- # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
- %: Makefile
- @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

AllInOneApp/langchain/docs/_static/HeliconeDashboard.png DELETED
Binary file (241 kB)
 
AllInOneApp/langchain/docs/_static/HeliconeKeys.png DELETED
Binary file (151 kB)
 
AllInOneApp/langchain/docs/_static/css/custom.css DELETED
@@ -1,13 +0,0 @@
- pre {
-   white-space: break-spaces;
- }
-
- @media (min-width: 1200px) {
-   .container,
-   .container-lg,
-   .container-md,
-   .container-sm,
-   .container-xl {
-     max-width: 2560px !important;
-   }
- }

AllInOneApp/langchain/docs/conf.py DELETED
@@ -1,105 +0,0 @@
- """Configuration file for the Sphinx documentation builder."""
- # Configuration file for the Sphinx documentation builder.
- #
- # This file only contains a selection of the most common options. For a full
- # list see the documentation:
- # https://www.sphinx-doc.org/en/master/usage/configuration.html
-
- # -- Path setup --------------------------------------------------------------
-
- # If extensions (or modules to document with autodoc) are in another directory,
- # add these directories to sys.path here. If the directory is relative to the
- # documentation root, use os.path.abspath to make it absolute, like shown here.
- #
- # import os
- # import sys
- # sys.path.insert(0, os.path.abspath('.'))
-
- import toml
-
- with open("../pyproject.toml") as f:
-     data = toml.load(f)
-
- # -- Project information -----------------------------------------------------
-
- project = "🦜🔗 LangChain"
- copyright = "2022, Harrison Chase"
- author = "Harrison Chase"
-
- version = data["tool"]["poetry"]["version"]
- release = version
-
- html_title = project + " " + version
-
-
- # -- General configuration ---------------------------------------------------
-
- # Add any Sphinx extension module names here, as strings. They can be
- # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
- # ones.
- extensions = [
-     "sphinx.ext.autodoc",
-     "sphinx.ext.autodoc.typehints",
-     "sphinx.ext.autosummary",
-     "sphinx.ext.napoleon",
-     "sphinx.ext.viewcode",
-     "sphinxcontrib.autodoc_pydantic",
-     "myst_nb",
-     "sphinx_panels",
-     "IPython.sphinxext.ipython_console_highlighting",
- ]
- source_suffix = [".ipynb", ".html", ".md", ".rst"]
-
- autodoc_pydantic_model_show_json = False
- autodoc_pydantic_field_list_validators = False
- autodoc_pydantic_config_members = False
- autodoc_pydantic_model_show_config_summary = False
- autodoc_pydantic_model_show_validator_members = False
- autodoc_pydantic_model_show_field_summary = False
- autodoc_pydantic_model_members = False
- autodoc_pydantic_model_undoc_members = False
- # autodoc_typehints = "signature"
- # autodoc_typehints = "description"
-
- # Add any paths that contain templates here, relative to this directory.
- templates_path = ["_templates"]
-
- # List of patterns, relative to source directory, that match files and
- # directories to ignore when looking for source files.
- # This pattern also affects html_static_path and html_extra_path.
- exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
-
-
- # -- Options for HTML output -------------------------------------------------
-
- # The theme to use for HTML and HTML Help pages. See the documentation for
- # a list of builtin themes.
- #
- html_theme = "sphinx_book_theme"
-
- html_theme_options = {
-     "path_to_docs": "docs",
-     "repository_url": "https://github.com/hwchase17/langchain",
-     "use_repository_button": True,
- }
-
- html_context = {
-     "display_github": True, # Integrate GitHub
-     "github_user": "hwchase17", # Username
-     "github_repo": "langchain", # Repo name
-     "github_version": "master", # Version
-     "conf_py_path": "/docs/", # Path in the checkout to the docs root
- }
-
- # Add any paths that contain custom static files (such as style sheets) here,
- # relative to this directory. They are copied after the builtin static files,
- # so a file named "default.css" will overwrite the builtin "default.css".
- html_static_path = ["_static"]
-
- # These paths are either relative to html_static_path
- # or fully qualified paths (eg. https://...)
- html_css_files = [
-     "css/custom.css",
- ]
- nb_execution_mode = "off"
- myst_enable_extensions = ["colon_fence"]

AllInOneApp/langchain/docs/deployments.md DELETED
@@ -1,39 +0,0 @@
- # Deployments
-
- So you've made a really cool chain - now what? How do you deploy it and make it easily sharable with the world?
-
- This section covers several options for that.
- Note that these are meant as quick deployment options for prototypes and demos, and not for production systems.
- If you are looking for help with deployment of a production system, please contact us directly.
-
- What follows is a list of template GitHub repositories that are intended to be
- very easy to fork and modify to use your chain.
- This is far from an exhaustive list of options, and we are EXTREMELY open to contributions here.
-
- ## [Streamlit](https://github.com/hwchase17/langchain-streamlit-template)
-
- This repo serves as a template for how to deploy a LangChain with Streamlit.
- It implements a chatbot interface.
- It also contains instructions for how to deploy this app on the Streamlit platform.
-
- ## [Gradio (on Hugging Face)](https://github.com/hwchase17/langchain-gradio-template)
-
- This repo serves as a template for how to deploy a LangChain with Gradio.
- It implements a chatbot interface, with a "Bring-Your-Own-Token" approach (nice for not racking up big bills).
- It also contains instructions for how to deploy this app on the Hugging Face platform.
- This is heavily influenced by James Weaver's [excellent examples](https://huggingface.co/JavaFXpert).
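
As a rough sketch of the pattern these templates follow (not the template's actual code), a chain can be exposed through a Gradio interface in a few lines; the stub reply below stands in for a real chain call:

```python
import gradio as gr


def respond(message: str) -> str:
    # Replace this stub with a call into your chain, e.g. `return chain.run(message)`.
    return f"Echo: {message}"


gr.Interface(fn=respond, inputs="text", outputs="text").launch()
```
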
-
- ## [Beam](https://github.com/slai-labs/get-beam/tree/main/examples/langchain-question-answering)
-
- This repo serves as a template for how to deploy a LangChain with [Beam](https://beam.cloud).
-
- It implements a Question Answering app and contains instructions for deploying the app as a serverless REST API.
-
- ## [Vercel](https://github.com/homanp/vercel-langchain)
-
- A minimal example on how to run LangChain on Vercel using Flask.
-
-
- ## [Steamship](https://github.com/steamship-core/steamship-langchain/)
- This repository contains LangChain adapters for Steamship, enabling LangChain developers to rapidly deploy their apps on Steamship.
- This includes: production-ready endpoints, horizontal scaling across dependencies, persistent storage of app state, multi-tenancy support, etc.

AllInOneApp/langchain/docs/ecosystem.rst DELETED
@@ -1,10 +0,0 @@
- LangChain Ecosystem
- ===================
-
- Guides for how other companies/products can be used with LangChain
-
- .. toctree::
-    :maxdepth: 1
-    :glob:
-
-    ecosystem/*

AllInOneApp/langchain/docs/ecosystem/ai21.md DELETED
@@ -1,16 +0,0 @@
- # AI21 Labs
-
- This page covers how to use the AI21 ecosystem within LangChain.
- It is broken into two parts: installation and setup, and then references to specific AI21 wrappers.
-
- ## Installation and Setup
- - Get an AI21 API key and set it as an environment variable (`AI21_API_KEY`)
-
- ## Wrappers
-
- ### LLM
-
- There exists an AI21 LLM wrapper, which you can access with
- ```python
- from langchain.llms import AI21
- ```

AllInOneApp/langchain/docs/ecosystem/bananadev.md DELETED
@@ -1,74 +0,0 @@
- # Banana
-
- This page covers how to use the Banana ecosystem within LangChain.
- It is broken into two parts: installation and setup, and then references to specific Banana wrappers.
-
- ## Installation and Setup
- - Install with `pip3 install banana-dev`
- - Get a Banana API key and set it as an environment variable (`BANANA_API_KEY`)
-
- ## Define your Banana Template
-
- If you want to use an available language model template you can find one [here](https://app.banana.dev/templates/conceptofmind/serverless-template-palmyra-base).
- This template uses the Palmyra-Base model by [Writer](https://writer.com/product/api/).
- You can check out an example Banana repository [here](https://github.com/conceptofmind/serverless-template-palmyra-base).
-
- ## Build the Banana app
-
- You must include an output in the result. There is a rigid response structure.
- ```python
- # Return the results as a dictionary
- result = {'output': result}
- ```
-
- An example inference function would be:
- ```python
- def inference(model_inputs:dict) -> dict:
-     global model
-     global tokenizer
-
-     # Parse out your arguments
-     prompt = model_inputs.get('prompt', None)
-     if prompt == None:
-         return {'message': "No prompt provided"}
-
-     # Run the model
-     input_ids = tokenizer.encode(prompt, return_tensors='pt').cuda()
-     output = model.generate(
-         input_ids,
-         max_length=100,
-         do_sample=True,
-         top_k=50,
-         top_p=0.95,
-         num_return_sequences=1,
-         temperature=0.9,
-         early_stopping=True,
-         no_repeat_ngram_size=3,
-         num_beams=5,
-         length_penalty=1.5,
-         repetition_penalty=1.5,
-         bad_words_ids=[[tokenizer.encode(' ', add_prefix_space=True)[0]]]
-     )
-
-     result = tokenizer.decode(output[0], skip_special_tokens=True)
-     # Return the results as a dictionary
-     result = {'output': result}
-     return result
- ```
-
- You can find a full example of a Banana app [here](https://github.com/conceptofmind/serverless-template-palmyra-base/blob/main/app.py).
-
-
- ## Wrappers
-
- ### LLM
-
- There exists a Banana LLM wrapper, which you can access with
- ```python
- from langchain.llms import Banana
- ```
-
- You need to provide a model key located in the dashboard:
- ```python
- llm = Banana(model_key="YOUR_MODEL_KEY")
- ```

AllInOneApp/langchain/docs/ecosystem/cerebriumai.md DELETED
@@ -1,17 +0,0 @@
- # CerebriumAI
-
- This page covers how to use the CerebriumAI ecosystem within LangChain.
- It is broken into two parts: installation and setup, and then references to specific CerebriumAI wrappers.
-
- ## Installation and Setup
- - Install with `pip install cerebrium`
- - Get a CerebriumAI API key and set it as an environment variable (`CEREBRIUMAI_API_KEY`)
-
- ## Wrappers
-
- ### LLM
-
- There exists a CerebriumAI LLM wrapper, which you can access with
- ```python
- from langchain.llms import CerebriumAI
- ```

AllInOneApp/langchain/docs/ecosystem/chroma.md DELETED
@@ -1,20 +0,0 @@
- # Chroma
-
- This page covers how to use the Chroma ecosystem within LangChain.
- It is broken into two parts: installation and setup, and then references to specific Chroma wrappers.
-
- ## Installation and Setup
- - Install the Python package with `pip install chromadb`
- ## Wrappers
-
- ### VectorStore
-
- There exists a wrapper around Chroma vector databases, allowing you to use it as a vectorstore,
- whether for semantic search or example selection.
-
- To import this vectorstore:
- ```python
- from langchain.vectorstores import Chroma
- ```
-
- For a more detailed walkthrough of the Chroma wrapper, see [this notebook](../modules/indexes/examples/vectorstores.ipynb)
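
As a rough usage sketch (assuming an OpenAI API key for the embeddings; the texts and query are illustrative):

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma

# Assumes OPENAI_API_KEY is set; any Embeddings implementation works here.
texts = [
    "LangChain helps compose LLM calls into chains.",
    "Chroma stores embeddings and lets you search over them.",
]
docsearch = Chroma.from_texts(texts, OpenAIEmbeddings())
docs = docsearch.similarity_search("What does Chroma store?")
print(docs[0].page_content)
```
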
AllInOneApp/langchain/docs/ecosystem/cohere.md DELETED
@@ -1,25 +0,0 @@
- # Cohere
-
- This page covers how to use the Cohere ecosystem within LangChain.
- It is broken into two parts: installation and setup, and then references to specific Cohere wrappers.
-
- ## Installation and Setup
- - Install the Python SDK with `pip install cohere`
- - Get a Cohere API key and set it as an environment variable (`COHERE_API_KEY`)
-
- ## Wrappers
-
- ### LLM
-
- There exists a Cohere LLM wrapper, which you can access with
- ```python
- from langchain.llms import Cohere
- ```
-
- ### Embeddings
-
- There exists a Cohere Embeddings wrapper, which you can access with
- ```python
- from langchain.embeddings import CohereEmbeddings
- ```
- For a more detailed walkthrough of this, see [this notebook](../modules/indexes/examples/embeddings.ipynb)
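
A rough usage sketch (assuming `COHERE_API_KEY` is set; the texts are illustrative):

```python
from langchain.embeddings import CohereEmbeddings

embeddings = CohereEmbeddings()
query_vector = embeddings.embed_query("Hello world")
doc_vectors = embeddings.embed_documents(["Hello world", "Goodbye world"])
print(len(query_vector), len(doc_vectors))
```
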
AllInOneApp/langchain/docs/ecosystem/deepinfra.md DELETED
@@ -1,17 +0,0 @@
- # DeepInfra
-
- This page covers how to use the DeepInfra ecosystem within LangChain.
- It is broken into two parts: installation and setup, and then references to specific DeepInfra wrappers.
-
- ## Installation and Setup
- - Get your DeepInfra API key from [here](https://deepinfra.com/).
- - Set it as an environment variable (`DEEPINFRA_API_TOKEN`)
-
- ## Wrappers
-
- ### LLM
-
- There exists a DeepInfra LLM wrapper, which you can access with
- ```python
- from langchain.llms import DeepInfra
- ```

AllInOneApp/langchain/docs/ecosystem/forefrontai.md DELETED
@@ -1,16 +0,0 @@
- # ForefrontAI
-
- This page covers how to use the ForefrontAI ecosystem within LangChain.
- It is broken into two parts: installation and setup, and then references to specific ForefrontAI wrappers.
-
- ## Installation and Setup
- - Get a ForefrontAI API key and set it as an environment variable (`FOREFRONTAI_API_KEY`)
-
- ## Wrappers
-
- ### LLM
-
- There exists a ForefrontAI LLM wrapper, which you can access with
- ```python
- from langchain.llms import ForefrontAI
- ```

AllInOneApp/langchain/docs/ecosystem/google_search.md DELETED
@@ -1,32 +0,0 @@
- # Google Search Wrapper
-
- This page covers how to use the Google Search API within LangChain.
- It is broken into two parts: installation and setup, and then references to the specific Google Search wrapper.
-
- ## Installation and Setup
- - Install requirements with `pip install google-api-python-client`
- - Set up a Custom Search Engine, following [these instructions](https://stackoverflow.com/questions/37083058/programmatically-searching-google-in-python-using-custom-search)
- - Get an API Key and Custom Search Engine ID from the previous step, and set them as environment variables `GOOGLE_API_KEY` and `GOOGLE_CSE_ID` respectively
-
- ## Wrappers
-
- ### Utility
-
- There exists a GoogleSearchAPIWrapper utility which wraps this API. To import this utility:
-
- ```python
- from langchain.utilities import GoogleSearchAPIWrapper
- ```
-
- For a more detailed walkthrough of this wrapper, see [this notebook](../modules/utils/examples/google_search.ipynb).
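
A rough usage sketch (assuming `GOOGLE_API_KEY` and `GOOGLE_CSE_ID` are set as described above; the query is illustrative):

```python
from langchain.utilities import GoogleSearchAPIWrapper

search = GoogleSearchAPIWrapper()
print(search.run("What is LangChain?"))
```
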
-
- ### Tool
-
- You can also easily load this wrapper as a Tool (to use with an Agent).
- You can do this with:
- ```python
- from langchain.agents import load_tools
- tools = load_tools(["google-search"])
- ```
-
- For more information on this, see [this page](../modules/agents/tools.md)

AllInOneApp/langchain/docs/ecosystem/google_serper.md DELETED
@@ -1,71 +0,0 @@
- # Google Serper Wrapper
-
- This page covers how to use the [Serper](https://serper.dev) Google Search API within LangChain. Serper is a low-cost Google Search API that can be used to add answer box, knowledge graph, and organic results data from Google Search.
- It is broken into two parts: setup, and then references to the specific Google Serper wrapper.
-
- ## Setup
- - Go to [serper.dev](https://serper.dev) to sign up for a free account
- - Get the API key and set it as an environment variable (`SERPER_API_KEY`)
-
- ## Wrappers
-
- ### Utility
-
- There exists a GoogleSerperAPIWrapper utility which wraps this API. To import this utility:
-
- ```python
- from langchain.utilities import GoogleSerperAPIWrapper
- ```
-
- You can use it as part of a Self Ask chain:
-
- ```python
- from langchain.utilities import GoogleSerperAPIWrapper
- from langchain.llms.openai import OpenAI
- from langchain.agents import initialize_agent, Tool
-
- import os
-
- os.environ["SERPER_API_KEY"] = ""
- os.environ['OPENAI_API_KEY'] = ""
-
- llm = OpenAI(temperature=0)
- search = GoogleSerperAPIWrapper()
- tools = [
-     Tool(
-         name="Intermediate Answer",
-         func=search.run
-     )
- ]
-
- self_ask_with_search = initialize_agent(tools, llm, agent="self-ask-with-search", verbose=True)
- self_ask_with_search.run("What is the hometown of the reigning men's U.S. Open champion?")
- ```
-
- #### Output
- ```
- Entering new AgentExecutor chain...
- Yes.
- Follow up: Who is the reigning men's U.S. Open champion?
- Intermediate answer: Current champions Carlos Alcaraz, 2022 men's singles champion.
- Follow up: Where is Carlos Alcaraz from?
- Intermediate answer: El Palmar, Spain
- So the final answer is: El Palmar, Spain
-
- > Finished chain.
-
- 'El Palmar, Spain'
- ```
-
- For a more detailed walkthrough of this wrapper, see [this notebook](../modules/utils/examples/google_serper.ipynb).
-
- ### Tool
-
- You can also easily load this wrapper as a Tool (to use with an Agent).
- You can do this with:
- ```python
- from langchain.agents import load_tools
- tools = load_tools(["google-serper"])
- ```
-
- For more information on this, see [this page](../modules/agents/tools.md)

AllInOneApp/langchain/docs/ecosystem/gooseai.md DELETED
@@ -1,23 +0,0 @@
- # GooseAI
-
- This page covers how to use the GooseAI ecosystem within LangChain.
- It is broken into two parts: installation and setup, and then references to specific GooseAI wrappers.
-
- ## Installation and Setup
- - Install the Python SDK with `pip install openai`
- - Get your GooseAI API key from [here](https://goose.ai/).
- - Set the environment variable (`GOOSEAI_API_KEY`).
-
- ```python
- import os
- os.environ["GOOSEAI_API_KEY"] = "YOUR_API_KEY"
- ```
-
- ## Wrappers
-
- ### LLM
-
- There exists a GooseAI LLM wrapper, which you can access with:
- ```python
- from langchain.llms import GooseAI
- ```

AllInOneApp/langchain/docs/ecosystem/graphsignal.md DELETED
@@ -1,38 +0,0 @@
- # Graphsignal
-
- This page covers how to use Graphsignal to trace and monitor LangChain.
-
- ## Installation and Setup
-
- - Install the Python library with `pip install graphsignal`
- - Create a free Graphsignal account [here](https://graphsignal.com)
- - Get an API key and set it as an environment variable (`GRAPHSIGNAL_API_KEY`)
-
- ## Tracing and Monitoring
-
- Graphsignal automatically instruments and starts tracing and monitoring chains. Traces, metrics and errors are then available in your [Graphsignal dashboard](https://app.graphsignal.com/). No prompts or other sensitive data are sent to Graphsignal cloud, only statistics and metadata.
-
- Initialize the tracer by providing a deployment name:
-
- ```python
- import graphsignal
-
- graphsignal.configure(deployment='my-langchain-app-prod')
- ```
-
- In order to trace full runs and see a breakdown by chains and tools, you can wrap the calling routine or use a decorator:
-
- ```python
- with graphsignal.start_trace('my-chain'):
-     chain.run("some initial text")
- ```
-
- Optionally, enable profiling to record function-level statistics for each trace.
-
- ```python
- with graphsignal.start_trace(
-         'my-chain', options=graphsignal.TraceOptions(enable_profiling=True)):
-     chain.run("some initial text")
- ```
-
- See the [Quick Start](https://graphsignal.com/docs/guides/quick-start/) guide for complete setup instructions.

AllInOneApp/langchain/docs/ecosystem/hazy_research.md DELETED
@@ -1,19 +0,0 @@
- # Hazy Research
-
- This page covers how to use the Hazy Research ecosystem within LangChain.
- It is broken into two parts: installation and setup, and then references to specific Hazy Research wrappers.
-
- ## Installation and Setup
- - To use `manifest`, install it with `pip install manifest-ml`
-
- ## Wrappers
-
- ### LLM
-
- There exists an LLM wrapper around Hazy Research's `manifest` library.
- `manifest` is a Python library which is itself a wrapper around many model providers, and adds in caching, history, and more.
-
- To use this wrapper:
- ```python
- from langchain.llms.manifest import ManifestWrapper
- ```

AllInOneApp/langchain/docs/ecosystem/helicone.md DELETED
@@ -1,53 +0,0 @@
- # Helicone
-
- This page covers how to use [Helicone](https://helicone.ai) within LangChain.
-
- ## What is Helicone?
-
- Helicone is an [open source](https://github.com/Helicone/helicone) observability platform that proxies your OpenAI traffic and provides you with key insights into your spend, latency and usage.
-
- ![Helicone](../_static/HeliconeDashboard.png)
-
- ## Quick start
-
- With your LangChain environment you can just add the following parameter.
-
- ```bash
- export OPENAI_API_BASE="https://oai.hconeai.com/v1"
- ```
-
- Now head over to [helicone.ai](https://helicone.ai/onboarding?step=2) to create your account, and add your OpenAI API key within our dashboard to view your logs.
-
- ![Helicone](../_static/HeliconeKeys.png)
-
- ## How to enable Helicone caching
-
- ```python
- from langchain.llms import OpenAI
- import openai
- openai.api_base = "https://oai.hconeai.com/v1"
-
- llm = OpenAI(temperature=0.9, headers={"Helicone-Cache-Enabled": "true"})
- text = "What is a helicone?"
- print(llm(text))
- ```
-
- [Helicone caching docs](https://docs.helicone.ai/advanced-usage/caching)
-
- ## How to use Helicone custom properties
-
- ```python
- from langchain.llms import OpenAI
- import openai
- openai.api_base = "https://oai.hconeai.com/v1"
-
- llm = OpenAI(temperature=0.9, headers={
-     "Helicone-Property-Session": "24",
-     "Helicone-Property-Conversation": "support_issue_2",
-     "Helicone-Property-App": "mobile",
- })
- text = "What is a helicone?"
- print(llm(text))
- ```
-
- [Helicone property docs](https://docs.helicone.ai/advanced-usage/custom-properties)

AllInOneApp/langchain/docs/ecosystem/huggingface.md DELETED
@@ -1,69 +0,0 @@
- # Hugging Face
-
- This page covers how to use the Hugging Face ecosystem (including the [Hugging Face Hub](https://huggingface.co)) within LangChain.
- It is broken into two parts: installation and setup, and then references to specific Hugging Face wrappers.
-
- ## Installation and Setup
-
- If you want to work with the Hugging Face Hub:
- - Install the Hub client library with `pip install huggingface_hub`
- - Create a Hugging Face account (it's free!)
- - Create an [access token](https://huggingface.co/docs/hub/security-tokens) and set it as an environment variable (`HUGGINGFACEHUB_API_TOKEN`)
-
- If you want to work with the Hugging Face Python libraries:
- - Install `pip install transformers` for working with models and tokenizers
- - Install `pip install datasets` for working with datasets
-
- ## Wrappers
-
- ### LLM
-
- There exist two Hugging Face LLM wrappers, one for a local pipeline and one for a model hosted on Hugging Face Hub.
- Note that these wrappers only work for models that support the following tasks: [`text2text-generation`](https://huggingface.co/models?library=transformers&pipeline_tag=text2text-generation&sort=downloads), [`text-generation`](https://huggingface.co/models?library=transformers&pipeline_tag=text-generation&sort=downloads)
-
- To use the local pipeline wrapper:
- ```python
- from langchain.llms import HuggingFacePipeline
- ```
-
- To use the wrapper for a model hosted on Hugging Face Hub:
- ```python
- from langchain.llms import HuggingFaceHub
- ```
- For a more detailed walkthrough of the Hugging Face Hub wrapper, see [this notebook](../modules/llms/integrations/huggingface_hub.ipynb)
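
A rough usage sketch of the Hub wrapper (assuming `HUGGINGFACEHUB_API_TOKEN` is set; the repo id and parameters are illustrative):

```python
from langchain.llms import HuggingFaceHub

llm = HuggingFaceHub(repo_id="google/flan-t5-xl", model_kwargs={"temperature": 0.5, "max_length": 64})
print(llm("Translate to German: How old are you?"))
```
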
-
-
- ### Embeddings
-
- There exist two Hugging Face Embeddings wrappers, one for a local model and one for a model hosted on Hugging Face Hub.
- Note that these wrappers only work for [`sentence-transformers` models](https://huggingface.co/models?library=sentence-transformers&sort=downloads).
-
- To use the local pipeline wrapper:
- ```python
- from langchain.embeddings import HuggingFaceEmbeddings
- ```
-
- To use the wrapper for a model hosted on Hugging Face Hub:
- ```python
- from langchain.embeddings import HuggingFaceHubEmbeddings
- ```
- For a more detailed walkthrough of this, see [this notebook](../modules/indexes/examples/embeddings.ipynb)
-
- ### Tokenizer
-
- There are several places you can use tokenizers available through the `transformers` package.
- By default, it is used to count tokens for all LLMs.
-
- You can also use it to count tokens when splitting documents with
- ```python
- from langchain.text_splitter import CharacterTextSplitter
- CharacterTextSplitter.from_huggingface_tokenizer(...)
- ```
- For a more detailed walkthrough of this, see [this notebook](../modules/indexes/examples/textsplitter.ipynb)
-
-
- ### Datasets
-
- The Hugging Face Hub has lots of great [datasets](https://huggingface.co/datasets) that can be used to evaluate your LLM chains.
-
- For a detailed walkthrough of how to use them to do so, see [this notebook](../use_cases/evaluation/huggingface_datasets.ipynb)

AllInOneApp/langchain/docs/ecosystem/modal.md DELETED
@@ -1,66 +0,0 @@
- # Modal
-
- This page covers how to use the Modal ecosystem within LangChain.
- It is broken into two parts: installation and setup, and then references to specific Modal wrappers.
-
- ## Installation and Setup
- - Install with `pip install modal-client`
- - Run `modal token new`
-
- ## Define your Modal Functions and Webhooks
-
- You must include a prompt. There is a rigid response structure.
-
- ```python
- class Item(BaseModel):
-     prompt: str
-
- @stub.webhook(method="POST")
- def my_webhook(item: Item):
-     return {"prompt": my_function.call(item.prompt)}
- ```
-
- An example with GPT2:
-
- ```python
- from pydantic import BaseModel
-
- import modal
-
- stub = modal.Stub("example-get-started")
-
- volume = modal.SharedVolume().persist("gpt2_model_vol")
- CACHE_PATH = "/root/model_cache"
-
- @stub.function(
-     gpu="any",
-     image=modal.Image.debian_slim().pip_install(
-         "tokenizers", "transformers", "torch", "accelerate"
-     ),
-     shared_volumes={CACHE_PATH: volume},
-     retries=3,
- )
- def run_gpt2(text: str):
-     from transformers import GPT2Tokenizer, GPT2LMHeadModel
-     tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
-     model = GPT2LMHeadModel.from_pretrained('gpt2')
-     encoded_input = tokenizer(text, return_tensors='pt').input_ids
-     output = model.generate(encoded_input, max_length=50, do_sample=True)
-     return tokenizer.decode(output[0], skip_special_tokens=True)
-
- class Item(BaseModel):
-     prompt: str
-
- @stub.webhook(method="POST")
- def get_text(item: Item):
-     return {"prompt": run_gpt2.call(item.prompt)}
- ```
-
- ## Wrappers
-
- ### LLM
-
- There exists a Modal LLM wrapper, which you can access with
- ```python
- from langchain.llms import Modal
- ```

AllInOneApp/langchain/docs/ecosystem/nlpcloud.md DELETED
@@ -1,17 +0,0 @@
- # NLPCloud
-
- This page covers how to use the NLPCloud ecosystem within LangChain.
- It is broken into two parts: installation and setup, and then references to specific NLPCloud wrappers.
-
- ## Installation and Setup
- - Install the Python SDK with `pip install nlpcloud`
- - Get an NLPCloud API key and set it as an environment variable (`NLPCLOUD_API_KEY`)
-
- ## Wrappers
-
- ### LLM
-
- There exists an NLPCloud LLM wrapper, which you can access with
- ```python
- from langchain.llms import NLPCloud
- ```

AllInOneApp/langchain/docs/ecosystem/openai.md DELETED
@@ -1,55 +0,0 @@
- # OpenAI
-
- This page covers how to use the OpenAI ecosystem within LangChain.
- It is broken into two parts: installation and setup, and then references to specific OpenAI wrappers.
-
- ## Installation and Setup
- - Install the Python SDK with `pip install openai`
- - Get an OpenAI API key and set it as an environment variable (`OPENAI_API_KEY`)
- - If you want to use OpenAI's tokenizer (only available for Python 3.9+), install it with `pip install tiktoken`
-
- ## Wrappers
-
- ### LLM
-
- There exists an OpenAI LLM wrapper, which you can access with
- ```python
- from langchain.llms import OpenAI
- ```
-
- If you are using a model hosted on Azure, you should use a different wrapper for that:
- ```python
- from langchain.llms import AzureOpenAI
- ```
- For a more detailed walkthrough of the Azure wrapper, see [this notebook](../modules/llms/integrations/azure_openai_example.ipynb)
-
-
-
- ### Embeddings
-
- There exists an OpenAI Embeddings wrapper, which you can access with
- ```python
- from langchain.embeddings import OpenAIEmbeddings
- ```
- For a more detailed walkthrough of this, see [this notebook](../modules/indexes/examples/embeddings.ipynb)
-
-
- ### Tokenizer
-
- There are several places you can use the `tiktoken` tokenizer. By default, it is used to count tokens
- for OpenAI LLMs.
-
- You can also use it to count tokens when splitting documents with
- ```python
- from langchain.text_splitter import CharacterTextSplitter
- CharacterTextSplitter.from_tiktoken_encoder(...)
- ```
- For a more detailed walkthrough of this, see [this notebook](../modules/indexes/examples/textsplitter.ipynb)
-
- ### Moderation
- You can also access the OpenAI content moderation endpoint with
-
- ```python
- from langchain.chains import OpenAIModerationChain
- ```
- For a more detailed walkthrough of this, see [this notebook](../modules/chains/examples/moderation.ipynb)
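
A rough usage sketch of the moderation chain (assuming `OPENAI_API_KEY` is set; the input string is illustrative):

```python
from langchain.chains import OpenAIModerationChain

moderation_chain = OpenAIModerationChain()
print(moderation_chain.run("This is perfectly harmless text."))
```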