gfhayworth committed
Commit
7d7fb11
1 Parent(s): b63cdcc

Update app.py


remove unused components

Files changed (1)
  app.py +37 -41
app.py CHANGED
@@ -41,9 +41,7 @@ from sentence_transformers import SentenceTransformer, CrossEncoder, util
 from torch import tensor as torch_tensor
 from datasets import load_dataset
 
-
 from greg_funcs import get_llm_response
-from wiki_funcs import mysearch, mygreetings
 
 
 OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
@@ -68,7 +66,7 @@ tmdb_bearer_token = os.environ["TMDB_BEARER_TOKEN"]
 
 TOOLS_LIST = ['serpapi', 'wolfram-alpha', 'pal-math', 'pal-colored-objects', 'news-api', 'tmdb-api',
               'open-meteo-api'] # 'google-search'
-TOOLS_DEFAULT_LIST = ['mysearch']
+TOOLS_DEFAULT_LIST = ['serpapi']
 BUG_FOUND_MSG = "Congratulations, you've found a bug in this application!"
 AUTH_ERR_MSG = "Please paste your OpenAI key from openai.com to use this application. It is not necessary to hit a button or key after pasting it."
 MAX_TOKENS = 512
@@ -123,8 +121,6 @@ EXAMPLES = ["What is the name of the plan described by this summary of benefits?
 AUTHORS = """
 <p>This application, developed by <b>Greg Hayworth, Srikanth Tangelloju, Lincoln Snyder, Michal Piekarczyk, and Xingde Jiang</b>,
 demonstrates a conversational agent implemented with OpenAI GPT-3.5 and LangChain.
-When necessary, it leverages tools for complex math, searching the internet, and accessing news and weather.
-Uses talking heads from <a href='https://exh.ai/'>Ex-Human</a>.
 For faster inference without waiting in queue, you may duplicate the space.
 </p>"""
 # UNCOMMENT TO USE WHISPER
@@ -268,42 +264,42 @@ def transform_text(desc, express_chain, num_words, formality,
     return generated_text
 
 
-def load_chain(tools_list, llm):
-    chain = None
-    express_chain = None
-    if llm:
-        print("\ntools_list", tools_list)
-        tool_names = tools_list
-        # tools = load_tools(tool_names, llm=llm, news_api_key=news_api_key,
-        #                    tmdb_bearer_token=tmdb_bearer_token)
-        # tools = load_tools(tool_names, llm=llm)
-        tools = [mysearch, mygreetings]
-        memory = ConversationBufferMemory(memory_key="chat_history")
-
-        chain = initialize_agent(
-            tools, llm, agent="conversational-react-description", verbose=True, memory=memory)
-        express_chain = LLMChain(llm=llm, prompt=PROMPT_TEMPLATE, verbose=True)
-
-    return chain, express_chain
-
-
-def set_openai_api_key(api_key):
-    """Set the api key and return chain.
-    If no api_key, then None is returned.
-    """
-    # if api_key and api_key.startswith("sk-") and len(api_key) > 50:
-    #     # os.environ["OPENAI_API_KEY"] = api_key
-    #     os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
-    #     llm = OpenAI(temperature=TEMPERATURE, max_tokens=MAX_TOKENS)
-    #     chain, express_chain = load_chain(TOOLS_DEFAULT_LIST, llm)
-    #     os.environ["OPENAI_API_KEY"] = ""
-    #     return chain, express_chain, llm
-    # return None, None, None
-
-    os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
-    llm = OpenAI(temperature=TEMPERATURE, max_tokens=MAX_TOKENS)
-    chain, express_chain = load_chain(TOOLS_DEFAULT_LIST, llm)
-    return chain, express_chain, llm
+# def load_chain(tools_list, llm):
+#     chain = None
+#     express_chain = None
+#     if llm:
+#         print("\ntools_list", tools_list)
+#         tool_names = tools_list
+#         # tools = load_tools(tool_names, llm=llm, news_api_key=news_api_key,
+#         #                    tmdb_bearer_token=tmdb_bearer_token)
+#         # tools = load_tools(tool_names, llm=llm)
+#         tools = [mysearch, mygreetings]
+#         memory = ConversationBufferMemory(memory_key="chat_history")
+#
+#         chain = initialize_agent(
+#             tools, llm, agent="conversational-react-description", verbose=True, memory=memory)
+#         express_chain = LLMChain(llm=llm, prompt=PROMPT_TEMPLATE, verbose=True)
+#
+#     return chain, express_chain
+#
+#
+# def set_openai_api_key(api_key):
+#     """Set the api key and return chain.
+#     If no api_key, then None is returned.
+#     """
+#     # if api_key and api_key.startswith("sk-") and len(api_key) > 50:
+#     #     # os.environ["OPENAI_API_KEY"] = api_key
+#     #     os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
+#     #     llm = OpenAI(temperature=TEMPERATURE, max_tokens=MAX_TOKENS)
+#     #     chain, express_chain = load_chain(TOOLS_DEFAULT_LIST, llm)
+#     #     os.environ["OPENAI_API_KEY"] = ""
+#     #     return chain, express_chain, llm
+#     # return None, None, None
+#
+#     os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
+#     llm = OpenAI(temperature=TEMPERATURE, max_tokens=MAX_TOKENS)
+#     chain, express_chain = load_chain(TOOLS_DEFAULT_LIST, llm)
+#     return chain, express_chain, llm
 
 
 def run_chain(chain, inp, capture_hidden_text):
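
For context, a minimal, self-contained sketch of how a default tool list such as ['serpapi'] can be wired into a LangChain conversational agent, in the style of the load_chain() this commit comments out. It is an illustration only, not this Space's actual wiring after the change; the environment variables, temperature/max_tokens values, and import paths (which follow the LangChain releases of this era) are assumptions.

import os
from langchain.llms import OpenAI
from langchain.agents import load_tools, initialize_agent
from langchain.memory import ConversationBufferMemory

# Assumption: OPENAI_API_KEY and SERPAPI_API_KEY are already set in the environment.
TOOLS_DEFAULT_LIST = ['serpapi']

llm = OpenAI(temperature=0, max_tokens=512)

# Resolve tool names into LangChain Tool objects ('serpapi' reads SERPAPI_API_KEY).
tools = load_tools(TOOLS_DEFAULT_LIST, llm=llm)

# Conversation memory so the agent can refer back to earlier turns.
memory = ConversationBufferMemory(memory_key="chat_history")

# Same agent type the commented-out load_chain() used.
chain = initialize_agent(
    tools, llm, agent="conversational-react-description", verbose=True, memory=memory)

# Example turn:
# chain.run(input="What is the weather in Columbus, Ohio today?")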