nickmuchi committed
Commit 4936c22
Parent(s): 9f7471f

Update functions.py

Files changed (1):
  1. functions.py +31 -77
functions.py CHANGED
@@ -30,7 +30,7 @@ from langchain.llms import OpenAI
 from langchain.callbacks.base import CallbackManager
 from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
 from langchain.chains.chat_vector_db.prompts import CONDENSE_QUESTION_PROMPT
-from langchain import VectorDBQAWithSourcesChain
+from langchain import VectorDBQA
 
 
 from langchain.chat_models import ChatOpenAI
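
The import swap above changes the chain's interface, not just its name: the two classes take different input keys and return different output keys. A minimal sketch of the difference, assuming an existing `llm` and a `docsearch` vectorstore:

```python
from langchain import VectorDBQA, VectorDBQAWithSourcesChain

# Old chain: keyed on "question"; returns an "answer" plus a "sources" string.
old_chain = VectorDBQAWithSourcesChain.from_chain_type(
    llm, chain_type="stuff", vectorstore=docsearch
)
old = old_chain({"question": "What drove revenue growth?"})

# New chain: keyed on "query"; returns "result" (and "source_documents"
# when return_source_documents=True, as in the embed_text hunk below).
new_chain = VectorDBQA.from_chain_type(
    llm, chain_type="stuff", vectorstore=docsearch
)
new = new_chain({"query": "What drove revenue growth?"})
```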
@@ -67,23 +67,29 @@ output_parser = RegexParser(
     output_keys=["answer", "score"],
 )
 
-system_template = """Given the following extracted parts of a long document and a question, create a final answer with references ("SOURCES").
-If you don't know the answer, just say that you don't know. Don't try to make up an answer.
+system_template = """Use only the following pieces of finance context to answer the user's question thoroughly.
+Do not use any information not provided in the context.
+If you don't know the answer, just say that you don't know; don't try to make up an answer.
 ALWAYS return a "SOURCES" part in your answer.
+The "SOURCES" part should be a reference to the source of the document from which you got your answer.
 
-In addition to giving an answer, also return a score of how fully it answered the user's question. This should be in the following format:
+An example of your response should be:
 
-Question: [question here]
-Helpful Answer: [answer here]
-Score: [score between 0 and 100]
+```
+The answer is foo
+SOURCES: xyz
+```
 
-Begin!
+If no sources are found, please return the below:
 
-Context:
----------
-{summaries}
----------
-"""
+```
+The answer is foo
+SOURCES: Please refer to references section
+```
+
+Begin!
+----------------
+{context}"""
 
 messages = [
     SystemMessagePromptTemplate.from_template(system_template),
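
For reference, a sketch of how the new template renders once it is paired with a human message. The human-message line is elided from this hunk, so `{question}` here is an assumption based on what the "stuff" QA chain expects, and the system template is abbreviated:

```python
from langchain.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)

# Abbreviated stand-in for the system_template defined in the hunk above.
system_template = """Use only the following pieces of finance context to answer the user's question.
----------------
{context}"""

messages = [
    SystemMessagePromptTemplate.from_template(system_template),
    # Elided from the hunk; assumed to forward the user's query.
    HumanMessagePromptTemplate.from_template("{question}"),
]
prompt = ChatPromptTemplate.from_messages(messages)

# The "stuff" chain substitutes the retrieved chunks for {context}
# and the user's query for {question} before calling the chat model.
rendered = prompt.format_messages(
    context="Operating margin expanded 150bps in FY22. [10-K, p.41]",
    question="What happened to operating margin?",
)
```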
@@ -91,33 +97,6 @@ messages = [
 ]
 prompt = ChatPromptTemplate.from_messages(messages)
 
-#Refine Chain Type Prompt Template
-refine_prompt_template = (
-    "The original question is as follows: {question}\n"
-    "We have provided an existing answer: {existing_answer}\n"
-    "We have the opportunity to refine the existing answer"
-    "(only if needed) with some more context below.\n"
-    "------------\n"
-    "{context_str}\n"
-    "------------\n"
-    "Given the new context, refine the original answer to better "
-    "answer the question. "
-    "If the context isn't useful, return the original answer."
-)
-refine_prompt = PromptTemplate(
-    input_variables=["question", "existing_answer", "context_str"],
-    template=refine_prompt_template,
-)
-
-initial_qa_template = (
-    "Context information is below. \n"
-    "---------------------\n"
-    "{context_str}"
-    "\n---------------------\n"
-    "Given the context information and not prior knowledge, "
-    "answer the question: {question}\n.\n"
-)
-
 ###################### Functions #######################################################################################
 
 @st.experimental_singleton(suppress_st_warning=True)
@@ -204,52 +183,27 @@ def gen_embeddings(embedding_model):
     return embeddings
 
 @st.experimental_memo(suppress_st_warning=True)
-def embed_text(query,title,embedding_model,_docsearch,chain_type):
+def embed_text(query,title,embedding_model,_docsearch):
 
     '''Embed text and generate semantic search scores'''
 
-    llm = OpenAI(temperature=0)
+    # llm = OpenAI(temperature=0)
     chat_llm = ChatOpenAI(streaming=True, callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]), verbose=True, temperature=0)
 
     title = title.split()[0].lower()
-
-    docs = _docsearch.similarity_search_with_score(query, k=3)
-
-    if chain_type == 'Normal':
 
-        docs = [d[0] for d in docs]
+    chain_type_kwargs = {"prompt": prompt}
 
-        # PROMPT = PromptTemplate(template=template,
-        #                         input_variables=["summaries", "question"],
-        #                         output_parser=output_parser)
-
-        chain_type_kwargs = {"prompt": prompt}
-        chain = VectorDBQAWithSourcesChain.from_chain_type(
-        streaming_llm,
+    chain = VectorDBQA.from_chain_type(
+        llm=streaming_llm,
         chain_type="stuff",
-        vectorstore=_docsearch,
-        chain_type_kwargs=chain_type_kwargs
-        )
-        answer = chain({"question": query}, return_only_outputs=True)
-        # chain = load_qa_with_sources_chain(OpenAI(temperature=0),
-        #                                    chain_type="stuff",
-        #                                    prompt=PROMPT,
-        #                                    )
-
-
-        # answer = chain({"input_documents": docs, "question": query}, return_only_outputs=False)
-
-
-    elif chain_type == 'Refined':
-
-        docs = [d[0] for d in docs]
-
-        initial_qa_prompt = PromptTemplate(
-            input_variables=["context_str", "question"], template=initial_qa_template
-        )
-        chain = load_qa_chain(OpenAI(temperature=0), chain_type="refine", return_refine_steps=False,
-                              question_prompt=initial_qa_prompt, refine_prompt=refine_prompt)
-        answer = chain({"input_documents": docs, "question": query}, return_only_outputs=False)
+        vectorstore=_docsearch,
+        chain_type_kwargs=chain_type_kwargs,
+        return_source_documents=True,
+        k=3
+    )
+
+    answer = chain({"query": query})
 
     return answer
 
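A self-contained sketch of the updated retrieval path and the shape of its output. The store, texts, and query are stand-ins, and `chat_llm` is used where the hunk passes `llm=streaming_llm` (which presumably refers to the streaming ChatOpenAI defined just above it):

```python
from langchain import VectorDBQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

# Stand-ins for the app's state: a small in-memory store and a chat model.
docsearch = FAISS.from_texts(
    [
        "Revenue grew 12% year over year.",
        "Gross margin was 44% in FY22.",
        "Operating expenses rose 8%.",
    ],
    OpenAIEmbeddings(),
)
chat_llm = ChatOpenAI(temperature=0)  # streaming callbacks omitted here

chain = VectorDBQA.from_chain_type(
    llm=chat_llm,
    chain_type="stuff",
    vectorstore=docsearch,
    # chain_type_kwargs={"prompt": prompt},  # plugs in the chat prompt above
    return_source_documents=True,  # adds "source_documents" to the output
    k=3,                           # stuff the 3 nearest chunks into {context}
)

# The chain returns a dict with "query", "result", and "source_documents".
answer = chain({"query": "How fast did revenue grow?"})
print(answer["result"])
for doc in answer["source_documents"]:
    print(doc.page_content)
```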
 