nickmuchi commited on
Commit
0aec319
1 Parent(s): 016722b

Update functions.py

Browse files
Files changed (1) hide show
  1. functions.py +30 -25
functions.py CHANGED
@@ -60,40 +60,44 @@ margin-bottom: 2.5rem">{}</div> """
60
 
61
  #Stuff Chain Type Prompt template
62
 
63
- # def load_prompt()
 
64
 
65
- # system_template="""Use only the following pieces of earnings context to answer the users question thoroughly.
66
- # Do not use any information not provided in the context and remember you are a finance expert.
67
- # If you don't know the answer, just say that you don't know, don't try to make up an answer.
68
- # ALWAYS return a "SOURCES" part in your answer.
69
- # The "SOURCES" part should be a reference to the source of the document from which you got your answer.
70
 
71
- # Remember, do not reference any information not given in the context.
 
72
 
73
- # Follow the below format when answering:
 
 
74
 
75
- # Question: [question here]
76
- # Helpful Answer: [answer here]
77
- # SOURCES: xyz
78
 
79
- # If there is no sources found please return the below:
 
 
80
 
81
- # ```
82
- # The answer is: foo
83
- # SOURCES: Please refer to references section
84
- # ```
85
 
86
- # Begin!
87
- # ----------------
88
- # {context}"""
 
 
 
 
89
 
90
- # messages = [
91
- # SystemMessagePromptTemplate.from_template(system_template),
92
- # HumanMessagePromptTemplate.from_template("{question}")
93
- # ]
94
- # prompt = ChatPromptTemplate.from_messages(messages)
95
 
96
- # return prompt
97
 
98
  ###################### Functions #######################################################################################
99
 
@@ -205,6 +209,7 @@ def embed_text(query,title,embedding_model,_docsearch):
205
 
206
  chain = ConversationalRetrievalChain.from_llm(chat_llm,
207
  retriever= _docsearch.as_retriever(),
 
208
  return_source_documents=True)
209
 
210
  answer = chain({"question": question, "chat_history": chat_history})
 
60
 
61
  #Stuff Chain Type Prompt template
62
 
63
+ @st.cache_resource
64
+ def load_prompt():
65
 
66
+ system_template="""Use only the following pieces of earnings context to answer the user's question accurately.
67
+ Do not use any information not provided in the earnings context and remember to speak like a finance expert.
68
+ If you don't know the answer, just say 'There is no relevant answer in the given earnings call transcript',
69
+ don't try to make up an answer.
 
70
 
71
+ ALWAYS return a "SOURCES" part in your answer.
72
+ The "SOURCES" part should be a reference to the source of the document from which you got your answer.
73
 
74
+ Remember, do not reference any information not given in the context.
75
+
76
+ If the answer is not available in the given context just say 'There is no relevant answer in the given earnings call transcript'
77
 
78
+ Follow the below format when answering:
 
 
79
 
80
+ Question: [question here]
81
+ Helpful Answer: [answer here]
82
+ SOURCES: xyz
83
 
84
+ If there is no sources found please return the below:
 
 
 
85
 
86
+ ```
87
+ The answer is: foo
88
+ SOURCES: Please refer to references section
89
+ ```
90
+ Begin!
91
+ ----------------
92
+ {context}"""
93
 
94
+ messages = [
95
+ SystemMessagePromptTemplate.from_template(system_template),
96
+ HumanMessagePromptTemplate.from_template("{question}")
97
+ ]
98
+ prompt = ChatPromptTemplate.from_messages(messages)
99
 
100
+ return prompt
101
 
102
  ###################### Functions #######################################################################################
103
 
 
209
 
210
  chain = ConversationalRetrievalChain.from_llm(chat_llm,
211
  retriever= _docsearch.as_retriever(),
212
+ qa_prompt = load_prompt(),
213
  return_source_documents=True)
214
 
215
  answer = chain({"question": question, "chat_history": chat_history})