trying zero-shot chain-of-thought prompting
pages/3_qa_sources.py +1 -2
pages/3_qa_sources.py
CHANGED
@@ -203,7 +203,6 @@ def generate_chat_completion(messages, model="gpt-4", temperature=1, max_tokens=
     else:
         raise Exception(f"Error {response.status_code}: {response.text}")
 
-
 model_data = [arxiv_ada_embeddings, embeddings, all_titles, all_text, all_authors]
 
 def run_query(query, return_n = 3, show_pure_answer = False, show_all_sources = True):
@@ -239,7 +238,7 @@ def run_query(query, return_n = 3, show_pure_answer = False, show_all_sources =
     st.markdown(lc_index.query(query))
     st.markdown(' ')
     st.markdown('#### context-based answer from sources:')
-    output = lc_index.query_with_sources(query)
+    output = lc_index.query_with_sources(query + ' Let\'s work this out in a step by step way to be sure we have the right answer.' ) #zero-shot in-context prompting from Zhou+22, Kojima+22
     st.markdown(output['answer'])
     opstr = '#### Primary sources: \n'
     st.markdown(opstr)