ryanrwatkins committed on
Commit
25279a1
1 Parent(s): 53e540a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -123,7 +123,7 @@ def submit_message(prompt, prompt_template, temperature, max_tokens, context_len
123
  embeddings = OpenAIEmbeddings()
124
 
125
 
126
- history = state['messages']
127
 
128
  if not prompt:
129
  return gr.update(value=''), [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)], f"Total tokens used: {state['total_tokens']}", state
@@ -160,8 +160,8 @@ def submit_message(prompt, prompt_template, temperature, max_tokens, context_len
160
  # new_docsearch = pickle.load(f)
161
 
162
  docsearch = FAISS.from_texts(texts, embeddings)
163
- query = str(system_prompt + history[-context_length*2:] + [prompt_msg])
164
-
165
  docs = docsearch.similarity_search(query)
166
  #print(docs[0].page_content)
167
 
@@ -188,9 +188,9 @@ def submit_message(prompt, prompt_template, temperature, max_tokens, context_len
188
  history.append(error.copy())
189
 
190
  total_tokens_used_msg = f"Total tokens used: {state['total_tokens']}"
191
-
192
- chat_messages = [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)]
193
-
194
  #chat_messages = [(history[-2]['content'], history[-1]['content'])]
195
 
196
  return '', chat_messages, total_tokens_used_msg, state
 
123
  embeddings = OpenAIEmbeddings()
124
 
125
 
126
+ history = state['question']
127
 
128
  if not prompt:
129
  return gr.update(value=''), [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)], f"Total tokens used: {state['total_tokens']}", state
 
160
  # new_docsearch = pickle.load(f)
161
 
162
  docsearch = FAISS.from_texts(texts, embeddings)
163
+ #query = str(system_prompt + history[-context_length*2:] + [prompt_msg])
164
+ query = str(system_prompt + history[0] + [prompt_msg])
165
  docs = docsearch.similarity_search(query)
166
  #print(docs[0].page_content)
167
 
 
188
  history.append(error.copy())
189
 
190
  total_tokens_used_msg = f"Total tokens used: {state['total_tokens']}"
191
+
192
+ chat_messages = [(prompt_msg['content'], completion['content'])]
193
+ #chat_messages = [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)]
194
  #chat_messages = [(history[-2]['content'], history[-1]['content'])]
195
 
196
  return '', chat_messages, total_tokens_used_msg, state