ryanrwatkins committed on
Commit
f478057
1 Parent(s): 25279a1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -4
app.py CHANGED
@@ -123,7 +123,7 @@ def submit_message(prompt, prompt_template, temperature, max_tokens, context_len
123
  embeddings = OpenAIEmbeddings()
124
 
125
 
126
- history = state['question']
127
 
128
  if not prompt:
129
  return gr.update(value=''), [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)], f"Total tokens used: {state['total_tokens']}", state
@@ -161,7 +161,7 @@ def submit_message(prompt, prompt_template, temperature, max_tokens, context_len
161
 
162
  docsearch = FAISS.from_texts(texts, embeddings)
163
  #query = str(system_prompt + history[-context_length*2:] + [prompt_msg])
164
- query = str(system_prompt + history[0] + [prompt_msg])
165
  docs = docsearch.similarity_search(query)
166
  #print(docs[0].page_content)
167
 
@@ -171,9 +171,12 @@ def submit_message(prompt, prompt_template, temperature, max_tokens, context_len
171
 
172
  # VectorDBQA.from_chain_type(llm=OpenAI(), chain_type="stuff", vectorstore=docsearch, return_source_documents=True)
173
  # https://colab.research.google.com/drive/1dzdNDZyofRB0f2KIB4gHXmIza7ehMX30?usp=sharing#scrollTo=b-ejDn_JfpWW
 
 
 
174
 
175
- history.append(prompt_msg.copy())
176
- history.append(completion.copy())
177
  #history.append(completion.choices[0].message.to_dict())
178
  #history.append(completion["result"].choices[0].message.to_dict())
179
 
@@ -189,6 +192,8 @@ def submit_message(prompt, prompt_template, temperature, max_tokens, context_len
189
 
190
  total_tokens_used_msg = f"Total tokens used: {state['total_tokens']}"
191
 
 
 
192
  chat_messages = [(prompt_msg['content'], completion['content'])]
193
  #chat_messages = [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)]
194
  #chat_messages = [(history[-2]['content'], history[-1]['content'])]
 
123
  embeddings = OpenAIEmbeddings()
124
 
125
 
126
+ history = state['messages']
127
 
128
  if not prompt:
129
  return gr.update(value=''), [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)], f"Total tokens used: {state['total_tokens']}", state
 
161
 
162
  docsearch = FAISS.from_texts(texts, embeddings)
163
  #query = str(system_prompt + history[-context_length*2:] + [prompt_msg])
164
+ query = str(system_prompt + history + [prompt_msg])
165
  docs = docsearch.similarity_search(query)
166
  #print(docs[0].page_content)
167
 
 
171
 
172
  # VectorDBQA.from_chain_type(llm=OpenAI(), chain_type="stuff", vectorstore=docsearch, return_source_documents=True)
173
  # https://colab.research.google.com/drive/1dzdNDZyofRB0f2KIB4gHXmIza7ehMX30?usp=sharing#scrollTo=b-ejDn_JfpWW
174
+
175
+ get_empty_state()
176
+ state.append(completion.copy())
177
 
178
+ #history.append(prompt_msg.copy())
179
+ #history.append(completion.copy())
180
  #history.append(completion.choices[0].message.to_dict())
181
  #history.append(completion["result"].choices[0].message.to_dict())
182
 
 
192
 
193
  total_tokens_used_msg = f"Total tokens used: {state['total_tokens']}"
194
 
195
+
196
+
197
  chat_messages = [(prompt_msg['content'], completion['content'])]
198
  #chat_messages = [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)]
199
  #chat_messages = [(history[-2]['content'], history[-1]['content'])]