ryanrwatkins committed
Commit ba47ef6
1 Parent(s): 8298332

Update app.py

Files changed (1):
  1. app.py  +5 -5
app.py CHANGED
@@ -90,7 +90,7 @@ current_dir = os.getcwd()
 
 prompt_templates = {"All Needs Experts": "Respond as if you are combination of all needs assessment experts."}
 actor_description = {"All Needs Experts": "<div style='float: left;margin: 0px 5px 0px 5px;'><img src='https://na.weshareresearch.com/wp-content/uploads/2023/04/experts2.jpg' alt='needs expert image' style='width:70px;align:top;'></div>A combination of all needs assessment experts."}
-prompt_template_name = []
+
 
 
 def get_empty_state():
@@ -184,7 +184,7 @@ langchain_document_loader()
 
 text_splitter = RecursiveCharacterTextSplitter(
 separators = ["\n\n", "\n", " ", ""],
-chunk_size = 1500,
+chunk_size = 1000,
 chunk_overlap= 50
 )
 
@@ -284,7 +284,7 @@ if create_vectorstores:
 documents = chunks,
 vectorstore_name="Vit_All_HF_Embeddings"
 )
-print("vector_store_HF:",vector_store_HF._collection.count(),"chunks.", prompt_template_name)
+print("vector_store_HF:",vector_store_HF._collection.count(),"chunks.")
 
 print("")
 
@@ -834,8 +834,8 @@ def submit_message(prompt, prompt_template, temperature, max_tokens, context_len
 
 
 completion = chain.invoke({"question":prompt})
-print("completion")
-print(completion)
+#print("completion")
+#print(completion)
 #chain = load_qa_chain(ChatOpenAI(temperature=temperature, max_tokens=max_tokens, model_name="gpt-3.5-turbo"), chain_type="stuff")
 #completion = chain.run(input_documents=docs, question=query)
 
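Note on the last hunk: the debug prints around chain.invoke are commented out rather than deleted. An alternative, sketched here with the standard logging module so the output can be toggled without editing code, is to log the completion at DEBUG level; the FakeChain stand-in is purely illustrative, since the real chain construction sits outside this diff:

import logging

logging.basicConfig(level=logging.INFO)  # switch to DEBUG to see the completion
logger = logging.getLogger(__name__)

class FakeChain:
    # Stand-in for the chain built elsewhere in app.py (assumption); it only
    # mimics the invoke() call shown in the diff.
    def invoke(self, inputs):
        return {"question": inputs["question"], "answer": "stub answer"}

chain = FakeChain()
completion = chain.invoke({"question": "What is a needs assessment?"})
logger.debug("completion: %s", completion)  # hidden unless DEBUG is enabled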
 