ryanrwatkins committed on
Commit
44ab821
1 Parent(s): fb89906

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -5
app.py CHANGED
@@ -15,6 +15,7 @@ from langchain.chat_models import ChatOpenAI
15
  #from langchain.chains import ChatVectorDBChain
16
  from langchain.chains import RetrievalQA
17
  from langchain.document_loaders import PyPDFLoader
 
18
 
19
  # Used Chroma in Colab to create vector embeddings; I then saved them to HuggingFace, so now I have to set it to use them here.
20
  #from chromadb.config import Settings
@@ -29,7 +30,7 @@ def get_empty_state():
29
 
30
 
31
  #Initial prompt template, others added below from TXT file
32
- prompt_templates = {"All Needs Gurus": "I want you to act as a needs assessment expert."}
33
 
34
  def download_prompt_templates():
35
  url = "https://huggingface.co/spaces/ryanrwatkins/needs/raw/main/gurus.txt"
@@ -94,7 +95,7 @@ def submit_message(prompt, prompt_template, temperature, max_tokens, context_len
94
  if prompt_template:
95
  system_prompt = [{ "role": "system", "content": prompt_template }]
96
 
97
- prompt_msg = { "role": "user", "content": prompt }
98
 
99
 
100
  try:
@@ -102,14 +103,15 @@ def submit_message(prompt, prompt_template, temperature, max_tokens, context_len
102
 
103
  # completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + history[-context_length*2:] + [prompt_msg], temperature=temperature, max_tokens=max_tokens)
104
 
105
- completion = RetrievalQA.from_chain_type(llm=ChatOpenAI(temperature=temperature, max_tokens=max_tokens, model_name="gpt-3.5-turbo"), chain_type="stuff", retriever=vectordb.as_retriever() , return_source_documents=False)
106
- completion = completion({"query": system_prompt + history[-context_length*2:] + [prompt_msg] })
 
107
  # from https://blog.devgenius.io/chat-with-document-s-using-openai-chatgpt-api-and-text-embedding-6a0ce3dc8bc8
108
 
109
  history.append(prompt_msg)
110
  history.append(completion.choices[0].message.to_dict())
111
 
112
- #state['total_tokens'] += completion['usage']['total_tokens']
113
 
114
  except Exception as e:
115
  history.append(prompt_msg)
 
15
  #from langchain.chains import ChatVectorDBChain
16
  from langchain.chains import RetrievalQA
17
  from langchain.document_loaders import PyPDFLoader
18
+ from langchain.chains.question_answering import load_qa_chain
19
 
20
  # Used Chroma in Colab to create vector embeddings; I then saved them to HuggingFace, so now I have to set it to use them here.
21
  #from chromadb.config import Settings
 
30
 
31
 
32
  #Initial prompt template, others added below from TXT file
33
+ prompt_templates = {"All Needs Experts": "I want you to act as a needs assessment expert."}
34
 
35
  def download_prompt_templates():
36
  url = "https://huggingface.co/spaces/ryanrwatkins/needs/raw/main/gurus.txt"
 
95
  if prompt_template:
96
  system_prompt = [{ "role": "system", "content": prompt_template }]
97
 
98
+ prompt_msg = [{ "role": "user", "content": prompt }]
99
 
100
 
101
  try:
 
103
 
104
  # completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + history[-context_length*2:] + [prompt_msg], temperature=temperature, max_tokens=max_tokens)
105
 
106
+ completion_chain = load_qa_chain(ChatOpenAI(temperature=temperature, max_tokens=max_tokens, model_name="gpt-3.5-turbo"), chain_type="stuff" )
107
+ completion = RetrievalQA(combine_documents_chain=completion_chain, retriever=vectordb.as_retriever())
108
+ completion = completion.run({"query": prompt_msg })
109
  # from https://blog.devgenius.io/chat-with-document-s-using-openai-chatgpt-api-and-text-embedding-6a0ce3dc8bc8
110
 
111
  history.append(prompt_msg)
112
  history.append(completion.choices[0].message.to_dict())
113
 
114
+ state['total_tokens'] += completion['usage']['total_tokens']
115
 
116
  except Exception as e:
117
  history.append(prompt_msg)