ryanrwatkins committed on
Commit
667b2dc
1 Parent(s): 9f50f0d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -3
app.py CHANGED
@@ -10,8 +10,10 @@ import glob
10
  from langchain.embeddings.openai import OpenAIEmbeddings
11
  from langchain.vectorstores import Chroma
12
  from langchain.text_splitter import TokenTextSplitter
13
- from langchain.llms import OpenAI
14
- from langchain.chains import ChatVectorDBChain
 
 
15
  from langchain.document_loaders import PyPDFLoader
16
 
17
  # Use Chroma in Colab to create vector embeddings, I then saved them to HuggingFace so now I have to set it to use them here.
@@ -100,7 +102,7 @@ def submit_message(prompt, prompt_template, temperature, max_tokens, context_len
100
 
101
  # completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + history[-context_length*2:] + [prompt_msg], temperature=temperature, max_tokens=max_tokens)
102
 
103
- completion = ChatVectorDBChain.from_llm(OpenAI(temperature=temperature, max_tokens=max_tokens, model_name="gpt-3.5-turbo"), vectordb, return_source_documents=True)
104
  result = completion({"question": system_prompt + [prompt_msg], "chat_history": history[-context_length*2:]})
105
  # from https://blog.devgenius.io/chat-with-document-s-using-openai-chatgpt-api-and-text-embedding-6a0ce3dc8bc8
106
 
 
10
  from langchain.embeddings.openai import OpenAIEmbeddings
11
  from langchain.vectorstores import Chroma
12
  from langchain.text_splitter import TokenTextSplitter
13
+ #from langchain.llms import OpenAI
14
+ from langchain.chat_models import ChatOpenAI
15
+ #from langchain.chains import ChatVectorDBChain
16
+ from langchain.chains import RetrievalQA
17
  from langchain.document_loaders import PyPDFLoader
18
 
19
  # Use Chroma in Colab to create vector embeddings, I then saved them to HuggingFace so now I have to set it to use them here.
 
102
 
103
  # completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + history[-context_length*2:] + [prompt_msg], temperature=temperature, max_tokens=max_tokens)
104
 
105
+ completion = RetrievalQA.from_chain_type(llm=ChatOpenAI(temperature=temperature, max_tokens=max_tokens, model_name="gpt-3.5-turbo"), chain_type="stuff", retriever=vectordb.as_retriever() , return_source_documents=True)
106
  result = completion({"question": system_prompt + [prompt_msg], "chat_history": history[-context_length*2:]})
107
  # from https://blog.devgenius.io/chat-with-document-s-using-openai-chatgpt-api-and-text-embedding-6a0ce3dc8bc8
108