Th3BossC committed
Commit e929e0b • 1 Parent(s): 1275a3a

commented gpt model

chatBot/common/chatgpt.py CHANGED
@@ -1,43 +1,43 @@
- import os
- from chatBot.common.pdfToText import loadLatestPdf
- os.environ["OPENAI_API_KEY"] = "INSERTYOUROWNAPIKEYHERE"
- from langchain.document_loaders import PyPDFLoader
- from langchain.text_splitter import CharacterTextSplitter
- import pickle
- import faiss
- from langchain.vectorstores import FAISS
- from langchain.embeddings import OpenAIEmbeddings
- from langchain.chains import RetrievalQAWithSourcesChain
- from langchain.chains.question_answering import load_qa_chain
- from langchain import OpenAI
+ # import os
+ # from chatBot.common.pdfToText import loadLatestPdf
+ # os.environ["OPENAI_API_KEY"] = "INSERTYOUROWNAPIKEYHERE"
+ # from langchain.document_loaders import PyPDFLoader
+ # from langchain.text_splitter import CharacterTextSplitter
+ # import pickle
+ # import faiss
+ # from langchain.vectorstores import FAISS
+ # from langchain.embeddings import OpenAIEmbeddings
+ # from langchain.chains import RetrievalQAWithSourcesChain
+ # from langchain.chains.question_answering import load_qa_chain
+ # from langchain import OpenAI
 
- urls = [
-     'http://en.espn.co.uk/f1/motorsport/story/3836.html', 'https://www.mercedes-amg-hpp.com/formula-1-engine-facts/#' , 'https://www.redbullracing.com/int-en/five-things-about-yas-marina' , 'https://www.redbull.com/gb-en/history-of-formula-1'
-     , 'https://www.formula1.com/en/information.abu-dhabi-yas-marina-circuit-yas-island.4YtOtpaWvaxWvDBTItP7s6.html']
+ # urls = [
+ #     'http://en.espn.co.uk/f1/motorsport/story/3836.html', 'https://www.mercedes-amg-hpp.com/formula-1-engine-facts/#' , 'https://www.redbullracing.com/int-en/five-things-about-yas-marina' , 'https://www.redbull.com/gb-en/history-of-formula-1'
+ #     , 'https://www.formula1.com/en/information.abu-dhabi-yas-marina-circuit-yas-island.4YtOtpaWvaxWvDBTItP7s6.html']
 
 
- data = loadLatestPdf()
+ # data = loadLatestPdf()
 
 
- text_splitter = CharacterTextSplitter(separator='\n',
-                                       chunk_size=1000,
-                                       chunk_overlap=200)
+ # text_splitter = CharacterTextSplitter(separator='\n',
+ #                                       chunk_size=1000,
+ #                                       chunk_overlap=200)
 
 
- docs = text_splitter.split_documents(data)
+ # docs = text_splitter.split_documents(data)
 
 
- embeddings = OpenAIEmbeddings()
+ # embeddings = OpenAIEmbeddings()
 
- vectorStore1_openAI = FAISS.from_documents(docs, embeddings)
+ # vectorStore1_openAI = FAISS.from_documents(docs, embeddings)
 
- with open("faiss_store_openai.pkl", "wb") as f:
-     pickle.dump(vectorStore1_openAI, f)
+ # with open("faiss_store_openai.pkl", "wb") as f:
+ #     pickle.dump(vectorStore1_openAI, f)
 
- with open("faiss_store_openai.pkl", "rb") as f:
-     VectorStore = pickle.load(f)
+ # with open("faiss_store_openai.pkl", "rb") as f:
+ #     VectorStore = pickle.load(f)
 
 
- llm=OpenAI(temperature=0.8, verbose = True)
+ # llm=OpenAI(temperature=0.8, verbose = True)
 
- gptModel = RetrievalQAWithSourcesChain.from_llm(llm=llm, retriever=VectorStore.as_retriever())
+ # gptModel = RetrievalQAWithSourcesChain.from_llm(llm=llm, retriever=VectorStore.as_retriever())
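
The code commented out above built the FAISS store and the RetrievalQAWithSourcesChain at module import time, which needs a valid OPENAI_API_KEY and OpenAI network calls before the module can even be loaded. Below is a minimal sketch of a lazier alternative, assuming the GPT path is re-enabled later; it reuses only calls that already appear in the file (loadLatestPdf, CharacterTextSplitter, OpenAIEmbeddings, FAISS.from_documents, OpenAI, RetrievalQAWithSourcesChain.from_llm). The getGptModel name and the on-demand structure are assumptions, not part of this commit, and the pickle round-trip is dropped because the store is kept in memory.

import os
from chatBot.common.pdfToText import loadLatestPdf
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.chains import RetrievalQAWithSourcesChain
from langchain import OpenAI

_gpt_model = None  # built on first use so importing this module makes no OpenAI calls

def getGptModel():
    """Build (once) and return a QA-with-sources chain over the latest uploaded PDF."""
    global _gpt_model
    if _gpt_model is None:
        if not os.environ.get("OPENAI_API_KEY"):
            raise RuntimeError("OPENAI_API_KEY is not set")
        data = loadLatestPdf()
        splitter = CharacterTextSplitter(separator='\n', chunk_size=1000, chunk_overlap=200)
        docs = splitter.split_documents(data)
        store = FAISS.from_documents(docs, OpenAIEmbeddings())  # embeds the chunks via OpenAI
        llm = OpenAI(temperature=0.8, verbose=True)
        _gpt_model = RetrievalQAWithSourcesChain.from_llm(llm=llm, retriever=store.as_retriever())
    return _gpt_model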
chatBot/common/utils.py CHANGED
@@ -1,4 +1,4 @@
- from chatBot.common.chatgpt import gptModel
+ # from chatBot.common.chatgpt import gptModel
  from chatBot.common.llama import llamaModel
 
 
@@ -6,4 +6,4 @@ def getAnswerLlama(question):
      return llamaModel(question)
 
  def getAnswerGpt(question):
-     return gptModel({'question' : question}, return_only_outputs = True)['answer']
+     return "answer"
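
With gptModel commented out, getAnswerGpt now returns the fixed string "answer". A minimal sketch of how the function could keep that placeholder only as a fallback, assuming the hypothetical getGptModel() helper sketched above; the query shape matches the removed line (the chain is called with {'question': ...} and return_only_outputs=True, and its 'answer' field is returned).

from chatBot.common.llama import llamaModel

def getAnswerLlama(question):
    return llamaModel(question)

def getAnswerGpt(question):
    try:
        # hypothetical lazy builder from the sketch above; not part of this commit
        from chatBot.common.chatgpt import getGptModel
        gptModel = getGptModel()
    except (ImportError, RuntimeError):
        # GPT chain unavailable (still commented out, or no API key): keep the placeholder
        return "answer"
    return gptModel({'question': question}, return_only_outputs=True)['answer']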