Manglik-R committed
Commit 067a3d4
Parent: 6764b27

Update app.py

Files changed (1)
  app.py  +5 -9
app.py CHANGED
@@ -3,21 +3,18 @@ from langchain.llms import Replicate
 from langchain.vectorstores import Pinecone
 from langchain.text_splitter import CharacterTextSplitter
 from langchain.document_loaders import PyPDFLoader
+from langchain.llms import HuggingFaceHub
+from langchain.vectorstores import Chroma
 from langchain.embeddings import HuggingFaceEmbeddings
 from langchain.chains import ConversationalRetrievalChain
 from datasets import load_dataset
 import os
-import pinecone


 key = os.environ.get('API')
-yeh = os.environ.get('pineapi')
 os.environ["REPLICATE_API_TOKEN"] = key
-pinecone.init(api_key=yeh, environment='gcp-starter')
-

 import sentence_transformers
-import faiss

 def loading_pdf():
     return "Loading..."
@@ -31,9 +28,8 @@ def pdf_changes(pdf_doc):

     embeddings = HuggingFaceEmbeddings()

-    index_name = "chatbot"
-    index = pinecone.Index(index_name)
-    vectordb = Pinecone.from_documents(texts, embeddings, index_name=index_name)
+    db = Chroma.from_documents(texts, embeddings)
+    retriever = db.as_retriever(search_kwargs={'k': 2})

     llm = Replicate(
         model="a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
@@ -42,7 +38,7 @@ def pdf_changes(pdf_doc):
     global qa
     qa = ConversationalRetrievalChain.from_llm(
         llm,
-        vectordb.as_retriever(search_kwargs={'k': 2}),
+        retriever,
         return_source_documents=True
     )
     return "Ready"
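Net effect of the commit: the remotely hosted Pinecone index is replaced by an in-process Chroma store, so the pineapi secret, the pinecone.init call, and the import faiss line all disappear; only the Replicate token is still read from the environment. The commit also adds a HuggingFaceHub import, although the visible hunks still instantiate the Replicate LLM. As a rough sketch (not the full app.py), pdf_changes reads roughly as follows after the change; the PDF loading and splitting lines, the splitter parameters, and the pdf_doc.name attribute are assumptions, since those lines fall outside the hunks above.

import os

from langchain.llms import Replicate
from langchain.vectorstores import Chroma
from langchain.text_splitter import CharacterTextSplitter
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.chains import ConversationalRetrievalChain

# Only the Replicate token is needed now; the Pinecone key is gone.
key = os.environ.get('API')
os.environ["REPLICATE_API_TOKEN"] = key

qa = None

def pdf_changes(pdf_doc):
    global qa

    # Load and chunk the uploaded PDF (loader and splitter settings are
    # assumed here; they are not part of the diff).
    documents = PyPDFLoader(pdf_doc.name).load()
    texts = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0).split_documents(documents)

    embeddings = HuggingFaceEmbeddings()

    # Build the index locally with Chroma instead of a remote Pinecone index.
    db = Chroma.from_documents(texts, embeddings)
    retriever = db.as_retriever(search_kwargs={'k': 2})

    llm = Replicate(
        model="a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
    )

    qa = ConversationalRetrievalChain.from_llm(
        llm,
        retriever,
        return_source_documents=True
    )
    return "Ready"

Once pdf_changes has returned "Ready", the rest of the Space (not shown in the diff) can query the chain with the legacy dict-style call, for example:

result = qa({"question": "What is this PDF about?", "chat_history": []})
print(result["answer"])

Because return_source_documents=True, result["source_documents"] also carries the retrieved chunks. Chroma keeps the index in memory inside the Space (it requires the chromadb package at runtime), so no external vector-database credentials are needed, at the cost of rebuilding the index on every upload.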