ishaan-mital committed
Commit 03949e5
1 Parent(s): 699e374

requirement.txt
Files changed (2):
  1. app.py +9 -7
  2. requirements.txt +2 -1
app.py CHANGED
@@ -3,19 +3,21 @@ import os
 import pinecone
 import time
 # from torch import cuda
-from langchain.embeddings.huggingface import HuggingFaceEmbeddings
+# from langchain.embeddings.huggingface import HuggingFaceEmbeddings
 # import PyPDF2
 # import re
 from langchain.vectorstores import Pinecone
+from sentence_transformers import SentenceTransformer
 
-embed_model_id = 'sentence-transformers/all-MiniLM-L6-v2'
+
+embed_model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
 # device = f'cuda:{cuda.current_device()}' if cuda.is_available() else 'cpu'
 
-embed_model = HuggingFaceEmbeddings(
-    model_name=embed_model_id,
-    # model_kwargs={'device': device},
-    # encode_kwargs={'device': device, 'batch_size': 32}
-)
+# embed_model = HuggingFaceEmbeddings(
+#     model_name=embed_model_id,
+#     # model_kwargs={'device': device},
+#     # encode_kwargs={'device': device, 'batch_size': 32}
+# )
 
 # get API key from app.pinecone.io and environment from console
 pinecone.init(
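The app.py change swaps LangChain's HuggingFaceEmbeddings wrapper for a raw SentenceTransformer model. A minimal sketch of how the two objects differ, assuming the same all-MiniLM-L6-v2 checkpoint; the example texts are placeholders, not taken from this commit:

# Both produce 384-dimensional vectors for all-MiniLM-L6-v2;
# only the calling convention changes.
from sentence_transformers import SentenceTransformer
# from langchain.embeddings.huggingface import HuggingFaceEmbeddings

texts = ["example passage one", "example passage two"]  # hypothetical inputs

# New style (this commit): SentenceTransformer.encode returns a numpy array.
st_model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
st_vectors = st_model.encode(texts)  # shape (2, 384)

# Old style (now commented out): the LangChain wrapper returns lists of floats
# via embed_documents / embed_query, the interface the Pinecone vectorstore expects.
# lc_model = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2')
# lc_vectors = lc_model.embed_documents(texts)  # list of 2 lists, each 384 floats long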
requirements.txt CHANGED
@@ -6,4 +6,5 @@ langchain==0.0.240
 datasets==2.14.0
 accelerate==0.21.0
 einops==0.6.1
-faiss-cpu
+faiss-cpu
+sentence_transformers
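requirements.txt now pulls in sentence_transformers alongside faiss-cpu. A rough sketch of how the new dependency could feed vectors into the Pinecone index initialised in app.py, using the pinecone-client 2.x API shown in the diff; the index name, environment variable names, and record IDs below are assumptions, not values from this repo:

import os
import pinecone
from sentence_transformers import SentenceTransformer

# Same client-style init as app.py (pinecone-client 2.x API).
pinecone.init(
    api_key=os.environ["PINECONE_API_KEY"],         # assumed env var name
    environment=os.environ["PINECONE_ENVIRONMENT"],  # assumed env var name
)
index = pinecone.Index("example-index")  # hypothetical index name

embed_model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
texts = ["a short example document"]     # hypothetical input
vectors = embed_model.encode(texts).tolist()  # 384-dim lists of floats

# Upsert (id, vector, metadata) tuples into the index.
index.upsert(vectors=[("doc-0", vectors[0], {"text": texts[0]})])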