import errors
- appStore/__pycache__/search.cpython-310.pyc +0 -0
- appStore/embed.py +0 -1
- appStore/search.py +2 -2
appStore/__pycache__/search.cpython-310.pyc
ADDED
Binary file (1.15 kB)
appStore/embed.py
CHANGED
@@ -4,7 +4,6 @@ from torch import cuda
 import streamlit as st
 from langchain_huggingface import HuggingFaceEmbeddings
 from appStore.prep_utils import get_client
-from qdrant_client.http import models
 
 # get the device to be used eithe gpu or cpu
 device = 'cuda' if cuda.is_available() else 'cpu'
appStore/search.py
CHANGED
@@ -1,11 +1,11 @@
 from appStore.prep_utils import get_client
 from langchain_qdrant import FastEmbedSparse, RetrievalMode
 from torch import cuda
+from qdrant_client.http import models
+from langchain_huggingface import HuggingFaceEmbeddings
 # get the device to be used eithe gpu or cpu
 device = 'cuda' if cuda.is_available() else 'cpu'
 
-import streamlit as st
-from langchain_huggingface import HuggingFaceEmbeddings
 
 def hybrid_search(client, query, collection_name):
     embeddings = HuggingFaceEmbeddings(
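The diff cuts off at the `HuggingFaceEmbeddings(` call, so for context here is a minimal sketch of how the rest of `hybrid_search` could use the imports this commit moves into `search.py`. The embedding model name, the `QdrantVectorStore` wrapper (which would need its own import), the payload filter key, and `k` are illustrative assumptions, not part of the commit.

```python
# Hypothetical continuation of hybrid_search(); not part of this commit.
# Module-level imports mirror the post-commit search.py header,
# plus QdrantVectorStore, which this sketch assumes.
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_qdrant import FastEmbedSparse, QdrantVectorStore, RetrievalMode
from qdrant_client.http import models
from torch import cuda

device = 'cuda' if cuda.is_available() else 'cpu'

def hybrid_search(client, query, collection_name):
    # Dense embeddings on the device chosen at module level ('cuda' or 'cpu').
    embeddings = HuggingFaceEmbeddings(
        model_name="sentence-transformers/all-MiniLM-L6-v2",  # assumed model
        model_kwargs={"device": device},
    )
    # Sparse embeddings for the lexical half of the hybrid query.
    sparse_embeddings = FastEmbedSparse(model_name="Qdrant/bm25")

    # Wrap the existing collection for hybrid (dense + sparse) retrieval.
    # Vector names must match how the collection was originally created.
    vector_store = QdrantVectorStore(
        client=client,
        collection_name=collection_name,
        embedding=embeddings,
        sparse_embedding=sparse_embeddings,
        retrieval_mode=RetrievalMode.HYBRID,
    )

    # qdrant_client.http.models is typically needed for payload filters like
    # this one; the "source" key and its value are placeholders.
    doc_filter = models.Filter(
        must=[
            models.FieldCondition(
                key="source", match=models.MatchValue(value="report")
            )
        ]
    )
    return vector_store.similarity_search(query, k=10, filter=doc_filter)
```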