import os
import faiss
import pickle
import numpy as np
import requests
import streamlit as st

from datasets import load_dataset
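

# Streamlit caches the dataset so it is downloaded from the Hub only once.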
@st.cache
def read_data(dataset_repo='dhmeltzer/asks_validation_embedded'):
    """Read the data from huggingface."""
    return load_dataset(dataset_repo)
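

# allow_output_mutation=True keeps Streamlit from hashing the (mutable) Faiss
# index on every rerun; the index on disk is a pickled, serialized byte array.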
@st.cache(allow_output_mutation=True)
def load_faiss_index(path_to_faiss="./faiss_index_small.pickle"):
    """Load and deserialize the Faiss index."""
    with open(path_to_faiss, "rb") as h:
        data = pickle.load(h)
    return faiss.deserialize_index(data)


def main():
    # Load the cached dataset and the deserialized Faiss index.
    data = read_data()
    faiss_index = load_faiss_index()
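
    # Query embeddings come from the Hugging Face Inference API's
    # feature-extraction pipeline, so no transformer model is loaded locally.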
    model_id = "sentence-transformers/nli-distilbert-base"
    api_url = f"https://api-inference.huggingface.co/pipeline/feature-extraction/{model_id}"
    # Keep the API token out of the source; read it from an environment variable
    # (the name HF_API_TOKEN is just the convention used here).
    headers = {"Authorization": f"Bearer {os.environ.get('HF_API_TOKEN', '')}"}

    def query(texts):
        """Embed a list of texts via the hosted feature-extraction pipeline."""
        response = requests.post(
            api_url,
            headers=headers,
            json={"inputs": texts, "options": {"wait_for_model": True}},
        )
        return response.json()

    st.title("Vector-based searches with Sentence Transformers and Faiss")

    user_input = st.text_area("Search box", "ELI5 Dataset")

    st.sidebar.markdown("**Filters**")
    filter_scores = st.sidebar.slider("Citations", 0, 250, 0)  # not yet applied to the results
    num_results = st.sidebar.slider("Number of search results", 1, 50, 1)

    if user_input:
        # Embed the query, then look up the indices of its nearest neighbours.
        vector = query([user_input])
        _, I = faiss_index.search(np.array(vector).astype("float32"), k=num_results)
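
        # load_dataset returns a DatasetDict keyed by split name; "train" is an
        # assumption here; adjust it to whichever split holds the embedded posts.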
        frame = data["train"]
        st.write(user_input)

        for id_ in I.flatten().tolist():
            f = frame[id_]
            st.write(
                f"""**{f['title']}**
                **text**: {f['selftext']}
                """
            )
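

# Launch the app with:  streamlit run <path to this script>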
if __name__ == "__main__":
    main()