from html import escape

import numpy as np
import streamlit as st
import torch
from transformers import AutoModel, AutoTokenizer
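
# Load the Farsi CLIP text encoder once at startup. The image side of the
# model is not needed at query time: the Unsplash image embeddings (and the
# URL of each image) were computed offline and are loaded from disk.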
text_tokenizer = AutoTokenizer.from_pretrained('arman-aminian/clip-text-farsi')
text_encoder = AutoModel.from_pretrained('arman-aminian/clip-text-farsi').eval()
image_embeddings = torch.load('image_embeddings.pt')
image_links = np.load('image_links (1).npy', allow_pickle=True)
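

# Encode a Farsi text query, score it against every precomputed image
# embedding with cosine similarity, and return the URLs of the top_k
# best-matching images.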
def image_search(query, top_k=10):
    with torch.no_grad():
        text_embedding = text_encoder(**text_tokenizer(query, return_tensors='pt')).pooler_output
        _, indices = torch.cosine_similarity(image_embeddings, text_embedding).sort(descending=True)
    return [image_links[i] for i in indices[:top_k]]
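

# Render a list of image URLs as an HTML gallery that can be passed to
# st.markdown with unsafe_allow_html=True.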
def get_html(url_list):
    # A flex container lets the thumbnails wrap and reflow with the page width.
    html = "<div style='display: flex; flex-wrap: wrap; justify-content: space-evenly;'>"
    for url in url_list:
        # Escape each URL before interpolating it into the markup.
        html2 = f"<img src='{escape(url)}' style='height: 200px; margin: 5px;'>"
        html = html + html2
    html += "</div>"
    return html


def main():
    st.markdown('''
    ''',
                unsafe_allow_html=True)
    st.sidebar.markdown('''
# FARSI IMAGE SEARCH

Type a query and we will search [25 thousand photos](https://unsplash.com/) for you and return the most relevant ones.

What makes this model special is that it searches the raw photos themselves: the images have no captions or descriptions attached. To give a better sense of this, here are a few applications the model could have:

- You could search your phone's photo gallery in Farsi.
- While writing a blog post or any other text, you could place related photos between your paragraphs: give the model the paragraph you want illustrated, and it finds a matching photo for you.

Note: We used a small collection (25 thousand images) to keep this demo real-time, but the quality of image search obviously depends heavily on the size of the image database; this version is only an initial demo to introduce the model.
''')
    _, c, _ = st.columns((1, 3, 1))
    query = c.text_input('Search here')
    if len(query) > 0:
        results = image_search(query)
        st.markdown(get_html(results), unsafe_allow_html=True)


if __name__ == '__main__':
    main()
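# Launch the demo locally with: streamlit run <this_file>.py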