import os
import pickle
import subprocess

import numpy as np
import streamlit as st
from PIL import Image
from streamlit_image_select import image_select

# Hide all GPUs before torch (and fashion-clip, which imports torch) loads,
# so the app always runs on CPU.
os.environ["CUDA_VISIBLE_DEVICES"] = ""
import torch

torch.cuda.is_available = lambda: False
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

from fashion_clip.fashion_clip import FashionCLIP

st.sidebar.write("# Shopping Muse")
st.write("Shopping Muse")

new_size = (800, 600)  # width/height for the result thumbnails
image_cnt = 8          # number of top matches to display


def horizontal_scroll_images(images, image_width=300):
    # Lay the images out side by side, one per column.
    cols = st.columns(len(images))
    for col, img_path in zip(cols, images):
        col.image(img_path, width=image_width)


@st.cache_resource
def load_embedding_file():
    # Load the precomputed image paths and FashionCLIP image embeddings.
    with open("embeddings_and_paths.pkl", "rb") as f:
        data = pickle.load(f)
    return data["images_path"], data["embeddings"]


fclip = FashionCLIP('fashion-clip')

if not os.path.exists("clothing-dataset"):
    subprocess.run(
        "git clone https://github.com/alexeygrigorev/clothing-dataset",
        shell=True,
    )

images, image_embeddings = load_embedding_file()

# Keep the chat history in session state so it survives Streamlit reruns;
# a plain module-level list would be reset on every interaction.
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []


def append_message(sender, message):
    st.session_state.chat_history.append((sender, message))


def chatbot_interface():
    st.sidebar.title("Chatbot Interface")
    user_input = st.sidebar.text_input("You:", key="user_input")
    if st.sidebar.button("Send"):
        append_message("You", user_input)
        # Replace the following line with your chatbot logic to generate a response.
        append_message("Chatbot", f"Bot response to: {user_input}")

        # Embed the query and rank every image by dot-product similarity.
        query = user_input
        text_embedding = fclip.encode_text([query], 32)[0]
        scores = text_embedding.dot(image_embeddings.T)
        top_ids = (-scores).argsort()[:image_cnt]

        results = [Image.open(images[k]).resize(new_size) for k in top_ids]
        image_select(
            label="Results",
            images=results,
            captions=[f"{query} result"] * image_cnt,
        )

    st.sidebar.markdown("---")

    # Display the chat history.
    st.sidebar.title("Chat History")
    for sender, message in st.session_state.chat_history:
        st.sidebar.text(f"{sender}: {message}")


# Main content area
st.title("Muse Chatbot")

# The chatbot interface lives in a box in the sidebar.
st.sidebar.markdown("## Chatbot Box")
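# The app reads "embeddings_and_paths.pkl" but never shows how it is built.
# A minimal sketch for producing it, assuming fashion-clip's encode_images()
# API and the images/ layout of the cloned clothing-dataset repo;
# build_embedding_file is a hypothetical helper, not part of the original app:
def build_embedding_file(image_dir="clothing-dataset/images",
                         out_path="embeddings_and_paths.pkl"):
    import glob
    paths = sorted(glob.glob(os.path.join(image_dir, "*.jpg")))
    embeddings = fclip.encode_images(paths, batch_size=32)
    # L2-normalize so the dot product in chatbot_interface() ranks by cosine similarity.
    embeddings = embeddings / np.linalg.norm(embeddings, ord=2, axis=-1, keepdims=True)
    with open(out_path, "wb") as f:
        pickle.dump({"images_path": paths, "embeddings": embeddings}, f)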
text"] * (image_cnt)) chatbot_interface()