rishikaboinapally committed on
Commit bbff3bb
1 Parent(s): 0221965

Add application file

Files changed (3)
  1. .gitignore +19 -0
  2. main.py +75 -0
  3. requirements.txt +6 -0
.gitignore ADDED
@@ -0,0 +1,19 @@
+ # Python specific
+ *.pyc
+ __pycache__/
+
+ # Virtual environment files (if using virtualenv or similar)
+ venv/
+ env/
+
+ # Model checkpoints or large data files
+ *.h5
+ *.pkl
+
+ # IDE files
+ .vscode/
+ .idea/
+
+ # Gradio uploads folder
+ uploads/
+
main.py ADDED
@@ -0,0 +1,75 @@
+ import os
+ import numpy as np
+ import pickle
+ import tensorflow as tf
+ from PIL import Image
+ from tensorflow.keras.preprocessing import image
+ from tensorflow.keras.layers import GlobalMaxPooling2D
+ from tensorflow.keras.applications.resnet50 import ResNet50, preprocess_input
+ from sklearn.neighbors import NearestNeighbors
+ from numpy.linalg import norm
+ import gradio as gr
+
+ # Function to load embeddings and filenames
+ def load_data():
+     # Load embeddings and filenames from the same directory as main.py
+     embeddings_path = os.path.join(os.path.dirname(__file__), 'embeddings.pkl')
+     filenames_path = os.path.join(os.path.dirname(__file__), 'filenames.pkl')
+
+     feature_list = np.array(pickle.load(open(embeddings_path, 'rb')))
+     filenames = pickle.load(open(filenames_path, 'rb'))
+
+     return feature_list, filenames
+
+ # Load the feature list and filenames
+ feature_list, filenames = load_data()
+
+ # Load the pre-trained model (frozen ResNet50 backbone + global max pooling)
+ model = ResNet50(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
+ model.trainable = False
+ model = tf.keras.Sequential([
+     model,
+     GlobalMaxPooling2D()
+ ])
+
+ def feature_extraction(img_path, model):
+     img = image.load_img(img_path, target_size=(224, 224))
+     img_array = image.img_to_array(img)
+     expanded_img_array = np.expand_dims(img_array, axis=0)
+     preprocessed_img = preprocess_input(expanded_img_array)
+     result = model.predict(preprocessed_img).flatten()
+     normalized_result = result / norm(result)
+     return normalized_result
+
+ def recommend(features, feature_list):
+     neighbors = NearestNeighbors(n_neighbors=6, algorithm='brute', metric='euclidean')
+     neighbors.fit(feature_list)
+     distances, indices = neighbors.kneighbors([features])
+     return indices
+
+ def resize_image(img, size):
+     img = img.resize(size, Image.LANCZOS)
+     return img
+
+ def predict(input_image):
+     img = input_image.convert("RGB").resize((224, 224), Image.LANCZOS)  # RGB, resized to the 224x224 input the model expects
+     img_array = np.array(img)
+     expanded_img_array = np.expand_dims(img_array, axis=0)
+     preprocessed_img = preprocess_input(expanded_img_array)
+     features = model.predict(preprocessed_img).flatten()
+     normalized_features = features / norm(features)
+     indices = recommend(normalized_features, feature_list)
+     result_images = [filenames[idx] for idx in indices[0][:5]]  # keep 5 matches to fill the 5 output slots below
+     resized_images = [resize_image(Image.open(img_path), (224, 224)) for img_path in result_images]
+     return resized_images
+
+ iface = gr.Interface(
+     fn=predict,
+     inputs=gr.Image(type="pil", label="Upload an image"),
+     outputs=[gr.Image(type="pil", label=f"Recommendation {i+1}") for i in range(5)],
+     title="Amazon Lens",
+     description="Upload an image of clothing, and the system will recommend similar items."
+ )
+
+ if __name__ == "__main__":
+     iface.launch()
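
Note: main.py expects embeddings.pkl and filenames.pkl to sit next to it, but this commit does not include them or a script that builds them. Below is a minimal offline sketch of how they could be generated with the same frozen ResNet50 + GlobalMaxPooling2D extractor used above; the `images/` folder name and the `extract` helper are assumptions for illustration, not part of the commit.

```python
# Offline sketch: build embeddings.pkl / filenames.pkl for main.py.
# Assumption: catalogue images live in a local "images/" folder.
import os
import pickle
import numpy as np
import tensorflow as tf
from numpy.linalg import norm
from tensorflow.keras.preprocessing import image
from tensorflow.keras.layers import GlobalMaxPooling2D
from tensorflow.keras.applications.resnet50 import ResNet50, preprocess_input

# Same feature extractor as main.py: frozen ResNet50 + global max pooling
base = ResNet50(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
base.trainable = False
model = tf.keras.Sequential([base, GlobalMaxPooling2D()])

def extract(img_path):
    img = image.load_img(img_path, target_size=(224, 224))
    arr = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
    feat = model.predict(arr).flatten()
    return feat / norm(feat)  # L2-normalize, as main.py does

image_dir = 'images'  # assumption: folder of catalogue images
filenames = [os.path.join(image_dir, f) for f in sorted(os.listdir(image_dir))]
feature_list = np.array([extract(f) for f in filenames])

with open('embeddings.pkl', 'wb') as f:
    pickle.dump(feature_list, f)
with open('filenames.pkl', 'wb') as f:
    pickle.dump(filenames, f)
```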
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ gradio
+ scikit-learn==1.2.2
+ numpy==1.26.4
+ Pillow==9.5.0
+ tensorflow==2.15.0
+ pickleshare==0.7.5
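
With embeddings.pkl and filenames.pkl in place next to main.py, installing the pinned dependencies with `pip install -r requirements.txt` and then running `python main.py` should start the Gradio interface via `iface.launch()`.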