File size: 2,694 Bytes
fac94f4
 
 
 
 
 
 
 
3690df0
8f3f09e
 
 
 
 
 
 
 
 
fac94f4
23d7305
 
 
 
 
 
 
b9535f2
 
23d7305
fac94f4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3690df0
fac94f4
 
 
3690df0
 
fac94f4
 
 
 
3690df0
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
# -*- coding: utf-8 -*-
"""HW3.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1H-R9L74rpYOoQJOnTLLbUpcNpd9Tty_D
"""
import os

import numpy as np
import streamlit as st
import tensorflow as tf
import tensorflow.keras.applications.resnet50 as resnet50
from PIL import Image
from sklearn.datasets import load_sample_image
from sklearn.neighbors import NearestNeighbors
from tensorflow.keras.applications.resnet50 import ResNet50, preprocess_input
from tensorflow.keras.preprocessing.image import load_img, img_to_array

# --- Streamlit UI: let the user pick which person's photos to process ---
st.markdown("This is an image classification program, please enter the image that you would like to process.")
st.markdown("Please keep in mind that the dataset is very small (around 100-2000 imgs only).")
# Candidate identities — these are directory names inside the LFW dataset.
path = ['Coretta_Scott_King', 'Saddam_Hussein', 'Augustin_Calleri', 'Peter_Hunt']

select_path = st.selectbox('Which of the four photos would you like to process', options=path)
st.write("You've selected", select_path)

import os
import tarfile
import urllib.request

# Download and extract the LFW (Labeled Faces in the Wild) dataset once.
# The original notebook used IPython shell magics (!wget / !tar), which are
# a syntax error in a plain Python script; use the standard library instead,
# and skip the download when the data is already present.
LFW_URL = 'http://vis-www.cs.umass.edu/lfw/lfw.tgz'
archive_path = '/content/lfw.tgz'
directory = '/content/lfw'

if not os.path.isdir(directory):
    if not os.path.exists(archive_path):
        urllib.request.urlretrieve(LFW_URL, archive_path)
    with tarfile.open(archive_path) as tar:
        tar.extractall('/content')

# Global ResNet50 feature extractor: ImageNet weights, no classification
# head, global-average pooling so each image maps to one flat embedding.
model = resnet50.ResNet50(weights='imagenet', include_top=False, pooling='avg')
feature_dict = {}          # person name -> embedding of one of their images
image_files = []
target_size = (224, 224)   # input size expected by ResNet50
i = 0

# Sample at most 2000 images because the whole entire dataset
# costs too much cpu power and ram

def preprocess_image(image_path, target_size):
    """Load one image, push it through the global ResNet50, and return its features.

    NOTE(review): callers pass an absolute path, so os.path.join simply
    returns `image_path` unchanged on POSIX — confirm if ported elsewhere.
    """
    full_path = os.path.join(directory, image_path)
    pixels = img_to_array(load_img(full_path, target_size=target_size))
    batch = preprocess_input(tf.expand_dims(pixels, axis=0))
    return model.predict(batch)

# Build one embedding per identity, scanning at most ~100 directory entries.
# NOTE(review): each inner iteration overwrites feature_dict[person], so only
# the LAST listed image of each person is kept — presumably intentional
# sampling to save RAM; confirm. (`dir` renamed: it shadowed the builtin.)
for person in os.listdir(directory):
    i += 1
    person_dir = os.path.join(directory, person)
    if os.path.isdir(person_dir):
        for filename in os.listdir(person_dir):
            feature_dict[person] = preprocess_image(
                person_dir + '/' + filename, target_size).flatten()
    if i >= 100:  # cap: embedding the full dataset is too expensive
        break

# Stack all identity embeddings into one (n_people, n_features) matrix.
feature_map = np.array(list(feature_dict.values()))

# Fit a 10-NN index over the embeddings and query with the selected person.
# NOTE(review): select_path may be absent from feature_dict if its directory
# fell outside the 100-entry sample above — confirm the cap covers it.
NearNeigh = NearestNeighbors(n_neighbors=10, algorithm='auto').fit(feature_map)
img = feature_dict[select_path].reshape(1, -1)
distance, indices = NearNeigh.kneighbors(img)

st.write('Similar images for', select_path)
people = list(feature_dict.keys())  # row index in feature_map -> person name
for rank, index in enumerate(indices[0]):
    # BUG FIX: original indexed `feature.keys()` (undefined name — NameError)
    # and printed to stdout instead of rendering in the Streamlit app.
    st.write(rank + 1, people[index])