Andy Lau committed
Commit 52e39b5
1 Parent(s): fbb81e4

wip app.py

Files changed (1)
  1. app.py +180 -1

app.py CHANGED
@@ -1,7 +1,56 @@
+from tkinter.tix import COLUMN  # unused import; looks like an accidental IDE auto-import
 import streamlit as st
 import numpy as np
 import pandas as pd
 import PIL
+import torch
+import pickle
+import boto3
+
+# import time
+import torchvision.transforms as transforms
+
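+# SaveFeatures registers a forward hook on a given layer and stores that layer's
+# forward output (used below as the image embedding) as a NumPy array.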
+class SaveFeatures():
+    features = None
+    def __init__(self, m):
+        self.hook = m.register_forward_hook(self.hook_fn)
+        self.features = None
+    def hook_fn(self, module, input, output):
+        out = output.detach().cpu().numpy()
+        if self.features is None:
+            self.features = out
+        else:
+            self.features = np.row_stack((self.features, out))
+    def remove(self):
+        self.hook.remove()
+
+
+def read_image_from_s3(bucket, key, region_name='us-west-2'):
+    """Load an image file from S3.
+
+    Parameters
+    ----------
+    bucket : string
+        Bucket name
+    key : string
+        Path in S3
+
+    Returns
+    -------
+    PIL.Image.Image
+        Image loaded from the S3 object body
+    """
+    s3 = boto3.resource('s3', region_name=region_name)
+    bucket = s3.Bucket(bucket)
+    object = bucket.Object(key)
+    response = object.get()
+    file_stream = response['Body']
+    im = PIL.Image.open(file_stream).convert('RGB')  # PIL.Image.open: only `import PIL` is in scope, so a bare Image.open would raise NameError
+    return im
+
+
+def make_predictions():
+    print("hi")
 
 # ---- Title Screen -----------
 st.title('Image Optimization: Email Industry')
@@ -13,8 +62,138 @@ st.image(img)
 
 st.markdown('Adding an image to an email campaign that will provide optimal engagement metrics can be challenging. How do you know which image to upload to your HTML, that will make an impact or significantly move the needle? And why would this image garner the best engagement? This model seeks to help campaign engineers understand which images affect their user engagement rate the most. The specific model is implemented using ResNet 18 and ResNet 34 for image embeddings extraction, and then we used these image embeddings as further inputs into a Gradient Boosted Tree model to generate probabilities on a user-specified target variable. The base model was adapted to car images and accurately predicted the user engagement rates with 91% accuracy. This model is adaptable for any large-scale marketing campaign using images. This model will identify the best images for optimal engagement for an email marketing campaign and serve engagement metrics prior to campaign launch. The model serves up several different images in milliseconds, so the campaign engineer understands which image to select in the campaign for optimized engagement.')
 
+uploaded_file = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"])
+
+if uploaded_file is not None:
+    upload_img = PIL.Image.open(uploaded_file)
+    st.image(upload_img, caption='Uploaded Image', width=300)
+else:
+    upload_img = None
+# st.write("")
+# st.write("Classifying...")
+# label = predict_label(image)
+# st.write('%s (%.2f%%)' % (label[0], label[1]*100))
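+# note: the commented-out snippet above calls predict_label, which is not defined anywhere in this file yet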
+
+
+# Drop down menu
+
+target_variables = ['Open Rate',
+                    'Click Through Open Rate',
+                    'Revenue Generated per Email',
+                    'Conversion Rate']
+campaign_types = ['Abandoned Cart',
+                  'Newsletter',
+                  'Promotional',
+                  'Survey',
+                  'Transactional',
+                  'Webinar',
+                  'Engagement',
+                  'Review_Request',
+                  'Product_Announcement']
+
+industry_types = ['Energy',
+                  'Entertainment',
+                  'Finance and Banking',
+                  'Healthcare',
+                  'Hospitality',
+                  'Real Estate', 'Retail', 'Software and Technology']
+
+
+target = st.selectbox('Target Variables', target_variables, index=0)
+campaign = st.selectbox('Campaign Types', campaign_types, index=0)
+industry = st.selectbox('Industry Types', industry_types, index=0)
+
+
+if st.button('Generate Predictions'):
+    if upload_img is None:
+        st.error('Please upload an image')
+    else:
+        placeholder = st.empty()
+        placeholder.write("Loading Data...")
+
+        # Starting Predictions
+
+        data = pd.read_csv('data/wrangled_data_v2.csv', index_col=0)
+        data_mod = data.copy()
+        data_mod = data[(data.campain_type == campaign) & (data.industry == industry)]
+
+        embeddings_df = pd.read_csv('data/embeddings_df.csv', index_col=0)
+        embeddings_df = embeddings_df.iloc[data.index]
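+        # note: data_mod (the campaign/industry subset) is computed above but not used below; embeddings_df is aligned to the full data index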
+
+
+        # Transform to tensor
+        # transforming user input PIL Image to tensor
+
+        # single_img_path = list(uploaded_image.value.keys())[0]
+        single_image = upload_img.convert('RGB')  # converting grayscale images to RGB
+        # st.image(single_image, caption='Uploaded Image', width=300)
+
+        my_transforms = transforms.Compose([
+            transforms.Resize((224, 224)),
+            transforms.ToTensor()
+        ])
+
+        image_tensor = my_transforms(single_image).unsqueeze(0)  # transforming into tensor, unsqueeze to match input batch size dimensions
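+        # note: only Resize and ToTensor are applied (no ImageNet mean/std normalization), so the checkpoint is assumed to expect raw [0, 1] inputs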
+
+
+
+        placeholder.write('Loading Model...')
+
+        model_path = 'model/my_checkpoint1.pth'
+        model = torch.load(model_path, map_location=torch.device('cpu'))
+        model.eval()
+        image_imbeddings = SaveFeatures(list(model._modules.items())[-1][1])
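+        # SaveFeatures hooks the last top-level module of the loaded network; its forward output is what gets used as the image embedding below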
+
+        with torch.no_grad():
+            outputs = model(image_tensor)  # switched for cpu: image_tensor.cuda() (no cuda)
+        img_embeddings = image_imbeddings.features[0]
+
+
+        xgb_model = pickle.load(open("model/xgb_grid_model.pkl", "rb"))
+        col_names = ['Abarth', 'Cab', 'Convertible', 'Coupe', 'GS', 'Hatchback', 'IPL', 'Minivan', 'R', 'SRT-8', 'SRT8', 'SS', 'SUV', 'Sedan', 'SuperCab', 'Superleggera', 'Type-S', 'Van', 'Wagon', 'XKR', 'Z06', 'ZR1']
+        img_df = pd.DataFrame([img_embeddings], columns=col_names)
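+        # note: these column names look like car trim labels carried over from the base car-image model; they appear to serve only as placeholder feature names matching what the pickled XGBoost model expects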
+
+        #####
+        # Getting Probabilities for Subsetted Dataframe
+        full_df_probs = xgb_model.predict_proba(embeddings_df)
+        full_df_probs = [i[1] for i in full_df_probs]
+        prob_series = pd.Series(full_df_probs, index=embeddings_df.index)
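+        # for a binary target, predict_proba returns (negative, positive) pairs per row; only the positive-class probability is kept for ranking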
+
+        # 2 from each
+        top_10 = prob_series.sort_values(ascending=False)[:20]
+        random_4_from_top_10 = top_10.sample(replace=False, n=2)
+
+        # 2 from top 10 to 100
+        top_10_100 = prob_series.sort_values(ascending=False)[20:100]
+        random_4_from_top_10_100 = top_10_100.sample(replace=False, n=2)
+
+        alternate_probs = pd.concat([random_4_from_top_10, random_4_from_top_10_100], axis=0)
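+        # note: despite the variable names, this samples 2 images from the top 20 probabilities and 2 from ranks 21-100, for 4 alternates in total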
+
+        ######
+        # Making predictions on user input and displaying results:
+        img_pred = xgb_model.predict(img_df)[0]
+        img_proba = xgb_model.predict_proba(img_df)[0][1]
+
+        ######
+        # making dictionary for max probability for recommendation
+        max_prob_dict = {}
+        max_prob_dict['current_image'] = img_proba
+        for i in range(len(alternate_probs)):
+            max_prob_dict['Alternate Image ' + str(i+1)] = alternate_probs.values[i]
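+        # note: max_prob_dict now holds the uploaded image's probability alongside the alternates, but it is not displayed anywhere yet in this wip version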
 
+        st.write('Below are the probabilities if alternate recommended images were used')
 
+        img_index = alternate_probs.index[0]
+        img_path = data.iloc[img_index][0]
+        bucket = 'sagemaker-us-west-2-647020561811'
+        key = 'sagemaker/Marlov-Image/'
 
+        s3 = boto3.resource('s3')
+        bucket = s3.Bucket(bucket)
+        # for obj in bucket.objects.filter(Prefix=key):
+        for obj in bucket.objects.all():
+            key = obj.key
+            body = obj.get()['Body'].read()
+            # alt_img = read_image_from_s3(bucket,key,img_path)
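+            # note: this loop reads every object body in the bucket but does not use the bytes yet; the read_image_from_s3 call above is still commented out (wip)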
 
-#
+        placeholder.empty()