Datasets: eustache-crto
Commit 607b465 (parent: 21d76f0): Upload 2 files
- Experiments.ipynb +725 -0
- criteo_attribution_dataset.tsv.gz +3 -0
Experiments.ipynb
ADDED
@@ -0,0 +1,725 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Attribution Modeling Increases Efficiency of Bidding in Display Advertising\n",
"Eustache Diemert*, Julien Meynet* (Criteo Research), Damien Lefortier (Facebook), Pierre Galland (Criteo)\n",
"*authors contributed equally.\n",
"\n",
"This work was published in:\n",
"[2017 AdKDD & TargetAd Workshop, in conjunction with\n",
"The 23rd ACM SIGKDD Conference on Knowledge Discovery and Data Mining (KDD 2017)](https://adkdd17.wixsite.com/adkddtargetad2017)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
" * This notebook includes all the material needed to reproduce the results reported in the paper. The dataset can also be used for further research, such as testing alternative attribution models, offline metrics, etc.\n",
" * For details about the content of the dataset, refer to the README file."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Preprocessing"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"%pylab inline\n",
"import pandas as pd\n",
"plt.style.use('ggplot')\n",
"from scipy.optimize import minimize"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"DATA_FILE='criteo_attribution_dataset.tsv.gz'\n",
"df = pd.read_csv(DATA_FILE, sep='\\t', compression='gzip')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"df['day'] = np.floor(df.timestamp / 86400.).astype(int)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"df.day.hist(bins=len(df.day.unique()))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"df['gap_click_sale'] = -1\n",
"df.loc[df.conversion == 1, 'gap_click_sale'] = df.conversion_timestamp - df.timestamp"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"FEATURES = ['campaign', 'cat1', 'cat2', 'cat3', 'cat4', 'cat5', 'cat6',\n",
"            'cat7', 'cat8']\n",
"INFOS = ['cost', 'cpo', 'time_since_last_click']"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Labels"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"df['last_click'] = df.attribution * (df.click_pos == df.click_nb - 1).astype(int)\n",
"df['first_click'] = df.attribution * (df.click_pos == 0).astype(int)\n",
"df['all_clicks'] = df.attribution\n",
"df['uniform'] = df.attribution / (df.click_nb).astype(float)\n",
"INFOS += ['last_click', 'first_click', 'all_clicks', 'uniform']"
]
},
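{
"cell_type": "markdown",
"metadata": {},
"source": [
"To make the four labels concrete, the next cell applies the same formulas to a small hand-made timeline (hypothetical values, not rows of the dataset): a single converted user with three clicks."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Illustrative sketch on hand-made values: one converted timeline with 3 clicks.\n",
"toy = pd.DataFrame({'click_pos': [0, 1, 2], 'click_nb': [3, 3, 3], 'attribution': [1, 1, 1]})\n",
"toy['last_click'] = toy.attribution * (toy.click_pos == toy.click_nb - 1).astype(int)\n",
"toy['first_click'] = toy.attribution * (toy.click_pos == 0).astype(int)\n",
"toy['all_clicks'] = toy.attribution\n",
"toy['uniform'] = toy.attribution / toy.click_nb.astype(float)\n",
"# last_click credits only the final click, first_click only the first,\n",
"# all_clicks credits every click, and uniform splits the credit equally (1/3 each).\n",
"toy"
]
},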
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Learning / Validation"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"from sklearn.linear_model import LogisticRegression\n",
"from sklearn.feature_extraction import FeatureHasher\n",
"from sklearn.metrics import log_loss"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def bootstrap(data, num_samples, statistic, alpha):\n",
"    \"\"\"Returns bootstrap estimate of 100.0*(1-alpha) CI for statistic.\"\"\"\n",
"    n = len(data)\n",
"    data = np.array(data)\n",
"    stats = []\n",
"    for _ in range(num_samples):\n",
"        idx = np.random.randint(0, n, n)\n",
"        samples = data[idx]\n",
"        stats += [statistic(samples)]\n",
"    stats = np.array(sorted(stats))\n",
"    return (stats[int((alpha/2.0)*num_samples)],\n",
"            stats[int((1-alpha/2.0)*num_samples)])"
]
},
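{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal usage sketch of the `bootstrap` helper on synthetic data (the draws below are made up, not taken from the dataset): it returns the lower and upper bounds of a bootstrap confidence interval for the chosen statistic."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Sketch on synthetic Bernoulli draws: 95% bootstrap CI on their mean.\n",
"synthetic = np.random.binomial(1, 0.3, size=1000)\n",
"ci_low, ci_high = bootstrap(synthetic, num_samples=200, statistic=np.mean, alpha=.05)\n",
"print('empirical mean:', synthetic.mean(), '  95% CI:', (ci_low, ci_high))"
]
},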
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Attribution model\n",
"Learns the exponential decay parameter (lambda) from the observed click-to-conversion gaps."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def attr_nllh(l,x,y):\n",
"    loss = 0.0\n",
"    lamb = l[0]\n",
"    n = x.shape[0]\n",
"    for i in range(n):\n",
"        if y[i] == 1:\n",
"            loss += lamb*x[i]\n",
"        else:\n",
"            loss -= np.log(1 - np.exp(-lamb*x[i]))\n",
"    return loss/float(n)\n",
"\n",
"def attr_nllh_grad(l,x,y):\n",
"    grad = 0.0\n",
"    lamb = l[0]\n",
"    n = x.shape[0]\n",
"    for i in range(n):\n",
"        grad += x[i]*y[i] / (1 - np.exp(-lamb*x[i]))\n",
"    return np.array([grad/float(n)])\n",
"\n",
"\n",
"def optimize_lambda(tts, attrib):\n",
"    return minimize(attr_nllh, 1e-3, method='L-BFGS-B', jac=attr_nllh_grad,\n",
"                    options={'disp': True, 'maxiter': 20}, bounds=((1e-15, 1e-4),),\n",
"                    args=(tts,attrib)).x[0]\n",
"\n",
"def learn_attribution_model(df_view, test_day, learning_duration,\n",
"                            verbose=False, ci=False, rescale=1.,\n",
"                            optimizer=optimize_lambda):\n",
"    df_train = df_view[(df_view.day >= test_day - learning_duration) & (df_view.day < test_day)]\n",
"    df_conv = df_train[df_train.click_pos == df_train.click_nb - 1]\n",
"    x = df_conv.gap_click_sale.values\n",
"    y = df_conv.attribution.values\n",
"\n",
"    avg_tts = x.mean()\n",
"    tts_ci = bootstrap(x, 100, np.mean, .05)\n",
"    tts_ci = tts_ci[1] - tts_ci[0]\n",
"\n",
"    lamb = optimize_lambda(x, y)\n",
"\n",
"    lambs = []\n",
"    n_bootstraps = 30\n",
"    alpha = .05\n",
"    if ci:\n",
"        for _ in range(n_bootstraps):\n",
"            idx = np.random.randint(0, x.shape[0], x.shape)\n",
"            xx = x[idx]\n",
"            yy = y[idx]\n",
"            lambs += [optimize_lambda(xx, yy)]\n",
"\n",
"    if verbose:\n",
"        print('\\t\\t-avg_tts', avg_tts, '+/-', tts_ci,\n",
"              ' = ', avg_tts / 3600., 'hours = ', avg_tts / 86400., 'days')\n",
"        if ci:\n",
"            print('\\t\\t-lambda', lamb, '+/-', (lambs[int((1-alpha/2.)*n_bootstraps)] - lambs[int((alpha/2.)*n_bootstraps)]))\n",
"        else:\n",
"            print('\\t\\t-lambda', lamb)\n",
"\n",
"    return avg_tts, lamb"
]
},
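{
"cell_type": "markdown",
"metadata": {},
"source": [
"Quick sanity check of what the fitted parameter means (a sketch using an assumed value of lambda, not the output of the fit run below): under exponential decay, a click that happened dt seconds before the current display keeps attribution probability exp(-lambda*dt), so the bid factor applied to that display is 1 - exp(-lambda*dt)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Sketch with an assumed lambda (substitute the fitted global_lamb once it has been computed below).\n",
"lamb_example = 1e-6\n",
"gaps = np.array([3600., 6*3600., 86400., 7*86400.])  # 1h, 6h, 1 day, 7 days since the last click\n",
"attr_prob = np.exp(-lamb_example*gaps)  # probability that the previous click still gets the attribution\n",
"bid_factor = 1 - attr_prob              # share of the expected value left for the current display\n",
"for g, a, b in zip(gaps, attr_prob, bid_factor):\n",
"    print('gap = %7.1f h   attribution prob = %.3f   bid factor = %.3f' % (g/3600., a, b))"
]
},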
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"global_avg_tts, global_lamb = learn_attribution_model(df, 21, 20)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Compute AA attributions on full dataset\n",
"As explained in the paper, the exponential decay parameter is stable across days. To reduce computation time, we therefore compute the exponential-decay attributions once on the full dataset."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def compute_aa_attributions(test_info, normalize=True):\n",
"    test_info['idx'] = test_info.index\n",
"    converted = test_info[test_info.all_clicks==1]\n",
"    # to properly compute normalized attribution factors, we have to reconstruct the timelines for each conversion\n",
"    conversion_ids = converted['conversion_id'].unique()\n",
"    # now reconstruct timeline and apply attribution\n",
"    by_conversion = converted[['conversion_id', 'timestamp', 'idx', 'bf_pred', 'time_since_last_click', 'last_click']].groupby('conversion_id')\n",
"    new_clicks_data = []\n",
"\n",
"    s_attr = []\n",
"    s_attr_lc = []\n",
"    # for each conversion compute attribution for each click\n",
"    for conv, evts in by_conversion:\n",
"        sorted_clicks = sorted(evts.values.tolist(), key=lambda x: x[1])\n",
"        bf_pred = [_[3] for _ in sorted_clicks]\n",
"        sum_bf = np.sum(bf_pred)\n",
"        sum_lc = np.sum([_[5] for _ in sorted_clicks])\n",
"        sum_attr = 0.0\n",
"        for pos, (_, _, idx_, bf_, tslc_, lc_) in enumerate(sorted_clicks):\n",
"            aa_attr = bf_pred[pos]\n",
"            if normalize:\n",
"                if sum_bf>0.0:\n",
"                    aa_attr/=sum_bf\n",
"                else:\n",
"                    aa_attr = 0.0\n",
"            sum_attr += aa_attr\n",
"            new_clicks_data.append((idx_, aa_attr))\n",
"        s_attr.append(sum_attr)\n",
"        s_attr_lc.append(sum_lc)\n",
"\n",
"    # now for each click, apply attribution from computed data\n",
"    new_clicks_df = pd.DataFrame(columns=['click_idx', 'aa_attribution'])\n",
"    cidx, attr = zip(*new_clicks_data)\n",
"    new_clicks_df['click_idx'] = cidx\n",
"    new_clicks_df['aa_attribution'] = attr\n",
"    new_clicks_df = new_clicks_df.set_index('click_idx')\n",
"    joined = test_info.join(new_clicks_df)\n",
"    joined['aa_attribution'] = joined['aa_attribution'].fillna(value = 0.0)\n",
"    return joined['aa_attribution']"
]
},
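{
"cell_type": "markdown",
"metadata": {},
"source": [
"A tiny illustrative call on hand-made rows (hypothetical values, not rows of the dataset): one conversion with two clicks whose bid factors are 0.2 and 0.6. With `normalize=True` the attributions of a conversion are rescaled to sum to 1; without normalization the raw bid factors are returned."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Sketch on two hand-made clicks belonging to the same conversion.\n",
"toy_clicks = pd.DataFrame({'conversion_id': [42, 42],\n",
"                           'timestamp': [1000, 2000],\n",
"                           'all_clicks': [1, 1],\n",
"                           'last_click': [0, 1],\n",
"                           'time_since_last_click': [-1, 1000],\n",
"                           'bf_pred': [0.2, 0.6]})\n",
"print(compute_aa_attributions(toy_clicks.copy(), normalize=True).values)   # [0.25, 0.75]: sums to 1 per conversion\n",
"print(compute_aa_attributions(toy_clicks.copy(), normalize=False).values)  # [0.2, 0.6]: raw bid factors"
]
},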
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# learn global attribution model\n",
"avg_tts, lamb = learn_attribution_model(df, 21, 20)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# compute the bid factor from aa attribution for each display\n",
"gap_test = df.time_since_last_click.values\n",
"previous_tslc_mask = (df.time_since_last_click >= 0).astype(float)\n",
"attr_pred = np.exp(-lamb*gap_test)\n",
"attr_pred *= previous_tslc_mask\n",
"bf_pred = 1 - attr_pred\n",
"df['bf_pred'] = bf_pred\n",
"df['AA_normed'] = compute_aa_attributions(df, normalize=True)\n",
"df['AA_not_normed'] = compute_aa_attributions(df, normalize=False)\n",
"INFOS += ['bf_pred', 'AA_normed', 'AA_not_normed']"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Validation Code\n",
"Utility methods for performing validation (test on one day, train on the previous x days, then slide)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def get_train_test_slice(df_view, test_day, learning_duration, label, features=None,\n",
"                         hash_space=2**24, nrows=None, infos=None):\n",
"    df_test = df_view[df_view.day == test_day]\n",
"    if nrows is not None:\n",
"        df_test = df_test[:nrows]\n",
"    if features is None:\n",
"        features = FEATURES\n",
"    if infos is None:\n",
"        infos = INFOS\n",
"    df_train = df_view[(df_view.day >= test_day - learning_duration) & (df_view.day < test_day)]\n",
"    if nrows is not None:\n",
"        df_train = df_train[:nrows]\n",
"\n",
"    X_train = df_train[features]\n",
"    X_test = df_test[features]\n",
"\n",
"    hasher = FeatureHasher(n_features=hash_space, non_negative=1)\n",
"\n",
"    def to_dict_values(df_view):\n",
"        return [dict([(_[0]+str(_[1]),1) for _ in zip(features,l)]) for l in df_view.values]\n",
"\n",
"    X_train_h = hasher.fit_transform(to_dict_values(X_train))\n",
"    X_test_h = hasher.transform(to_dict_values(X_test))\n",
"    y_train = df_train[label]\n",
"    y_test = df_test[label]\n",
"    return (X_train_h, y_train), (X_test_h, y_test), df_test[infos], df_train.last_click.mean()"
]
},
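{
"cell_type": "markdown",
"metadata": {},
"source": [
"A small sketch of what `to_dict_values` feeds to the `FeatureHasher` (the categorical values below are made up): every feature value becomes a string token `name+value` mapped to 1, and the hasher projects those tokens into a sparse vector of dimension `hash_space`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Sketch with made-up categorical values for a single display.\n",
"example_features = ['campaign', 'cat1']\n",
"example_row = [12345, 678]\n",
"token_dict = dict([(name+str(val), 1) for name, val in zip(example_features, example_row)])\n",
"print(token_dict)  # {'campaign12345': 1, 'cat1678': 1}\n",
"small_hasher = FeatureHasher(n_features=2**10, non_negative=1)\n",
"hashed = small_hasher.fit_transform([token_dict])\n",
"print(hashed.shape, hashed.nnz)  # (1, 1024) sparse row with 2 non-zeros (barring hash collisions)"
]
},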
{
"cell_type": "markdown",
"metadata": {
"collapsed": true
},
"source": [
"### Compute Utilities"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"from scipy.special import gammainc\n",
"def empirical_utility(a, v, c, p):\n",
"    won = np.array(p*v > c, dtype=np.int)\n",
"    return (a*v)*won, -c*won\n",
"\n",
"def expected_utility(a, v, c, p, beta=1000):\n",
"    return a*v*gammainc(beta*c+1, beta*p*v) - ((beta*c+1)/beta)*gammainc(beta*c+2, beta*p*v)"
]
},
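{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick numeric sketch (attribution, value, cost and predicted probability below are made-up scalars): as beta grows, the smoothed expected utility approaches the empirical, hard-threshold utility, which is why beta=inf is treated as the empirical case in the evaluation below."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Sketch with made-up scalars: attribution a, conversion value v, display cost c, predicted proba p.\n",
"a_, v_, c_, p_ = 1.0, 2.0, 0.5, 0.4\n",
"revenue, cost = empirical_utility(a_, v_, c_, p_)\n",
"print('empirical utility (beta=inf):', revenue + cost)  # a*v - c if the auction is won (p*v > c), else 0\n",
"for beta in [10, 100, 1000, 10000]:\n",
"    print('expected utility, beta=%5d:' % beta, expected_utility(a_, v_, c_, p_, beta=beta))"
]
},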
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def evaluate_utility(y_pred, utilities, betas, test_info):\n",
"    partial_score = dict()\n",
"    for utility in utilities:\n",
"        attribution = test_info[utility]\n",
"        for beta in betas:\n",
"            if np.isinf(beta):\n",
"                est_utility = empirical_utility(attribution, test_info.cpo, test_info.cost, y_pred)\n",
"            else:\n",
"                est_utility = expected_utility(attribution, test_info.cpo, test_info.cost, y_pred, beta=beta)\n",
"            beta_str = str(beta) if not np.isinf(beta) else 'inf'\n",
"            partial_score['utility-'+utility+'-beta'+beta_str] = est_utility\n",
"    return partial_score"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def get_naive_baseline(y_train, X_test):\n",
"    return np.mean(y_train)*np.ones(X_test.shape[0])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def evaluate_day_for_bidder(df_view, test_day, learning_duration, bidder, utilities, betas,\n",
"                            hash_space=None, features=None, clf=None, AA_bidder_label=None, recalibrate=True):\n",
"    score = dict()\n",
"    bid_profile = dict()\n",
"    label = bidder\n",
"    if bidder == 'AA':\n",
"        label = AA_bidder_label\n",
"    # get data slice\n",
"    (X_train, y_train), (X_test, y_test), test_info, y_train_lc_mean = get_train_test_slice(df_view,\n",
"                                                                                            test_day,\n",
"                                                                                            learning_duration,\n",
"                                                                                            label=label,\n",
"                                                                                            hash_space=hash_space,\n",
"                                                                                            features=features)\n",
"\n",
"    # learn the model\n",
"    clf.fit(X_train, y_train)\n",
"\n",
"    # get test predictions\n",
"    y_pred = clf.predict_proba(X_test)[:,1]\n",
"\n",
"    # if aa bidder: modulate the bids by the bid factor computed from the attribution model\n",
"    if bidder == 'AA':\n",
"        y_pred *= test_info['bf_pred']\n",
"\n",
"    # compute the loss\n",
"    loss = log_loss(y_test, y_pred, normalize=0)\n",
"\n",
"    # loss of baseline model\n",
"    baseline_loss = log_loss(y_test, get_naive_baseline(y_train, X_test), normalize=0)\n",
"    score['nllh'] = loss\n",
"    score['nllh_naive'] = baseline_loss\n",
"\n",
"    # do we recalibrate the output (i.e. the mean prediction)? This is usually done by a control system.\n",
"    if recalibrate:\n",
"        y_pred *= (y_train_lc_mean / y_pred.mean())\n",
"\n",
"    # how many displays are won?\n",
"    won = (y_pred*test_info.cpo > test_info.cost).astype(int)\n",
"    score['won'] = np.sum(won)\n",
"    score['n_auctions'] = y_pred.shape[0]\n",
"\n",
"    # compute the scores on this slice\n",
"    score.update(evaluate_utility(y_pred, utilities, betas, test_info))\n",
"\n",
"    # store bid profiles\n",
"    bid_profile['time_since_last_click'] = test_info.time_since_last_click\n",
"    bid_profile['bid'] = y_pred\n",
"\n",
"    return score, bid_profile"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Simple utility functions to manipulate scores"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def merge_utility_score(score):\n",
"    updates = dict()\n",
"    for k,v in score.items():\n",
"        if not 'utility' in k:\n",
"            continue\n",
"        if 'inf' in k:\n",
"            revenue, cost = v\n",
"            updates[k] = np.sum(cost) + np.sum(revenue)\n",
"            updates[k+'~revenue'] = np.sum(revenue)\n",
"            updates[k+'~cost'] = np.sum(cost)\n",
"            v = revenue + cost\n",
"        else:\n",
"            updates[k] = np.sum(v)\n",
"        bounds = bootstrap(v, 100, np.sum, .05)\n",
"        delta = (bounds[1]-bounds[0])/2.\n",
"        updates[k+'-delta'] = delta\n",
"    score.update(updates)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def update_score(partial_score, score):\n",
"    for k, v in partial_score.items():\n",
"        if 'utility' in k:\n",
"            if 'inf' in k:\n",
"                revenue, cost = v\n",
"                print('\\t\\t', k, np.sum(cost)+np.sum(revenue))\n",
"                current_revenue, current_cost = score.get(k, (np.array([]),np.array([])))\n",
"                score[k] = (\n",
"                    np.append(current_revenue, revenue),\n",
"                    np.append(current_cost, cost)\n",
"                )\n",
"            else:\n",
"                print('\\t\\t', k, np.sum(v))\n",
"                score[k] = np.append(score.get(k, np.array([])), v)\n",
"        else:\n",
"            print('\\t\\t', k, v)\n",
"            score[k] = score.get(k, 0) + v"
]
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": true
},
"source": [
"### Evaluate several bidders on several utility metric variants"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"from datetime import datetime, timedelta\n",
"def evaluate_slices(df_view,\n",
"                    bidders=['last_click', 'first_click', 'AA'],\n",
"                    utilities=['last_click', 'first_click', 'AA_normed', 'AA_not_normed'],\n",
"                    betas=[np.inf, 10, 1000],\n",
"                    test_days=[22],\n",
"                    learning_duration=21,\n",
"                    hash_space=2**24,\n",
"                    features=None,\n",
"                    AA_bidder_label='all_clicks',\n",
"                    clf = LogisticRegression(solver='lbfgs', n_jobs=4),\n",
"                    recalibrate = True):\n",
"    bid_profiles = []\n",
"    scores = []\n",
"    for bidder in bidders:\n",
"        print('*'*80)\n",
"        print(\"EVALUATING BIDDER:\", bidder)\n",
"        score = dict()\n",
"        bid_profile = dict()\n",
"        for test_day in test_days:\n",
"            start = datetime.now()\n",
"            print('\\t- day:', test_day)\n",
"            partial_score, partial_bid_profile = evaluate_day_for_bidder(\n",
"                df_view, test_day, learning_duration, bidder,\n",
"                utilities, betas,\n",
"                hash_space=hash_space, features=features, clf=clf,\n",
"                AA_bidder_label=AA_bidder_label, recalibrate=recalibrate\n",
"            )\n",
"            update_score(partial_score, score)\n",
"            for k, v in partial_bid_profile.items():\n",
"                bid_profile[k] = np.append(bid_profile.get(k, np.array([])), v)\n",
"            print('\\t- took', datetime.now() - start)\n",
"        score['bidder'] = bidder\n",
"        bid_profile['bidder'] = bidder\n",
"        score['nllh_comp_vn'] = (score['nllh_naive'] - score['nllh']) / np.abs(score['nllh_naive'])\n",
"        score['win_rate'] = score['won'] / score['n_auctions']\n",
"        merge_utility_score(score)\n",
"        scores.append(score)\n",
"        bid_profiles.append(bid_profile)\n",
"    return pd.DataFrame(scores), pd.DataFrame(bid_profiles)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Run & Results"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true,
"scrolled": true
},
"outputs": [],
"source": [
"# full run\n",
"if False:\n",
"    scores, bid_profiles = evaluate_slices(df,\n",
"                                           bidders=['last_click',\n",
"                                                    'first_click',\n",
"                                                    'AA'],\n",
"                                           utilities=['last_click',\n",
"                                                      'first_click',\n",
"                                                      'AA_normed',\n",
"                                                      'AA_not_normed'],\n",
"                                           test_days=range(22,29),\n",
"                                           learning_duration=21,\n",
"                                           hash_space=2**18,\n",
"                                           AA_bidder_label='all_clicks')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false,
"scrolled": true
},
"outputs": [],
"source": [
"# simple debug run\n",
"if True:\n",
"    scores, bid_profiles = evaluate_slices(df,\n",
"                                           bidders=['last_click',\n",
"                                                    'AA'],\n",
"                                           utilities=['last_click',\n",
"                                                      'AA_normed'],\n",
"                                           test_days=range(22,23),\n",
"                                           learning_duration=5,\n",
"                                           hash_space=2**13,\n",
"                                           AA_bidder_label='all_clicks')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"scores"
]
},
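{
"cell_type": "markdown",
"metadata": {},
"source": [
"The `bid_profiles` frame returned above keeps, for each bidder, the bids and the time since last click of every test auction. The sketch below (the hour-level binning is an arbitrary choice, not taken from the paper) plots the average bid as a function of the time since the last click, to visualize how the AA bidder shades its bids right after a click."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Sketch: average bid vs. time since last click, per bidder (hour-level bins, arbitrary choice).\n",
"for _, row in bid_profiles.iterrows():\n",
"    tslc = np.array(row['time_since_last_click'], dtype=float)\n",
"    bids = np.array(row['bid'], dtype=float)\n",
"    mask = tslc >= 0  # keep only displays preceded by at least one click\n",
"    hours = np.floor(tslc[mask] / 3600.)\n",
"    profile = pd.Series(bids[mask]).groupby(hours).mean()\n",
"    plt.plot(profile.index, profile.values, label=row['bidder'])\n",
"plt.xlabel('hours since last click')\n",
"plt.ylabel('average bid')\n",
"plt.legend()"
]
},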
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
],
"metadata": {
"anaconda-cloud": {},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.3"
}
},
"nbformat": 4,
"nbformat_minor": 1
}
criteo_attribution_dataset.tsv.gz
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:94ac7a465564349bc7ba008602211d5990a3c53cc133abc0aadef61ea2391a98
size 653015824