Keziaa committed on
Commit
8aef260
1 Parent(s): 71ae818

Upload 10 files

HuggingFace/app.py ADDED
@@ -0,0 +1,42 @@
+ import streamlit as st
+ import pandas as pd
+ import numpy as np
+ import joblib
+
+
+ with open('model_lin_reg.pkl', 'rb') as file_1:
+     model_lin_reg = joblib.load(file_1)
+
+ with open('model_scaler.pkl', 'rb') as file_2:
+     model_scaler = joblib.load(file_2)
+
+ with open('model_encoder.pkl', 'rb') as file_3:
+     model_encoder = joblib.load(file_3)
+
+ with open('list_num_cols.txt', 'rb') as file_4:
+     num_cols = joblib.load(file_4)
+
+ with open('list_cat_cols.txt', 'rb') as file_5:
+     cat_cols = joblib.load(file_5)
+
+
+ hour = st.slider('Enter the hour: ', 0, 23)
+ distance = st.number_input('Enter the distance in miles: ')
+ cab_type = st.radio('Lyft/Uber: ', ('Lyft', 'Uber'))
+ name = st.selectbox('Select the service type: ', ('Shared', 'Lux', 'UberPool', 'Lyft XL', 'Black', 'Lyft', 'UberXL',
+                     'UberX', 'WAV', 'Lux Black', 'Black SUV', 'Lux Black XL'))
+ destination = st.selectbox('Select the destination: ', ('North Station', 'Fenway', 'West End', 'Back Bay',
+                            'Haymarket Square', 'Theatre District', 'South Station',
+                            'Northeastern University', 'North End', 'Financial District',
+                            'Beacon Hill', 'Boston University'))
+ icon = st.selectbox('Select the current weather: ', (' cloudy ', ' partly-cloudy-day ', ' rain ', ' clear-night ',
+                     ' partly-cloudy-night ', ' fog ', ' clear-day '))
+
+ if st.button('Predict'):
+     data_inf = pd.DataFrame({'hour': hour, 'distance': distance, 'cab_type': cab_type, 'name': name, 'destination': destination, 'icon': icon}, index=[0])
+     data_inf_scaled = model_scaler.transform(data_inf[num_cols])
+     data_inf_encoded = model_encoder.transform(data_inf[cat_cols])
+
+     data_inf_fix = np.concatenate([data_inf_scaled, data_inf_encoded], axis=1)
+     hasil = model_lin_reg.predict(data_inf_fix)
+     st.write('Predicted price:', hasil[0])
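
For context, a minimal sketch of how the pickled artifacts this app loads could have been produced. The preprocessor classes (StandardScaler, OneHotEncoder), the tiny stand-in training frame df_train, and the price target are assumptions for illustration; only the output file names match the files in this commit.

import joblib
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import OneHotEncoder, StandardScaler

# Stand-in for the real rideshare training data (assumption).
df_train = pd.DataFrame({
    'distance': [0.5, 2.3, 4.1, 1.7],
    'icon': [' rain ', ' cloudy ', ' fog ', ' rain '],
    'name': ['UberX', 'Lyft', 'Shared', 'UberX'],
    'cab_type': ['Uber', 'Lyft', 'Uber', 'Uber'],
    'price': [7.5, 16.0, 22.5, 10.0],
})
num_cols = ['distance']                  # mirrors list_num_columns.txt
cat_cols = ['icon', 'name', 'cab_type']  # mirrors list_cat_columns.txt

scaler = StandardScaler().fit(df_train[num_cols])
# sparse_output=False (named sparse=False before scikit-learn 1.2) keeps the
# encoded matrix dense so np.concatenate works downstream.
encoder = OneHotEncoder(sparse_output=False, handle_unknown='ignore').fit(df_train[cat_cols])

X = np.concatenate(
    [scaler.transform(df_train[num_cols]), encoder.transform(df_train[cat_cols])],
    axis=1,
)
model = LinearRegression().fit(X, df_train['price'])

# Dump everything under the names app.py expects.
joblib.dump(model, 'model_lin_reg.pkl')
joblib.dump(scaler, 'model_scaler.pkl')
joblib.dump(encoder, 'model_encoder.pkl')
joblib.dump(num_cols, 'list_num_cols.txt')
joblib.dump(cat_cols, 'list_cat_cols.txt')

The app itself can then be served locally with `streamlit run app.py`.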
HuggingFace/h8dsft_P1G1_Inference_KeziaIntan.ipynb ADDED
@@ -0,0 +1,610 @@
+ {
+  "cells": [
+   {
+    "attachments": {},
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "## Model Inference"
+    ]
+   },
+   {
+    "attachments": {},
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "Import the libraries we use."
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 1,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "import numpy as np\n",
+     "import pandas as pd\n",
+     "import joblib"
+    ]
+   },
+   {
+    "attachments": {},
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "### Load Model"
+    ]
+   },
+   {
+    "attachments": {},
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "Load the models we saved."
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 2,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# load all files\n",
+     "\n",
+     "with open('model_lin_reg.pkl', 'rb') as file_1:\n",
+     "    model_lin_reg = joblib.load(file_1)\n",
+     "\n",
+     "with open('model_scaler.pkl', 'rb') as file_2:\n",
+     "    model_scaler = joblib.load(file_2)\n",
+     "\n",
+     "with open('model_encoder.pkl', 'rb') as file_3:\n",
+     "    model_encoder = joblib.load(file_3)\n",
+     "\n",
+     "with open('list_num_cols.txt', 'rb') as file_4:\n",
+     "    num_cols = joblib.load(file_4)\n",
+     "\n",
+     "with open('list_cat_cols.txt', 'rb') as file_5:\n",
+     "    cat_cols = joblib.load(file_5)"
+    ]
+   },
+   {
+    "attachments": {},
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "### Get Data for Model Inference"
+    ]
+   },
+   {
+    "attachments": {},
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "Generate random samples for inference."
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 3,
+    "metadata": {},
+    "outputs": [
+     {
+      "data": {
+       "text/html": [
+        "<div>\n",
+        "<style scoped>\n",
+        " .dataframe tbody tr th:only-of-type {\n",
+        " vertical-align: middle;\n",
+        " }\n",
+        "\n",
+        " .dataframe tbody tr th {\n",
+        " vertical-align: top;\n",
+        " }\n",
+        "\n",
+        " .dataframe thead th {\n",
+        " text-align: right;\n",
+        " }\n",
+        "</style>\n",
+        "<table border=\"1\" class=\"dataframe\">\n",
+        " <thead>\n",
+        " <tr style=\"text-align: right;\">\n",
+        " <th></th>\n",
+        " <th>hour</th>\n",
+        " <th>distance</th>\n",
+        " <th>cab_type</th>\n",
+        " <th>name</th>\n",
+        " <th>destination</th>\n",
+        " <th>icon</th>\n",
+        " </tr>\n",
+        " </thead>\n",
+        " <tbody>\n",
+        " <tr>\n",
+        " <th>0</th>\n",
+        " <td>1</td>\n",
+        " <td>0.316562</td>\n",
+        " <td>Uber</td>\n",
+        " <td>Lux Black XL</td>\n",
+        " <td>South Station</td>\n",
+        " <td>clear-day</td>\n",
+        " </tr>\n",
+        " <tr>\n",
+        " <th>1</th>\n",
+        " <td>23</td>\n",
+        " <td>3.954514</td>\n",
+        " <td>Uber</td>\n",
+        " <td>Black SUV</td>\n",
+        " <td>South Station</td>\n",
+        " <td>clear-night</td>\n",
+        " </tr>\n",
+        " <tr>\n",
+        " <th>2</th>\n",
+        " <td>12</td>\n",
+        " <td>6.066497</td>\n",
+        " <td>Uber</td>\n",
+        " <td>UberPool</td>\n",
+        " <td>Theatre District</td>\n",
+        " <td>partly-cloudy-night</td>\n",
+        " </tr>\n",
+        " <tr>\n",
+        " <th>3</th>\n",
+        " <td>1</td>\n",
+        " <td>2.545956</td>\n",
+        " <td>Uber</td>\n",
+        " <td>UberPool</td>\n",
+        " <td>Northeastern University</td>\n",
+        " <td>clear-night</td>\n",
+        " </tr>\n",
+        " <tr>\n",
+        " <th>4</th>\n",
+        " <td>8</td>\n",
+        " <td>1.456490</td>\n",
+        " <td>Lyft</td>\n",
+        " <td>Lux Black XL</td>\n",
+        " <td>Theatre District</td>\n",
+        " <td>partly-cloudy-night</td>\n",
+        " </tr>\n",
+        " <tr>\n",
+        " <th>5</th>\n",
+        " <td>16</td>\n",
+        " <td>5.954119</td>\n",
+        " <td>Uber</td>\n",
+        " <td>UberX</td>\n",
+        " <td>Boston University</td>\n",
+        " <td>rain</td>\n",
+        " </tr>\n",
+        " <tr>\n",
+        " <th>6</th>\n",
+        " <td>2</td>\n",
+        " <td>2.345548</td>\n",
+        " <td>Lyft</td>\n",
+        " <td>Lux Black</td>\n",
+        " <td>North Station</td>\n",
+        " <td>fog</td>\n",
+        " </tr>\n",
+        " <tr>\n",
+        " <th>7</th>\n",
+        " <td>17</td>\n",
+        " <td>5.626220</td>\n",
+        " <td>Lyft</td>\n",
+        " <td>Lux Black XL</td>\n",
+        " <td>North Station</td>\n",
+        " <td>rain</td>\n",
+        " </tr>\n",
+        " <tr>\n",
+        " <th>8</th>\n",
+        " <td>12</td>\n",
+        " <td>1.640055</td>\n",
+        " <td>Lyft</td>\n",
+        " <td>Lux</td>\n",
+        " <td>Boston University</td>\n",
+        " <td>cloudy</td>\n",
+        " </tr>\n",
+        " <tr>\n",
+        " <th>9</th>\n",
+        " <td>19</td>\n",
+        " <td>7.819139</td>\n",
+        " <td>Lyft</td>\n",
+        " <td>Black SUV</td>\n",
+        " <td>South Station</td>\n",
+        " <td>fog</td>\n",
+        " </tr>\n",
+        " <tr>\n",
+        " <th>10</th>\n",
+        " <td>18</td>\n",
+        " <td>7.116573</td>\n",
+        " <td>Uber</td>\n",
+        " <td>Black SUV</td>\n",
+        " <td>North End</td>\n",
+        " <td>clear-day</td>\n",
+        " </tr>\n",
+        " <tr>\n",
+        " <th>11</th>\n",
+        " <td>23</td>\n",
+        " <td>4.357456</td>\n",
+        " <td>Uber</td>\n",
+        " <td>Shared</td>\n",
+        " <td>West End</td>\n",
+        " <td>clear-day</td>\n",
+        " </tr>\n",
+        " <tr>\n",
+        " <th>12</th>\n",
+        " <td>4</td>\n",
+        " <td>3.194852</td>\n",
+        " <td>Lyft</td>\n",
+        " <td>Lyft</td>\n",
+        " <td>North End</td>\n",
+        " <td>rain</td>\n",
+        " </tr>\n",
+        " <tr>\n",
+        " <th>13</th>\n",
+        " <td>10</td>\n",
+        " <td>1.256166</td>\n",
+        " <td>Uber</td>\n",
+        " <td>Lux Black XL</td>\n",
+        " <td>Financial District</td>\n",
+        " <td>rain</td>\n",
+        " </tr>\n",
+        " <tr>\n",
+        " <th>14</th>\n",
+        " <td>19</td>\n",
+        " <td>2.904539</td>\n",
+        " <td>Uber</td>\n",
+        " <td>Lux</td>\n",
+        " <td>Beacon Hill</td>\n",
+        " <td>rain</td>\n",
+        " </tr>\n",
+        " <tr>\n",
+        " <th>15</th>\n",
+        " <td>13</td>\n",
+        " <td>6.218676</td>\n",
+        " <td>Uber</td>\n",
+        " <td>Lyft XL</td>\n",
+        " <td>South Station</td>\n",
+        " <td>fog</td>\n",
+        " </tr>\n",
+        " <tr>\n",
+        " <th>16</th>\n",
+        " <td>16</td>\n",
+        " <td>7.757917</td>\n",
+        " <td>Uber</td>\n",
+        " <td>Lyft</td>\n",
+        " <td>North End</td>\n",
+        " <td>clear-day</td>\n",
+        " </tr>\n",
+        " <tr>\n",
+        " <th>17</th>\n",
+        " <td>7</td>\n",
+        " <td>1.756099</td>\n",
+        " <td>Uber</td>\n",
+        " <td>Lyft XL</td>\n",
+        " <td>Financial District</td>\n",
+        " <td>rain</td>\n",
+        " </tr>\n",
+        " <tr>\n",
+        " <th>18</th>\n",
+        " <td>19</td>\n",
+        " <td>6.055154</td>\n",
+        " <td>Lyft</td>\n",
+        " <td>Black SUV</td>\n",
+        " <td>North Station</td>\n",
+        " <td>cloudy</td>\n",
+        " </tr>\n",
+        " <tr>\n",
+        " <th>19</th>\n",
+        " <td>20</td>\n",
+        " <td>2.920731</td>\n",
+        " <td>Uber</td>\n",
+        " <td>UberX</td>\n",
+        " <td>North Station</td>\n",
+        " <td>cloudy</td>\n",
+        " </tr>\n",
+        " <tr>\n",
+        " <th>20</th>\n",
+        " <td>17</td>\n",
+        " <td>1.809670</td>\n",
+        " <td>Uber</td>\n",
+        " <td>Lyft</td>\n",
+        " <td>Haymarket Square</td>\n",
+        " <td>rain</td>\n",
+        " </tr>\n",
+        " <tr>\n",
+        " <th>21</th>\n",
+        " <td>10</td>\n",
+        " <td>2.911586</td>\n",
+        " <td>Uber</td>\n",
+        " <td>Lyft XL</td>\n",
+        " <td>South Station</td>\n",
+        " <td>clear-day</td>\n",
+        " </tr>\n",
+        " <tr>\n",
+        " <th>22</th>\n",
+        " <td>15</td>\n",
+        " <td>4.871693</td>\n",
+        " <td>Lyft</td>\n",
+        " <td>Lux Black</td>\n",
+        " <td>Financial District</td>\n",
+        " <td>partly-cloudy-night</td>\n",
+        " </tr>\n",
+        " <tr>\n",
+        " <th>23</th>\n",
+        " <td>17</td>\n",
+        " <td>2.090028</td>\n",
+        " <td>Lyft</td>\n",
+        " <td>Lux Black XL</td>\n",
+        " <td>Theatre District</td>\n",
+        " <td>partly-cloudy-day</td>\n",
+        " </tr>\n",
+        " <tr>\n",
+        " <th>24</th>\n",
+        " <td>14</td>\n",
+        " <td>0.646278</td>\n",
+        " <td>Uber</td>\n",
+        " <td>Shared</td>\n",
+        " <td>Beacon Hill</td>\n",
+        " <td>rain</td>\n",
+        " </tr>\n",
+        " <tr>\n",
+        " <th>25</th>\n",
+        " <td>2</td>\n",
+        " <td>7.818779</td>\n",
+        " <td>Lyft</td>\n",
+        " <td>UberX</td>\n",
+        " <td>Northeastern University</td>\n",
+        " <td>cloudy</td>\n",
+        " </tr>\n",
+        " <tr>\n",
+        " <th>26</th>\n",
+        " <td>17</td>\n",
+        " <td>5.054022</td>\n",
+        " <td>Uber</td>\n",
+        " <td>WAV</td>\n",
+        " <td>Theatre District</td>\n",
+        " <td>partly-cloudy-day</td>\n",
+        " </tr>\n",
+        " <tr>\n",
+        " <th>27</th>\n",
+        " <td>17</td>\n",
+        " <td>2.589397</td>\n",
+        " <td>Uber</td>\n",
+        " <td>Lux Black</td>\n",
+        " <td>Back Bay</td>\n",
+        " <td>cloudy</td>\n",
+        " </tr>\n",
+        " <tr>\n",
+        " <th>28</th>\n",
+        " <td>3</td>\n",
+        " <td>0.619987</td>\n",
+        " <td>Lyft</td>\n",
+        " <td>Lux Black XL</td>\n",
+        " <td>Boston University</td>\n",
+        " <td>clear-night</td>\n",
+        " </tr>\n",
+        " <tr>\n",
+        " <th>29</th>\n",
+        " <td>11</td>\n",
+        " <td>2.514690</td>\n",
+        " <td>Uber</td>\n",
+        " <td>Black SUV</td>\n",
+        " <td>Fenway</td>\n",
+        " <td>rain</td>\n",
+        " </tr>\n",
+        " </tbody>\n",
+        "</table>\n",
+        "</div>"
+       ],
+       "text/plain": [
+        " hour distance cab_type name destination \\\n",
+        "0 1 0.316562 Uber Lux Black XL South Station \n",
+        "1 23 3.954514 Uber Black SUV South Station \n",
+        "2 12 6.066497 Uber UberPool Theatre District \n",
+        "3 1 2.545956 Uber UberPool Northeastern University \n",
+        "4 8 1.456490 Lyft Lux Black XL Theatre District \n",
+        "5 16 5.954119 Uber UberX Boston University \n",
+        "6 2 2.345548 Lyft Lux Black North Station \n",
+        "7 17 5.626220 Lyft Lux Black XL North Station \n",
+        "8 12 1.640055 Lyft Lux Boston University \n",
+        "9 19 7.819139 Lyft Black SUV South Station \n",
+        "10 18 7.116573 Uber Black SUV North End \n",
+        "11 23 4.357456 Uber Shared West End \n",
+        "12 4 3.194852 Lyft Lyft North End \n",
+        "13 10 1.256166 Uber Lux Black XL Financial District \n",
+        "14 19 2.904539 Uber Lux Beacon Hill \n",
+        "15 13 6.218676 Uber Lyft XL South Station \n",
+        "16 16 7.757917 Uber Lyft North End \n",
+        "17 7 1.756099 Uber Lyft XL Financial District \n",
+        "18 19 6.055154 Lyft Black SUV North Station \n",
+        "19 20 2.920731 Uber UberX North Station \n",
+        "20 17 1.809670 Uber Lyft Haymarket Square \n",
+        "21 10 2.911586 Uber Lyft XL South Station \n",
+        "22 15 4.871693 Lyft Lux Black Financial District \n",
+        "23 17 2.090028 Lyft Lux Black XL Theatre District \n",
+        "24 14 0.646278 Uber Shared Beacon Hill \n",
+        "25 2 7.818779 Lyft UberX Northeastern University \n",
+        "26 17 5.054022 Uber WAV Theatre District \n",
+        "27 17 2.589397 Uber Lux Black Back Bay \n",
+        "28 3 0.619987 Lyft Lux Black XL Boston University \n",
+        "29 11 2.514690 Uber Black SUV Fenway \n",
+        "\n",
+        " icon \n",
+        "0 clear-day \n",
+        "1 clear-night \n",
+        "2 partly-cloudy-night \n",
+        "3 clear-night \n",
+        "4 partly-cloudy-night \n",
+        "5 rain \n",
+        "6 fog \n",
+        "7 rain \n",
+        "8 cloudy \n",
+        "9 fog \n",
+        "10 clear-day \n",
+        "11 clear-day \n",
+        "12 rain \n",
+        "13 rain \n",
+        "14 rain \n",
+        "15 fog \n",
+        "16 clear-day \n",
+        "17 rain \n",
+        "18 cloudy \n",
+        "19 cloudy \n",
+        "20 rain \n",
+        "21 clear-day \n",
+        "22 partly-cloudy-night \n",
+        "23 partly-cloudy-day \n",
+        "24 rain \n",
+        "25 cloudy \n",
+        "26 partly-cloudy-day \n",
+        "27 cloudy \n",
+        "28 clear-night \n",
+        "29 rain "
+       ]
+      },
+      "execution_count": 3,
+      "metadata": {},
+      "output_type": "execute_result"
+     }
+    ],
+    "source": [
+     "# generate random data\n",
+     "random = np.random.default_rng(seed=2895)\n",
+     "hour = random.integers(0, 24, size=30)\n",
+     "distance = random.uniform(0.02, 7.86, size=30)\n",
+     "cab_type = random.choice(['Lyft', 'Uber'], size=30)\n",
+     "name = random.choice(['Shared', 'Lux', 'UberPool', 'Lyft XL', 'Black', 'Lyft', 'UberXL',\n",
+     "                      'UberX', 'WAV', 'Lux Black', 'Black SUV', 'Lux Black XL'], size=30)\n",
+     "destination = random.choice(['North Station', 'Fenway', 'West End', 'Back Bay',\n",
+     "                             'Haymarket Square', 'Theatre District', 'South Station',\n",
+     "                             'Northeastern University', 'North End', 'Financial District',\n",
+     "                             'Beacon Hill', 'Boston University'], size=30)\n",
+     "icon = random.choice([' cloudy ', ' partly-cloudy-day ', ' rain ', ' clear-night ',\n",
+     "                      ' partly-cloudy-night ', ' fog ', ' clear-day '], size=30)\n",
+     "data_inf = pd.DataFrame({'hour': hour, 'distance': distance, 'cab_type': cab_type, 'name': name, 'destination': destination, 'icon': icon})\n",
+     "data_inf"
+    ]
+   },
+   {
+    "attachments": {},
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "### Inference"
+    ]
+   },
+   {
+    "attachments": {},
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "Select the numerical and categorical columns from the inference data."
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 4,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# split the data\n",
+     "data_inf_num = data_inf[num_cols]\n",
+     "data_inf_cat = data_inf[cat_cols]"
+    ]
+   },
+   {
+    "attachments": {},
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "Scale and encode the inference data."
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 5,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "data_inf_scale = model_scaler.transform(data_inf_num)\n",
+     "data_inf_encode = model_encoder.transform(data_inf_cat)"
+    ]
+   },
+   {
+    "attachments": {},
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "Concatenate the scaled and encoded data."
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 6,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# concatenate the data\n",
+     "data_inf_final = np.concatenate([data_inf_scale, data_inf_encode], axis=1)"
+    ]
+   },
+   {
+    "attachments": {},
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "Now predict on the inference data."
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 7,
+    "metadata": {},
+    "outputs": [
+     {
+      "data": {
+       "text/plain": [
+        "array([13.76511146, 19.99589464, 9.31714736, 8.03002182, 12.48483861,\n",
+        " 7.64271504, 14.43800148, 13.99893239, 15.81599645, 19.69485696,\n",
+        " 21.13920469, 10.32030018, 11.48120646, 14.12233045, 17.99273736,\n",
+        " 12.63970579, 14.82575042, 11.03057652, 19.0527246 , 6.53215817,\n",
+        " 12.68647047, 11.43272228, 15.35915507, 12.71168744, 8.99184479,\n",
+        " 6.60013128, 4.03796725, 16.23093043, 12.17054192, 19.48791409])"
+       ]
+      },
+      "execution_count": 7,
+      "metadata": {},
+      "output_type": "execute_result"
+     }
+    ],
+    "source": [
+     "# predict\n",
+     "y_pred_inf = model_lin_reg.predict(data_inf_final)\n",
+     "y_pred_inf"
+    ]
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": "Python 3",
+    "language": "python",
+    "name": "python3"
+   },
+   "language_info": {
+    "codemirror_mode": {
+     "name": "ipython",
+     "version": 3
+    },
+    "file_extension": ".py",
+    "mimetype": "text/x-python",
+    "name": "python",
+    "nbconvert_exporter": "python",
+    "pygments_lexer": "ipython3",
+    "version": "3.9.5 (tags/v3.9.5:0a7dcbd, May 3 2021, 17:27:52) [MSC v.1928 64 bit (AMD64)]"
+   },
+   "orig_nbformat": 4,
+   "vscode": {
+    "interpreter": {
+     "hash": "e31aef8222fb7c235d2ed8e74ce17e973738f89b37261e7466b7a63a6dfb1214"
+    }
+   }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 2
+ }
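
The notebook repeats the same transform-concatenate-predict sequence as app.py. As a sketch, the shared steps could be factored into one helper; the function name and signature below are assumptions, not part of the commit.

import numpy as np
import pandas as pd

def predict_price(df: pd.DataFrame, scaler, encoder, model, num_cols, cat_cols) -> np.ndarray:
    # Apply the fitted preprocessors column-wise, then the regressor,
    # mirroring the notebook's transform -> concatenate -> predict cells.
    scaled = scaler.transform(df[num_cols])
    encoded = encoder.transform(df[cat_cols])
    features = np.concatenate([scaled, encoded], axis=1)
    return model.predict(features)

# Usage (with the artifacts loaded above):
# y_pred_inf = predict_price(data_inf, model_scaler, model_encoder,
#                            model_lin_reg, num_cols, cat_cols)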
HuggingFace/h8dsft_P1G1_KeziaIntan.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
HuggingFace/list_cat_cols.txt ADDED
Binary file (41 Bytes).
 
HuggingFace/list_cat_columns.txt ADDED
@@ -0,0 +1 @@
+ ['icon', 'name', 'cab_type']
HuggingFace/list_num_cols.txt ADDED
Binary file (26 Bytes).
 
HuggingFace/list_num_columns.txt ADDED
@@ -0,0 +1 @@
+ ['distance']
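
Note that despite the .txt extension, list_num_cols.txt and list_cat_cols.txt are binary joblib dumps (app.py opens them in 'rb' mode and loads them with joblib), while list_num_columns.txt and list_cat_columns.txt are plain-text copies of what appear to be the same lists. A small sketch of reading both variants, assuming they do hold the same values:

import ast
import joblib

# Binary variant: a joblib pickle, as loaded in app.py.
with open('list_num_cols.txt', 'rb') as f:
    num_cols = joblib.load(f)

# Plain-text variant: a Python literal such as "['distance']".
with open('list_num_columns.txt') as f:
    num_cols_txt = ast.literal_eval(f.read())

assert num_cols == num_cols_txt  # assumption: both files store the same list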
HuggingFace/model_encoder.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e53d92e2853797ce168048094e46bf055852cc761b7d54f8b1d342d5e970169
+ size 1761
HuggingFace/model_lin_reg.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:791885b36c69edf45f5cb102ff477bc9a3396ffe54d5135609fdc25c7f084723
+ size 600
HuggingFace/model_scaler.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d0c162d1831cdb44de404b4d712a190bbdb81f8902ee576d1c001bdf0ad6e704
+ size 1023