FredZhang7 commited on
Commit
0e4a345
1 Parent(s): b6e1a5f

Create feature_extraction.py

Files changed (1)
  1. feature_extraction.py +475 -0
feature_extraction.py ADDED
@@ -0,0 +1,475 @@
import os
os.system('pip install -q dnspython python-whois bs4 requests pandas pyOpenSSL')


import dns.resolver, dns.rdatatype
import requests
from bs4 import BeautifulSoup
from collections import Counter
import whois
from datetime import datetime
import time
import csv
import ssl
import socket
from urllib.parse import urlparse
import OpenSSL.crypto
import pandas as pd
import random
def generate_user_agent() -> str:
    a = random.randint(63, 89)
    b = random.randint(1, 3200)
    c = random.randint(1, 140)
    user_agent = f'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/{a}.0.{b}.{c} Safari/537.3'
    return user_agent


headers = {
    'User-Agent': generate_user_agent()
}
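# Note: `headers` is built once at import time, so every request made by this
# script reuses the same randomized User-Agent. If a fresh User-Agent per
# request were preferred (an optional tweak, not part of this commit), the
# dict could be rebuilt at each call site, e.g.:
#
#     response = requests.get(url, headers={'User-Agent': generate_user_agent()}, timeout=10)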
def count_domain_occurrences(soup: BeautifulSoup, domain: str) -> int:
    """
    Returns the number of occurrences of the domain in the website's page source.
    """
    try:
        domain_count = soup.prettify().count(domain)
        return domain_count
    except Exception as e:
        print(f"count_domain_occurrences: {str(e)}")
        return 0
def get_certificate_info(url: str) -> tuple[str, int]:
    """
    Returns the issuer and age (in days) of the site's TLS certificate if found. None, None otherwise
    """

    try:
        if not url.startswith("https://"):
            raise ValueError("URL must use HTTPS protocol")

        hostname = url.split("https://")[1].split("/")[0]
        ip_addresses = socket.getaddrinfo(hostname, 443)
        ip_address = ip_addresses[0][4][0]

        context = ssl.create_default_context()
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        ssl_conn = context.wrap_socket(sock, server_hostname=hostname)
        ssl_conn.connect((ip_address, 443))
        cert = ssl_conn.getpeercert()
        ssl_conn.close()

        if 'notBefore' not in cert:
            raise ValueError("Certificate information not found")

        issuer = cert['issuer'][0][0][1]
        not_before = cert['notBefore']

        # Certificate age is measured from its notBefore (issue) date, not its expiry date.
        not_before_date = datetime.strptime(not_before, '%b %d %H:%M:%S %Y %Z')
        certificate_age = (datetime.now() - not_before_date).days

        return issuer, certificate_age

    except Exception as e:
        print(f"get_certificate_info error: {str(e)}")

    return None, None
def check_sfh(soup: BeautifulSoup, domain: str) -> float:
    """
    Returns 1 if the SFH (server form handler) is "about:blank" or empty, 0.5 if the SFH refers to a different domain, and 0 otherwise
    """
    try:
        form = soup.find('form', {'method': 'post'})

        if not form:
            return 0

        sfh = form.get('action')

        if not sfh or sfh == 'about:blank':
            return 1

        sfh_domain = urlparse(sfh).netloc

        # A relative action URL has no netloc and points back to the same domain.
        if sfh_domain and sfh_domain != domain:
            return 0.5
    except Exception as e:
        print(f"check_sfh: {str(e)}")

    return 0
def age_of_domain(w: whois.WhoisEntry) -> int:
    """
    Returns the age of the domain in days, -1 if no creation or updated date is available, None if error
    """
    try:
        creation_date = w.creation_date

        if creation_date is None:
            # Domain creation date is not available, try using updated_date as a fallback
            updated_date = w.updated_date
            if updated_date is None:
                return -1
            if isinstance(updated_date, list):
                creation_date = min(updated_date)
            else:
                creation_date = updated_date

        if isinstance(creation_date, list):
            creation_date = min(creation_date)

        num_days = (datetime.now() - creation_date).days

        return num_days
    except Exception as e:
        print('age_of_domain error: ' + str(e))
        return None
def use_iframe(soup: BeautifulSoup) -> int:
    """
    Returns 1 if an iframe is present, 0 otherwise
    """
    iframes = soup.find_all('iframe')
    if len(iframes) > 0:
        return 1

    return 0


def popup_window_has_text_field(soup: BeautifulSoup) -> int:
    """
    Returns 1 if a pop-up window with a text field exists, 0 otherwise
    """
    popups = soup.find_all('div', {'class': 'popup'})
    for popup in popups:
        if popup.find('input', {'type': 'text'}):
            return 1

    return 0


def abnormal_url(url: str, w: whois.WhoisEntry) -> int:
    """
    Returns 1 if the hostname is not in the URL, 0 otherwise.
    """
    host_name = w.domain.split('.')[0]
    if host_name not in url:
        return 1
    else:
        return 0
def dns_record(domain: str) -> tuple[int, int, int]:
    """
    Returns TTL, IP address count and TXT record presence in a tuple of integers.
    Returns None, None, None if the DNS record is not found.
    """
    try:
        answers = dns.resolver.resolve(domain)
        TTL = answers.rrset.ttl
        IP_addresses = len(answers)

        # The default lookup above returns A records, so TXT presence needs its own query.
        try:
            dns.resolver.resolve(domain, dns.rdatatype.TXT)
            TXT_records = 1
        except Exception:
            TXT_records = 0

        return TTL, IP_addresses, TXT_records
    except dns.resolver.NXDOMAIN:
        return None, None, None
    except Exception as e:
        print(f"dns_record error: {str(e)}")
        return None, None, None
def not_indexed_by_google(url: str) -> int:
    """
    Returns 1 if not indexed by Google, 0 if indexed, -1 if error
    """
    # Query Google's index with the site: operator; the target page itself would
    # never contain Google's "did not match any documents" message.
    search_url = f"https://www.google.com/search?q=site:{url}"
    response = make_request(search_url, headers, timeout=10, retries=3)
    if response is None:
        return -1

    if "did not match any documents" in response.text:
        return 1
    else:
        return 0
def right_click_disabled(soup: BeautifulSoup) -> int:
    """
    Returns 1 if right click is disabled, 0 otherwise.
    """
    for script in soup.find_all('script'):
        if 'event.button==2' in script.text:
            return 1
    return 0


def mouseover_changes(soup: BeautifulSoup) -> int:
    """
    Returns 1 if the mouseover event changes the status bar, 0 otherwise
    """
    onMouseOver_elements = soup.find_all(onmouseover=True)
    for element in onMouseOver_elements:
        if "window.status" in str(element):
            return 1
    return 0


def redirects(response: requests.Response) -> int:
    """
    Returns the number of redirects
    """
    return len(response.history)
def meta_script_link_percentage(soup: BeautifulSoup) -> tuple[float, float, float]:
    """
    Returns the share of links found in meta, script, and link tags, each as a fraction of all links across those three tag types
    """
    meta_tags = soup.find_all('meta')
    script_tags = soup.find_all('script')
    link_tags = soup.find_all('link')

    meta_links = sum(1 for tag in meta_tags if tag.has_attr('href'))
    script_links = sum(1 for tag in script_tags if tag.has_attr('src'))
    link_links = sum(1 for tag in link_tags if tag.has_attr('href'))

    total_links = meta_links + script_links + link_links
    if total_links == 0:
        return 0, 0, 0
    meta_percentage = meta_links / total_links
    script_percentage = script_links / total_links
    link_percentage = link_links / total_links

    return meta_percentage, script_percentage, link_percentage
def url_anchor_percentage(soup: BeautifulSoup) -> float:
    """
    Returns the percentage of anchor links on the page with different domain names,
    excluding anchor links with JavaScript or invalid URLs.
    """
    total_links = 0
    anchor_links = 0

    # The domain of the first anchor's href is used as the page's reference domain.
    first_a_tag = soup.find('a')
    if first_a_tag is None or not first_a_tag.get('href'):
        return 0

    domain = urlparse(first_a_tag.get('href')).netloc
    if not domain:
        return 0

    for a_tag in soup.find_all('a'):
        href = a_tag.get('href')
        if href:
            if href.startswith('javascript:') or href.startswith('#'):
                continue

            link_domain = urlparse(href).netloc
            if link_domain and link_domain != domain:
                anchor_links += 1
            total_links += 1

    if total_links == 0:
        return 0

    return anchor_links / total_links
def request_url_percentage(soup: BeautifulSoup, domain: str) -> float:
    """
    Returns the fraction of resource URLs (anchors, images, videos, audio) that point to an external domain
    """
    links = [link.get('href') for link in soup.find_all('a')]
    images = [img.get('src') for img in soup.find_all('img')]
    videos = [video.get('src') for video in soup.find_all('video')]
    sounds = [sound.get('src') for sound in soup.find_all('audio')]

    total_links = 0
    external_links = 0

    for link in links + images + videos + sounds:
        if link is None:
            continue
        total_links += 1
        parsed_domain = urlparse(link).netloc
        # Relative URLs have no netloc and count as same-domain.
        if parsed_domain != '' and parsed_domain != domain:
            external_links += 1

    if total_links == 0:
        return 0

    return external_links / total_links
def has_suspicious_port(domain: str) -> int:
    """
    Returns 1 if a port that should normally be closed (FTP, SSH, Telnet, SMB, database, RDP) is open, 0 otherwise.
    """
    preferred_ports = {
        21: "Close",
        22: "Close",
        23: "Close",
        80: "Open",
        443: "Open",
        445: "Close",
        1433: "Close",
        1521: "Close",
        3306: "Close",
        3389: "Close"
    }
    for port, status in preferred_ports.items():
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.settimeout(0.1)
            result = sock.connect_ex((domain, port))
            sock.close()
            # A reachable port that is expected to be closed is suspicious.
            if result == 0 and status == "Close":
                return 1
        except Exception:
            pass

    return 0
def external_favicons(soup: BeautifulSoup, domain: str) -> int:
    """
    Returns the number of favicons loaded from external domains.
    """
    favicon_links = soup.find_all('link', {'rel': 'icon'})
    external_favicons = 0

    for link in favicon_links:
        href = link.get('href')

        if href:
            href_domain = urlparse(href).netloc

            # Relative favicon URLs have no netloc and belong to the same domain.
            if href_domain and href_domain != domain:
                external_favicons += 1

    return external_favicons
def domain_registration_length(w: whois.WhoisEntry) -> int:
    """
    Returns the number of days until the domain registration expires, None if error
    """
    try:
        expiration_date = w.expiration_date
        if isinstance(expiration_date, list):
            expiration_date = expiration_date[0]
        if expiration_date is not None:
            time_to_expire = (expiration_date - datetime.now()).days
            return time_to_expire
        else:
            return 0
    except Exception as e:
        print('domain_registration_length error: ' + str(e))
        return None
def check_email_submission(soup: BeautifulSoup) -> int:
    """
    Returns 1 if "mail()" or "mailto:" is used, 0 otherwise.
    """
    try:
        forms = soup.find_all('form')
        for form in forms:
            if 'mail(' in str(form) or 'mailto:' in str(form):
                return 1
        return 0
    except:
        return 0


def make_request(url: str, headers: dict, timeout: int, retries: int) -> requests.Response:
    for i in range(retries):
        try:
            response = requests.get(url, headers=headers, timeout=timeout, allow_redirects=True)
            response.raise_for_status()
            return response
        except requests.exceptions.RequestException as e:
            retry_delay = 2**i
            print(f'\033[34mRequestException for {url}: {e}. Retrying in {retry_delay} seconds...\033[0m')
            time.sleep(retry_delay)
        except Exception as e:
            print(f'\033[31mError making request for {url}: {e}\033[0m')
            return None
    print(f'\033[31mFailed to make request after {retries} retries.\033[0m')
    return None
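# With the defaults used below (retries=3), make_request backs off 2**i seconds
# between attempts, i.e. it waits 1 s, 2 s, and 4 s before giving up.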
def collect_data(url: str, is_malicious: bool):
    start_time = time.time()

    try:
        response = make_request(url, headers, timeout=10, retries=3)
        if response is None:
            return
        redirects_value = redirects(response)
    except Exception as e:
        print(f'Error making request: {e}')
        return
    not_indexed_by_google_value = not_indexed_by_google(url)
    issuer, certificate_age = get_certificate_info(url)

    try:
        soup = BeautifulSoup(response.content, 'html.parser')
        email_submission_value = check_email_submission(soup)
        url_anchor_percentage_value = url_anchor_percentage(soup)
        meta_percentage, script_percentage, link_percentage = meta_script_link_percentage(soup)
        mouseover_changes_value = mouseover_changes(soup)
        right_click_disabled_value = right_click_disabled(soup)
        popup_window_has_text_field_value = popup_window_has_text_field(soup)
        use_iframe_value = use_iframe(soup)
    except Exception as e:
        print('soup error, double check your code: ' + str(e))
        return

    try:
        parsed_url = urlparse(url)
        domain = parsed_url.netloc
        has_suspicious_port_value = has_suspicious_port(domain)
        request_url_percentage_value = request_url_percentage(soup, domain)
        external_favicons_value = external_favicons(soup, domain)
        TTL, ip_address_count, TXT_record = dns_record(domain)
        check_sfh_value = check_sfh(soup, domain)
        count_domain_occurrences_value = count_domain_occurrences(soup, domain)
    except Exception as e:
        print('urlparse error, double check your code: ' + str(e))
        return

    try:
        w = whois.whois(domain)
        domain_registration_length_value = domain_registration_length(w)
        abnormal_url_value = abnormal_url(url, w)
        age_of_domain_value = age_of_domain(w)
    except Exception as e:
        print('whois error: ' + str(e))
        domain_registration_length_value = -1
        abnormal_url_value = 1
        age_of_domain_value = -1

    print(f"{url} took {time.time() - start_time:.2f} seconds to complete")

    row = [url, redirects_value, not_indexed_by_google_value, issuer, certificate_age, email_submission_value,
           request_url_percentage_value, url_anchor_percentage_value, meta_percentage, script_percentage,
           link_percentage, mouseover_changes_value, right_click_disabled_value, popup_window_has_text_field_value,
           use_iframe_value, has_suspicious_port_value, external_favicons_value, TTL, ip_address_count, TXT_record,
           check_sfh_value, count_domain_occurrences_value, domain_registration_length_value, abnormal_url_value,
           age_of_domain_value, is_malicious]

    with open('phishing_detection_dataset.csv', mode='a', newline='') as file:
        writer = csv.writer(file)
        writer.writerow(row)
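# --- Example driver (illustrative only) ---
# The commit does not include a __main__ block or a CSV header row, so the snippet
# below is a minimal sketch of how collect_data might be driven. The sample URL list
# and the column names (taken from the order of `row` above) are assumptions, not
# part of the original script.
if __name__ == '__main__':
    columns = ['url', 'redirects', 'not_indexed_by_google', 'issuer', 'certificate_age', 'email_submission',
               'request_url_percentage', 'url_anchor_percentage', 'meta_percentage', 'script_percentage',
               'link_percentage', 'mouseover_changes', 'right_click_disabled', 'popup_window_has_text_field',
               'use_iframe', 'has_suspicious_port', 'external_favicons', 'TTL', 'ip_address_count', 'TXT_record',
               'check_sfh', 'count_domain_occurrences', 'domain_registration_length', 'abnormal_url',
               'age_of_domain', 'is_malicious']

    # Write the header once, before the first row is appended.
    if not os.path.exists('phishing_detection_dataset.csv'):
        with open('phishing_detection_dataset.csv', mode='w', newline='') as f:
            csv.writer(f).writerow(columns)

    # Hypothetical sample inputs; replace with real benign/malicious URL lists.
    sample_urls = [('https://example.com', False)]
    for sample_url, label in sample_urls:
        collect_data(sample_url, label)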