difraud committed on
Commit
858ed11
1 Parent(s): 3ad361c

Create difraud.py

Files changed (1)
  1. difraud.py +120 -0
difraud.py ADDED
@@ -0,0 +1,120 @@
+import csv
+import json
+import os
+import sys
+import datasets
+from datasets.tasks import TextClassification
+
+
+# TODO: Add BibTeX citation
+_CITATION = """
+TODO: Add citation here
+"""
+
+_DESCRIPTION = """
+DIFrauD (Domain Independent Fraud Detection) is a corpus of deceptive and truthful texts from 7 domains:
+
+    "fake_news",
+    "job_scams",
+    "phishing",
+    "political_statements",
+    "product_reviews",
+    "sms",
+    "twitter_rumours"
+
+To load a specific domain, pass it as the "name" parameter to load_dataset().
+"""
+
+class DIFrauD(datasets.GeneratorBasedBuilder):
+    """Domain Independent Fraud Detection benchmarks -- a large multi-domain English corpus of truthful and deceptive texts."""
+
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(name="fake_news", description="Fake news domain"),
+        datasets.BuilderConfig(name="job_scams", description="Online job scams"),
+        datasets.BuilderConfig(name="phishing", description="Email phishing attacks"),
+        datasets.BuilderConfig(name="political_statements", description="Statements by various politicians"),
+        datasets.BuilderConfig(name="product_reviews", description="Amazon product reviews"),
+        datasets.BuilderConfig(name="sms", description="SMS spam and phishing attacks"),
+        datasets.BuilderConfig(name="twitter_rumours",
+                               description="Collection of rumours from Twitter spanning several years and topics"),
+    ]
+
+    DEFAULT_CONFIG_NAME = "phishing"
+
+    def _info(self):
+        self.features = datasets.Features(
+            {
+                "text": datasets.Value("string"),
+                "label": datasets.ClassLabel(num_classes=2, names=['non-deceptive', 'deceptive']),
+            }
+        )
+        return datasets.DatasetInfo(
+            config_name=self.config.name,
+            # This is the description that will appear on the datasets page.
+            description=_DESCRIPTION,
+            # This defines the columns of the dataset and their types.
+            features=self.features,  # The features are the same for every configuration.
+            # The (input, target) pair below is used when as_supervised=True
+            # is passed to builder.as_dataset().
+            supervised_keys=("text", "label"),
+            # Declare the standard binary text-classification task to make downstream setup easier.
+            task_templates=[TextClassification(text_column="text", label_column="label")],
+            # Homepage of the dataset for documentation
+            # homepage=_HOMEPAGE,
+            # License for the dataset if available
+            # license=_LICENSE,
+            # Citation for the dataset
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        # This method downloads/extracts the data and defines the splits for the selected configuration.
+        # The configuration chosen by the user (one of BUILDER_CONFIGS) is available as self.config.name.
+
+        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
+        # It accepts any nested list/dict and returns the same structure with each URL replaced by a local path.
+        # By default, archives are extracted and the path to the cached extraction folder is returned.
+        urls = {
+            "train": self.config.name + "/train.jsonl",
+            "test": self.config.name + "/test.jsonl",
+            "validation": self.config.name + "/validation.jsonl",
+        }
+        data_dir = dl_manager.download_and_extract(urls)
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                # These kwargs will be passed to _generate_examples
+                gen_kwargs={
+                    "filepath": data_dir["train"],
+                    "split": "train",
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={
+                    "filepath": data_dir["validation"],
+                    "split": "validation",
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "filepath": data_dir["test"],
+                    "split": "test",
+                },
+            ),
+        ]
+
+    # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
+    def _generate_examples(self, filepath, split):
+        # This method reads the split file chosen in _split_generators and yields (key, example) tuples.
+        # The `key` exists for legacy (tfds) reasons and only has to be unique for each example.
+        with open(filepath, encoding="utf-8") as f:
+            for key, row in enumerate(f):
+                data = json.loads(row)
+                yield key, {
+                    "text": data["text"],
+                    "label": int(data["label"]),
+                }
+
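
For reference, a minimal usage sketch of the loader above, following the note in _DESCRIPTION. The repository id "difraud/difraud" is an assumption for illustration, not something confirmed by this commit; substitute the actual Hub path of the dataset.

from datasets import load_dataset

# Pick one of the seven domains by passing it as the "name" argument;
# "phishing" is the default configuration.
# NOTE: the repo id below is an assumption, not part of this commit.
dataset = load_dataset("difraud/difraud", name="phishing")

# Each configuration exposes train/validation/test splits built from
# <domain>/train.jsonl, <domain>/validation.jsonl and <domain>/test.jsonl,
# where every JSON line carries a "text" string and a binary "label".
print(dataset["train"][0])
print(dataset["train"].features["label"].names)  # ['non-deceptive', 'deceptive']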