import csv
import json
import os
import sys

import datasets
from datasets.tasks import TextClassification


_CITATION = """
TODO: Add citation here
"""

_DESCRIPTION = """ |
|
DIFrauD -- (Domain Independent Fraud Detection) is a corpus of deceptive and truthful texts from 7 domains: |
|
|
|
"fake_news", |
|
"job_scams", |
|
"phishing", |
|
"political_statements", |
|
"product_reviews", |
|
"sms", |
|
"twitter_rumours" |
|
|
|
To load a specific domain, pass it as the "name" parameter to load_dataset() |
|
""" |
|
|
|
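
# Example of loading a single domain (a sketch; the repository id "user/DIFrauD" below is
# only a placeholder for wherever this script is hosted, not a confirmed Hub location):
#
#     from datasets import load_dataset
#     phishing = load_dataset("user/DIFrauD", name="phishing")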


class DIFrauD(datasets.GeneratorBasedBuilder):
    """Domain Independent Fraud Detection benchmarks -- a large multi-domain English corpus of truthful and deceptive texts."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="fake_news", description="Fake news domain"),
        datasets.BuilderConfig(name="job_scams", description="Online job scams"),
        datasets.BuilderConfig(name="phishing", description="Email phishing attacks"),
        datasets.BuilderConfig(name="political_statements", description="Statements by various politicians"),
        datasets.BuilderConfig(name="product_reviews", description="Amazon product reviews"),
        datasets.BuilderConfig(name="sms", description="SMS spam and phishing attacks"),
        datasets.BuilderConfig(
            name="twitter_rumours",
            description="Collection of rumours from Twitter spanning several years and topics",
        ),
    ]

    DEFAULT_CONFIG_NAME = "phishing"

    def _info(self):
        self.features = datasets.Features(
            {
                "text": datasets.Value("string"),
                # Binary deception labels: 0 = non-deceptive, 1 = deceptive.
                "label": datasets.ClassLabel(num_classes=2, names=["non-deceptive", "deceptive"]),
            }
        )
        return datasets.DatasetInfo(
            config_name=self.config.name,
            description=_DESCRIPTION,
            features=self.features,
            supervised_keys=("text", "label"),
            task_templates=[TextClassification(text_column="text", label_column="label")],
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # One JSONL file per split inside the selected domain's directory; relative
        # paths are resolved by the download manager against the dataset repository.
        urls = {
            "train": self.config.name + "/train.jsonl",
            "test": self.config.name + "/test.jsonl",
            "validation": self.config.name + "/validation.jsonl",
        }
        data_dir = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": data_dir["validation"],
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_dir["test"],
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        # Each line of the split file is a JSON object with a "text" string and an
        # integer "label"; the line index serves as the example key.
        with open(filepath, encoding="utf-8") as f:
            for key, row in enumerate(f):
                data = json.loads(row)
                yield key, {
                    "text": data["text"],
                    "label": int(data["label"]),
                }
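

# A minimal local smoke test (a sketch, not part of the loader itself; it assumes the
# per-domain JSONL files, e.g. phishing/train.jsonl, sit next to this script, which may
# not be the case in your checkout):
if __name__ == "__main__":
    sample_path = os.path.join("phishing", "train.jsonl")  # hypothetical local path
    if os.path.exists(sample_path):
        with open(sample_path, encoding="utf-8") as f:
            first = json.loads(next(f))
            # Label follows the ClassLabel order above: 0 = non-deceptive, 1 = deceptive.
            print(first["text"][:80], "->", first["label"])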