pepa committed
Commit 3745598
1 Parent(s): a7ca549

Upload bg-fake-news.py

Files changed (1)
  1. bg-fake-news.py +98 -0
bg-fake-news.py ADDED
@@ -0,0 +1,98 @@
+ import json
+ import textwrap
+
+ import datasets
+
+ logger = datasets.logging.get_logger(__name__)
+
+ _DESCRIPTION = "Fake news detection dataset."
+
+ _CITATION = """\
+ @InProceedings{huggingface:dataset,
+ title = {A great new dataset},
+ author={huggingface, Inc.},
+ year={2020}
+ }
+ """
+
+ _FEATURES = datasets.Features({
+     "title": datasets.Value("string"),
+     "url": datasets.Value("string"),
+     "date_published": datasets.Value("string"),
+     "content": datasets.Value("string"),
+     "fake_news": datasets.features.ClassLabel(names=["fake", "real"]),
+ })
+
+
+ class FakeNewsConfig(datasets.BuilderConfig):
+     """BuilderConfig for FakeNews."""
+
+     def __init__(self, data_url, citation, url, text_features, **kwargs):
+         """
+         Args:
+             data_url: `string`, URL from which the data file is downloaded.
+             citation: `string`, citation for the dataset.
+             url: `string`, homepage of the dataset.
+             text_features: `datasets.Features`, the feature schema of the dataset.
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(FakeNewsConfig, self).__init__(version=datasets.Version("0.1.0", ""), **kwargs)
+         self.text_features = text_features
+         self.data_url = data_url
+         self.citation = citation
+         self.url = url
+
+
+ class FakeNews(datasets.GeneratorBasedBuilder):
+     """Fake news detection dataset builder."""
+
+     VERSION = datasets.Version("0.1.0")
+
+     DEFAULT_CONFIG_NAME = "default"
+
+     BUILDER_CONFIGS = [
+         FakeNewsConfig(
+             name=DEFAULT_CONFIG_NAME,
+             description=_DESCRIPTION,
+             citation=textwrap.dedent(_CITATION),
+             text_features=_FEATURES,
+             data_url="https://gitlab.com/datasciencesociety/case_fake_news/-/blob/master/data/main_data_fake_news.csv",
+             url="https://gitlab.com/datasciencesociety/case_fake_news/-/blob/master/data/main_data_fake_news.csv",
+         )
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=_FEATURES,
+             supervised_keys=None,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # Download the data and generate one split per entry in `split_filenames`.
+         data_dir = dl_manager.download(self.config.data_url)
+         split_filenames = {
+             datasets.Split.TRAIN: "train.jsonl",
+             datasets.Split.VALIDATION: "dev.jsonl",
+             datasets.Split.TEST: "test.jsonl",
+         }
+         return [
+             datasets.SplitGenerator(
+                 name=split,
+                 gen_kwargs={
+                     "filepath": dl_manager.iter_archive(data_dir),
+                     "filename": split_filenames[split],
+                 },
+             )
+             for split in split_filenames
+         ]
+
+     def _generate_examples(self, filepath, filename):
+         # `filepath` is an archive iterator of (path, file object) pairs; keep only
+         # the file that belongs to this split and yield one example per JSON line.
+         idx = 0
+
+         for path, file in filepath:
+             if path.endswith(filename):
+                 lines = (line.decode("utf-8") for line in file)
+                 for line in lines:
+                     idx += 1
+                     example = json.loads(line)
+
+                     yield idx, example
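
A minimal usage sketch, assuming the script is saved locally as bg-fake-news.py and that the configured data_url resolves to an archive containing the train.jsonl, dev.jsonl and test.jsonl files the split generators look for:

import datasets

# Load the dataset through the local loading script (the local path is an assumption).
ds = datasets.load_dataset("./bg-fake-news.py")

print(ds)                                  # DatasetDict with train/validation/test splits
print(ds["train"][0]["title"])             # title of the first training example
print(ds["train"].features["fake_news"])   # ClassLabel(names=["fake", "real"])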