laugustyniak committed on
Commit
6e80c25
1 Parent(s): 8fd6083

add loader and readme

Files changed (2)
  1. README.md +0 -0
  2. political_advertising_loader.py +110 -0
README.md ADDED
Binary file (2.7 kB).
 
political_advertising_loader.py ADDED
@@ -0,0 +1,110 @@
+ from pathlib import Path
+
+ import datasets
+ import pandas as pd
+
+ _CITATION = """\
+ @inproceedings{augustyniak-etal-2020-political,
+     title = "Political Advertising Dataset: the use case of the Polish 2020 Presidential Elections",
+     author = "Augustyniak, Lukasz and
+       Rajda, Krzysztof and
+       Kajdanowicz, Tomasz and
+       Bernaczyk, Micha{\l}",
+     booktitle = "Proceedings of the The Fourth Widening Natural Language Processing Workshop",
+     month = jul,
+     year = "2020",
+     address = "Seattle, USA",
+     publisher = "Association for Computational Linguistics",
+     url = "https://www.aclweb.org/anthology/2020.winlp-1.28",
+     pages = "110--114"
+ }
+ """
+
+ _DESCRIPTION = "Polish Political Advertising Dataset"
+
+ _HOMEPAGE = "https://github.com/laugustyniak/misinformation"
+
+ # Split files (train/dev/test JSON) are read from the current working directory.
+ DATA_PATH = Path(".")
+
+
+ class PoliticalAdvertisingConfig(datasets.BuilderConfig):
+     def __init__(self, **kwargs):
+         super(PoliticalAdvertisingConfig, self).__init__(**kwargs)
+
+
+ class PoliticalAdvertisingDataset(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version("1.0.0")
+
+     TRAIN_FILE = DATA_PATH / "train.json"
+     VAL_FILE = DATA_PATH / "dev.json"
+     TEST_FILE = DATA_PATH / "test.json"
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="political-advertising-pl", version=VERSION)
+     ]
+
+     def _info(self):
+         # Note: the spellings "HEALHCARE" and "ENVIROMENT" are kept from the
+         # original label set; ClassLabel names must match the annotated tags.
+         features = datasets.Features(
+             {
+                 "id": datasets.Value("string"),
+                 "tokens": datasets.Sequence(datasets.Value("string")),
+                 "tags": datasets.Sequence(
+                     datasets.features.ClassLabel(
+                         names=[
+                             "O",
+                             "B-DEFENSE_AND_SECURITY",
+                             "I-DEFENSE_AND_SECURITY",
+                             "B-EDUCATION",
+                             "I-EDUCATION",
+                             "B-FOREIGN_POLICY",
+                             "I-FOREIGN_POLICY",
+                             "B-HEALHCARE",
+                             "I-HEALHCARE",
+                             "B-IMMIGRATION",
+                             "I-IMMIGRATION",
+                             "B-INFRASTRUCTURE_AND_ENVIROMENT",
+                             "I-INFRASTRUCTURE_AND_ENVIROMENT",
+                             "B-POLITICAL_AND_LEGAL_SYSTEM",
+                             "I-POLITICAL_AND_LEGAL_SYSTEM",
+                             "B-SOCIETY",
+                             "I-SOCIETY",
+                             "B-WELFARE",
+                             "I-WELFARE",
+                         ]
+                     )
+                 ),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # Splits come from local JSON files, so the download manager is not used.
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN, gen_kwargs={"filepath": str(self.TRAIN_FILE)}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST, gen_kwargs={"filepath": str(self.TEST_FILE)}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"filepath": str(self.VAL_FILE)},
+             ),
+         ]
+
+     def _generate_examples(self, filepath: str):
+         df = pd.read_json(filepath)
+         for row_id, row in df.iterrows():
+             # Yield only the fields declared in _info(); the extra "text", "url"
+             # and "tweet_id" columns are not part of the declared features and
+             # would fail example encoding. Assumes each JSON row provides
+             # "tokens" and "tags" columns matching the feature schema.
+             yield row_id, {
+                 "id": row.id,
+                 "tokens": row.tokens,
+                 "tags": row.tags,
+             }
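
For reference, a minimal usage sketch (not part of the commit): it assumes the loading script above is saved locally as political_advertising_loader.py and that train.json, dev.json and test.json are reachable from the working directory, since the script reads them via DATA_PATH = Path(".").

    import datasets

    # Load the dataset through the local loading script; the JSON splits must be
    # available in the current working directory.
    dataset = datasets.load_dataset("./political_advertising_loader.py")

    # "tags" is a Sequence of ClassLabel, so the names declared in _info() can be
    # used to decode the integer tag ids.
    tag_names = dataset["train"].features["tags"].feature.names
    print(tag_names[:3])        # ['O', 'B-DEFENSE_AND_SECURITY', 'I-DEFENSE_AND_SECURITY']
    print(dataset["train"][0])  # {'id': ..., 'tokens': [...], 'tags': [...]}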