holylovenia committed on
Commit 4e22ee4
1 Parent(s): 7587130

Upload x_fact.py with huggingface_hub

Files changed (1)
  1. x_fact.py +164 -0
x_fact.py ADDED
@@ -0,0 +1,164 @@
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from nusacrowd.nusa_datasets.x_fact.utils.x_fact_utils import \
+     load_x_fact_dataset
+ from nusacrowd.utils.configs import NusantaraConfig
+ from nusacrowd.utils.constants import Tasks
+
+ _CITATION = """\
+ @inproceedings{gupta2021xfact,
+     title = {{X-FACT: A New Benchmark Dataset for Multilingual Fact Checking}},
+     author = {Gupta, Ashim and Srikumar, Vivek},
+     booktitle = "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics",
+     month = jul,
+     year = "2021",
+     address = "Online",
+     publisher = "Association for Computational Linguistics",
+ }
+ """
+
+ _DATASETNAME = "x_fact"
+
+ _DESCRIPTION = """\
+ X-FACT: the largest publicly available multilingual dataset for factual verification of naturally existing real-world claims.
+ """
+
+ _HOMEPAGE = "https://github.com/utahnlp/x-fact"
+
+ _LANGUAGES = [
+     'ara', 'aze', 'ben', 'deu', 'spa',
+     'fas', 'fra', 'guj', 'hin', 'ind',
+     'ita', 'kat', 'mar', 'nor', 'nld',
+     'pan', 'pol', 'por', 'ron', 'rus',
+     'sin', 'srp', 'sqi', 'tam', 'tur',
+ ]
+
+ _LOCAL = False
+
+ _LICENSE = "MIT"
+
+ _URLS = {
+     "train": "https://raw.githubusercontent.com/utahnlp/x-fact/main/data/x-fact-including-en/train.all.tsv",
+     "validation": "https://raw.githubusercontent.com/utahnlp/x-fact/main/data/x-fact-including-en/dev.all.tsv",
+     "test": {
+         "in_domain": "https://raw.githubusercontent.com/utahnlp/x-fact/main/data/x-fact-including-en/test.all.tsv",
+         "out_domain": "https://raw.githubusercontent.com/utahnlp/x-fact/main/data/x-fact-including-en/ood.tsv",
+     },
+ }
+
+ _SUPPORTED_TASKS = [Tasks.FACT_CHECKING]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _NUSANTARA_VERSION = "1.0.0"
+
+
+ class XFact(datasets.GeneratorBasedBuilder):
+     """X-FACT: the largest publicly available multilingual dataset for factual verification of naturally existing real-world claims."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)
+
+     BUILDER_CONFIGS = [
+         NusantaraConfig(
+             name="x_fact_source",
+             version=SOURCE_VERSION,
+             description="x_fact source schema",
+             schema="source",
+             subset_id="x_fact",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "x_fact_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "language": datasets.Value("string"),
+                     "site": datasets.Value("string"),
+                     "evidence_1": datasets.Value("string"),
+                     "evidence_2": datasets.Value("string"),
+                     "evidence_3": datasets.Value("string"),
+                     "evidence_4": datasets.Value("string"),
+                     "evidence_5": datasets.Value("string"),
+                     "link_1": datasets.Value("string"),
+                     "link_2": datasets.Value("string"),
+                     "link_3": datasets.Value("string"),
+                     "link_4": datasets.Value("string"),
+                     "link_5": datasets.Value("string"),
+                     "claimDate": datasets.Value("string"),
+                     "reviewDate": datasets.Value("string"),
+                     "claimant": datasets.Value("string"),
+                     "claim": datasets.Value("string"),
+                     "label": datasets.Value("string"),
+                 }
+             )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         # The raw GitHub URLs are passed straight through as filepaths;
+         # load_x_fact_dataset is expected to read directly from them
+         # (no dl_manager.download_and_extract step).
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": _URLS["train"],
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": _URLS["validation"],
+                     "split": "dev",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.splits.NamedSplit("TEST_IN_DOMAIN"),
+                 gen_kwargs={
+                     "filepath": _URLS["test"]["in_domain"],
+                     "split": "test_in_domain",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.splits.NamedSplit("TEST_OUT_DOMAIN"),
+                 gen_kwargs={
+                     "filepath": _URLS["test"]["out_domain"],
+                     "split": "test_out_domain",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
+         df = load_x_fact_dataset(filepath)
+         if self.config.schema == "source":
+             for row in df.itertuples():
+                 entry = {
+                     "language": row.language,
+                     "site": row.site,
+                     "evidence_1": row.evidence_1,
+                     "evidence_2": row.evidence_2,
+                     "evidence_3": row.evidence_3,
+                     "evidence_4": row.evidence_4,
+                     "evidence_5": row.evidence_5,
+                     "link_1": row.link_1,
+                     "link_2": row.link_2,
+                     "link_3": row.link_3,
+                     "link_4": row.link_4,
+                     "link_5": row.link_5,
+                     "claimDate": row.claimDate,
+                     "reviewDate": row.reviewDate,
+                     "claimant": row.claimant,
+                     "claim": row.claim,
+                     "label": row.label,
+                 }
+                 # itertuples() exposes the DataFrame index as `Index`;
+                 # `row.index` would resolve to the tuple's index() method
+                 # rather than an integer key.
+                 yield row.Index, entry
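The helper load_x_fact_dataset is imported from nusacrowd but is not part of this commit. A minimal sketch of what it is assumed to do, given the column attributes the generator reads (the real helper may differ in its parsing options):

import pandas as pd

def load_x_fact_dataset(filepath):
    # The X-FACT TSVs have a header row whose columns match the feature
    # names above. quoting=3 (csv.QUOTE_NONE) and keep_default_na=False are
    # assumptions: they keep raw quote characters and leave empty
    # evidence/link cells as empty strings rather than NaN.
    return pd.read_csv(filepath, sep="\t", quoting=3, keep_default_na=False, dtype=str)

A hedged usage sketch, assuming this script is saved locally as x_fact.py and the nusacrowd package is importable (script-based loading requires a datasets version that still supports loading scripts):

import datasets

# "x_fact_source" is the config name from BUILDER_CONFIGS; the two custom
# test splits are the in-domain and out-of-domain sets from the X-FACT repo.
dset = datasets.load_dataset("x_fact.py", name="x_fact_source")
print(dset["train"][0]["claim"], "->", dset["train"][0]["label"])
print(dset["TEST_IN_DOMAIN"].num_rows, dset["TEST_OUT_DOMAIN"].num_rows)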