Datasets:

Sub-tasks:
fact-checking
Languages:
Arabic
Multilinguality:
monolingual
Size Categories:
1K<n<10K
Language Creators:
found
Annotations Creators:
crowdsourced
Source Datasets:
original
ArXiv:
Tags:
stance-detection
License:
mkon commited on
Commit
c34cba5
1 Parent(s): 30f0ffc

add dataloader

Browse files
Files changed (1) hide show
  1. ans-stance.py +101 -0
ans-stance.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 Mads Kongsbak and Leon Derczynski
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """DataLoader for ANS, an Arabic News Stance corpus"""
15
+
16
+
17
+ import csv
18
+ import json
19
+ import os
20
+
21
+ import datasets
22
+
23
# BibTeX entry for the paper that introduced the ANS corpus (FEVER workshop, 2020).
_CITATION = """\
@inproceedings{,
title = "Stance Prediction and Claim Verification: An {A}rabic Perspective",
author = "Khouja, Jude",
booktitle = "Proceedings of the Third Workshop on Fact Extraction and {VER}ification ({FEVER})",
year = "2020",
address = "Seattle, USA",
publisher = "Association for Computational Linguistics",
}
"""

# Short human-readable summary shown on the dataset hub page.
_DESCRIPTION = """\
The dataset is a collection of news titles in arabic along with paraphrased and corrupted titles. The stance prediction version is a 3-class classification task. Data contains three columns: s1, s2, stance.
"""

# No dedicated homepage was provided with this loader.
_HOMEPAGE = ""

# SPDX-style license identifier for the data.
_LICENSE = "apache-2.0"
42
class ANSStanceConfig(datasets.BuilderConfig):
    """BuilderConfig for the ANS stance-detection dataset.

    A plain passthrough: all keyword arguments go straight to the base
    ``datasets.BuilderConfig``.
    """

    def __init__(self, **kwargs):
        # No extra configuration fields are needed for this dataset.
        super().__init__(**kwargs)
47
class ANSStance(datasets.GeneratorBasedBuilder):
    """ANS dataset made in triples of (s1, s2, stance)."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        ANSStanceConfig(name="stance", version=VERSION, description=""),
    ]

    def _info(self):
        """Return dataset metadata: id/s1/s2 strings plus a 3-class stance label."""
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "s1": datasets.Value("string"),
                "s2": datasets.Value("string"),
                "stance": datasets.features.ClassLabel(
                    names=[
                        "disagree",
                        "agree",
                        "other",
                    ]
                ),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Resolve the three split CSVs (hosted next to this script) and map them to splits."""
        train_path = dl_manager.download_and_extract("ans_train.csv")
        valid_path = dl_manager.download_and_extract("ans_dev.csv")
        test_path = dl_manager.download_and_extract("ans_test.csv")

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path, "split": "train"}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": valid_path, "split": "validation"}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path, "split": "test"}),
        ]

    def _generate_examples(self, filepath, split):
        """Yield (guid, example) pairs from a comma-separated CSV with a header row.

        Args:
            filepath: Local path to the split's CSV file; assumed to contain
                at least the columns ``s1``, ``s2`` and ``stance`` — TODO
                confirm against the hosted CSVs.
            split: Split name ("train"/"validation"/"test"); unused here but
                part of the gen_kwargs contract.

        The original implementation popped and re-inserted the same keys
        (no-ops) and yielded the whole CSV row, so any extra column would
        have leaked into the example and clashed with the declared features.
        Here we build the example from exactly the declared columns.
        """
        with open(filepath, encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter=",")
            for guid, row in enumerate(reader):
                # `id` is a running index over rows, matching the original's guid.
                yield guid, {
                    "id": str(guid),
                    "s1": row["s1"],
                    "s2": row["s2"],
                    "stance": row["stance"],
                }