sileod committed
Commit 5aadf1c
1 Parent(s): 10b61f0

Create discourse_marker_qa.py

Files changed (1)
  1. discourse_marker_qa.py +99 -0
discourse_marker_qa.py ADDED
@@ -0,0 +1,99 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Discourse marker/connective prediction as multiple-choice questions, based on the Discovery dataset."""
+
+
+import csv
+import json
+import os
+import datasets
+
+
+_CITATION = """\
+@inproceedings{sileo-etal-2019-mining,
+    title = "Mining Discourse Markers for Unsupervised Sentence Representation Learning",
+    author = "Sileo, Damien  and
+      Van De Cruys, Tim  and
+      Pradel, Camille  and
+      Muller, Philippe",
+    booktitle = "Proceedings of the 2019 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)",
+    month = jun,
+    year = "2019",
+    address = "Minneapolis, Minnesota",
+    publisher = "Association for Computational Linguistics",
+    url = "https://aclanthology.org/N19-1351",
+    doi = "10.18653/v1/N19-1351",
+    pages = "3477--3486",
+    abstract = "Current state of the art systems in NLP heavily rely on manually annotated datasets, which are expensive to construct. Very little work adequately exploits unannotated data {--} such as discourse markers between sentences {--} mainly because of data sparseness and ineffective extraction methods. In the present work, we propose a method to automatically discover sentence pairs with relevant discourse markers, and apply it to massive amounts of data. Our resulting dataset contains 174 discourse markers with at least 10k examples each, even for rare markers such as {``}coincidentally{''} or {``}amazingly{''}. We use the resulting data as supervision for learning transferable sentence embeddings. In addition, we show that even though sentence representation learning through prediction of discourse marker yields state of the art results across different transfer tasks, it{'}s not clear that our models made use of the semantic relation between sentences, thus leaving room for further improvements.",
+}
+"""
+
+_DESCRIPTION = """\
+Discourse marker/connective prediction as multiple-choice questions, based on the Discovery dataset.
+"""
+
+_HOMEPAGE = ""
+
+_LICENSE = "apache-2.0"
+
+_URL = "https://sileod.s3.eu-west-3.amazonaws.com/huggingface/discourse_marker_mcqa_test.json"
+
+class DiscoveryQA(datasets.GeneratorBasedBuilder):
+
+    VERSION = datasets.Version("1.1.0")
+
+    def _info(self):
+        # Each example is a context paired with ten candidate discourse markers; `label` indexes the correct one.
+        features = datasets.Features(
+            {
+                "context": datasets.Value("string"),
+                "answer_0": datasets.Value("string"),
+                "answer_1": datasets.Value("string"),
+                "answer_2": datasets.Value("string"),
+                "answer_3": datasets.Value("string"),
+                "answer_4": datasets.Value("string"),
+                "answer_5": datasets.Value("string"),
+                "answer_6": datasets.Value("string"),
+                "answer_7": datasets.Value("string"),
+                "answer_8": datasets.Value("string"),
+                "answer_9": datasets.Value("string"),
+                "label": datasets.Value("int32"),
+            }
+        )
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,  # defined above; this dataset has a single configuration
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        data_dir = dl_manager.download(_URL)
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                # These kwargs are passed to _generate_examples below.
+                gen_kwargs={
+                    "filepath": data_dir,
+                    "split": "test",
+                },
+            ),
+        ]
+
+    def _generate_examples(self, filepath, split):
+        with open(filepath, encoding="utf-8") as f:  # JSON Lines: one example per line
+            for key, row in enumerate(f):
+                yield key, json.loads(row)
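
For context, a minimal sketch of how a script like this is typically consumed once it lives in a Hub dataset repo. The repo id "sileod/discourse_marker_qa" is an assumption inferred from the author and filename; the load_dataset call and the feature names ("context", "answer_0".."answer_9", "label") come from the script above.

# Usage sketch, assuming the script is hosted at "sileod/discourse_marker_qa"
# (repo id is a guess; everything else follows the features defined above).
from datasets import load_dataset

ds = load_dataset("sileod/discourse_marker_qa", split="test")

example = ds[0]
choices = [example[f"answer_{i}"] for i in range(10)]
print(example["context"])           # the sentence-pair context
print(choices[example["label"]])    # the gold discourse marker among ten candidates

Since _split_generators only defines datasets.Split.TEST, "test" is the only split this script exposes; the dataset is intended for zero-shot or transfer evaluation rather than training.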