Modalities: Text
Formats: parquet
Size: n<1K
ianporada committed
Commit d779409
1 Parent(s): b6cddd6

Delete mwsc_raw.py with huggingface_hub
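
The commit message indicates the file was removed programmatically with the huggingface_hub client rather than through the web UI. A minimal sketch of that kind of call; the repo id below is a placeholder, not taken from this page:

# Sketch only: removing a file from a Hub dataset repo with huggingface_hub.
from huggingface_hub import HfApi

api = HfApi()  # reuses the token stored by `huggingface-cli login`
api.delete_file(
    path_in_repo="mwsc_raw.py",
    repo_id="user/dataset-name",  # hypothetical placeholder repo id
    repo_type="dataset",
    commit_message="Delete mwsc_raw.py with huggingface_hub",
)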

Files changed (1)
  1. mwsc_raw.py +0 -121
mwsc_raw.py DELETED
@@ -1,121 +0,0 @@
-"""A modification of the Winograd Schema Challenge to ensure answers are a single context word"""
-
-import os
-import re
-
-import datasets
-
-
-_CITATION = """\
-@article{McCann2018decaNLP,
-  title={The Natural Language Decathlon: Multitask Learning as Question Answering},
-  author={Bryan McCann and Nitish Shirish Keskar and Caiming Xiong and Richard Socher},
-  journal={arXiv preprint arXiv:1806.08730},
-  year={2018}
-}
-"""
-
-_DESCRIPTION = """\
-Examples taken from the Winograd Schema Challenge modified to ensure that answers are a single word from the context.
-This modified Winograd Schema Challenge (MWSC) ensures that scores are neither inflated nor deflated by oddities in phrasing.
-"""
-
-_DATA_URL = "https://raw.githubusercontent.com/salesforce/decaNLP/1e9605f246b9e05199b28bde2a2093bc49feeeaa/local_data/schema.txt"
-# Alternate: https://s3.amazonaws.com/research.metamind.io/decaNLP/data/schema.txt
-
-
-class MWSC(datasets.GeneratorBasedBuilder):
-    """MWSC: modified Winograd Schema Challenge"""
-
-    VERSION = datasets.Version("0.1.0")
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "sentence": datasets.Value("string"),
-                    "question": datasets.Value("string"),
-                    "options": datasets.features.Sequence(datasets.Value("string")),
-                    "answer": datasets.Value("string"),
-                }
-            ),
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage="http://decanlp.com",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        schemas_file = dl_manager.download_and_extract(_DATA_URL)
-
-        if os.path.isdir(schemas_file):
-            # During testing the download manager mock gives us a directory
-            schemas_file = os.path.join(schemas_file, "schema.txt")
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={"filepath": schemas_file, "split": "train"},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={"filepath": schemas_file, "split": "test"},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={"filepath": schemas_file, "split": "dev"},
-            ),
-        ]
-
-    def _get_both_schema(self, context):
-        """Split [option1/option2] into 2 sentences.
-        From https://github.com/salesforce/decaNLP/blob/1e9605f246b9e05199b28bde2a2093bc49feeeaa/text/torchtext/datasets/generic.py#L815-L827"""
-        pattern = r"\[.*\]"
-        variations = [x[1:-1].split("/") for x in re.findall(pattern, context)]
-        splits = re.split(pattern, context)
-        results = []
-        for which_schema in range(2):
-            vs = [v[which_schema] for v in variations]
-            context = ""
-            for idx in range(len(splits)):
-                context += splits[idx]
-                if idx < len(vs):
-                    context += vs[idx]
-            results.append(context)
-        return results
-
-    def _generate_examples(self, filepath, split):
-        """Yields examples."""
-
-        schemas = []
-        with open(filepath, encoding="utf-8") as schema_file:
-            schema = []
-            for line in schema_file:
-                if len(line.split()) == 0:
-                    schemas.append(schema)
-                    schema = []
-                    continue
-                else:
-                    schema.append(line.strip())
-
-        # Train/test/dev split from decaNLP code
-        splits = {}
-        traindev = schemas[:-50]
-        splits["test"] = schemas[-50:]
-        splits["train"] = traindev[:40]
-        splits["dev"] = traindev[40:]
-
-        idx = 0
-        for schema in splits[split]:
-            sentence, question, answers = schema
-            sentence = self._get_both_schema(sentence)
-            question = self._get_both_schema(question)
-            answers = answers.split("/")
-            for i in range(2):
-                yield idx, {"sentence": sentence[i], "question": question[i], "options": answers, "answer": answers[i]}
-                idx += 1
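
For readers skimming the deleted script, the core trick is _get_both_schema: it expands a template containing a [option1/option2] span into its two alternative readings, which is why each schema yields two examples. A self-contained sketch of the same regex logic; the example sentence is illustrative, not quoted from schema.txt:

import re

def get_both_schema(context):
    """Expand each [option1/option2] span into its two readings."""
    pattern = r"\[.*\]"
    # Bracketed options, e.g. [["feared", "advocated"]]
    variations = [x[1:-1].split("/") for x in re.findall(pattern, context)]
    # Text surrounding the bracketed span(s)
    pieces = re.split(pattern, context)
    results = []
    for which in range(2):
        chosen = [v[which] for v in variations]
        text = ""
        for i, piece in enumerate(pieces):
            text += piece
            if i < len(chosen):
                text += chosen[i]
        results.append(text)
    return results

print(get_both_schema(
    "The city councilmen refused the demonstrators a permit because they [feared/advocated] violence."
))
# ['...because they feared violence.', '...because they advocated violence.']

The dataset itself remains usable after this deletion: the card metadata above lists parquet, so presumably the repo was converted to script-free parquet files and the usual load_dataset call still works. The repo id below is an assumption for illustration:

from datasets import load_dataset

ds = load_dataset("user/dataset-name", split="train")  # placeholder repo id
print(ds[0])  # expected keys: sentence, question, options, answer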