|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Write & Improve (Yannakoudakis et al., 2018) is an online web platform that assists non-native |
|
English students with their writing. Specifically, students from around the world submit letters, |
|
stories, articles and essays in response to various prompts, and the W&I system provides instant |
|
feedback. Since W&I went live in 2014, W&I annotators have manually annotated some of these |
|
submissions and assigned them a CEFR level. |
|
|
|
The LOCNESS corpus (Granger, 1998) consists of essays written by native English students. |
|
It was originally compiled by researchers at the Centre for English Corpus Linguistics at the |
|
University of Louvain. Since native English students also sometimes make mistakes, we asked |
|
the W&I annotators to annotate a subsection of LOCNESS so researchers can test the effectiveness |
|
of their systems on the full range of English levels and abilities.""" |
|
|
|
from __future__ import absolute_import, division, print_function |
|
|
|
import json |
|
from pathlib import Path |
|
|
|
import datasets |
|
|
|
|
|
_CITATION = """\ |
|
@inproceedings{bryant-etal-2019-bea, |
|
title = "The {BEA}-2019 Shared Task on Grammatical Error Correction", |
|
author = "Bryant, Christopher and |
|
Felice, Mariano and |
|
Andersen, {\\O}istein E. and |
|
Briscoe, Ted", |
|
booktitle = "Proceedings of the Fourteenth Workshop on Innovative Use of NLP for Building Educational Applications", |
|
month = aug, |
|
year = "2019", |
|
address = "Florence, Italy", |
|
publisher = "Association for Computational Linguistics", |
|
url = "https://www.aclweb.org/anthology/W19-4406", |
|
doi = "10.18653/v1/W19-4406", |
|
pages = "52--75", |
|
abstract = "This paper reports on the BEA-2019 Shared Task on Grammatical Error Correction (GEC). As with the CoNLL-2014 shared task, participants are required to correct all types of errors in test data. One of the main contributions of the BEA-2019 shared task is the introduction of a new dataset, the Write{\\&}Improve+LOCNESS corpus, which represents a wider range of native and learner English levels and abilities. Another contribution is the introduction of tracks, which control the amount of annotated data available to participants. Systems are evaluated in terms of ERRANT F{\\_}0.5, which allows us to report a much wider range of performance statistics. The competition was hosted on Codalab and remains open for further submissions on the blind test set.", |
|
} |
|
""" |
|
|
|
# Short human-readable summary shown on the dataset hub page.
_DESCRIPTION = """\
Write & Improve (Yannakoudakis et al., 2018) is an online web platform that assists non-native
English students with their writing. Specifically, students from around the world submit letters,
stories, articles and essays in response to various prompts, and the W&I system provides instant
feedback. Since W&I went live in 2014, W&I annotators have manually annotated some of these
submissions and assigned them a CEFR level.
"""

# Landing page of the BEA-2019 shared task data release.
_HOMEPAGE = "https://www.cl.cam.ac.uk/research/nl/bea2019st/#data"

# No machine-readable license string is distributed with the corpus.
_LICENSE = ""

# Single tarball containing both the W&I and LOCNESS portions (v2.1).
_URL = "https://www.cl.cam.ac.uk/research/nl/bea2019st/data/wi+locness_v2.1.bea19.tar.gz"
|
|
|
|
|
class WiLocness(datasets.GeneratorBasedBuilder):
    """Loader for the W&I+LOCNESS corpus of the BEA-2019 GEC shared task.

    Write & Improve (Yannakoudakis et al., 2018) is an online web platform that assists non-native
    English students with their writing. Specifically, students from around the world submit letters,
    stories, articles and essays in response to various prompts, and the W&I system provides instant
    feedback. Since W&I went live in 2014, W&I annotators have manually annotated some of these
    submissions and assigned them a CEFR level.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="wi",
            version=VERSION,
            description="This part of the dataset includes the Write & Improve data for levels A, B and C",
        ),
        datasets.BuilderConfig(
            name="locness",
            version=VERSION,
            description="This part of the dataset includes the Locness part of the W&I-Locness dataset",
        ),
    ]

    def _info(self):
        """Build the `DatasetInfo` for the selected config.

        Both configs share the same base schema (id, CEFR level, raw text and
        character-level correction edits); the "wi" config additionally exposes
        the submitting user's id.
        """
        # Start with "id" so that, for the "wi" config, "userid" lands right
        # after it — preserving the original feature order of the schema.
        feature_dict = {"id": datasets.Value("string")}
        if self.config.name == "wi":
            feature_dict["userid"] = datasets.Value("string")
        elif self.config.name != "locness":
            # Unreachable for the configs declared above.  Raise instead of
            # using `assert`, which is stripped under `python -O`.
            raise ValueError(f"Unsupported config: {self.config.name}")
        feature_dict.update(
            {
                "cefr": datasets.Value("string"),
                "text": datasets.Value("string"),
                # Each edit is a (start, end, replacement-text) span over
                # `text`; see `_generate_examples` for the on-disk layout.
                "edits": datasets.Sequence(
                    {
                        "start": datasets.Value("int32"),
                        "end": datasets.Value("int32"),
                        "text": datasets.Value("string"),
                    }
                ),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(feature_dict),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the tarball and declare the splits per config.

        W&I ships train and dev data; LOCNESS has a dev set only (its test
        set is blind).
        """
        data_dir = Path(dl_manager.download_and_extract(_URL)) / "wi+locness" / "json"

        if self.config.name == "wi":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"filepath": data_dir, "split": "train"},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={"filepath": data_dir, "split": "validation"},
                ),
            ]
        if self.config.name == "locness":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={"filepath": data_dir, "split": "validation"},
                ),
            ]
        raise ValueError(f"Unsupported config: {self.config.name}")

    def _generate_examples(self, filepath, split):
        """Yield ``(key, example)`` pairs from the per-level JSON-lines files.

        Args:
            filepath: directory containing the extracted ``<level>.<split>.json`` files.
            split: ``"train"`` or ``"validation"`` (mapped to the on-disk ``dev`` files).
        """
        if split == "validation":
            split = "dev"  # the extracted files are named *.dev.json

        if self.config.name == "wi":
            levels = ["A", "B", "C"]  # CEFR levels annotated in W&I
        elif self.config.name == "locness":
            levels = ["N"]  # native-speaker essays
        else:
            raise ValueError(f"Unsupported config: {self.config.name}")

        for level in levels:
            with open(filepath / f"{level}.{split}.json", "r", encoding="utf-8") as fp:
                for line in fp:
                    o = json.loads(line)

                    # Each record stores edits as
                    # [[annotator_id, [[start, end, text], ...]]]; only the
                    # first annotator's edit list is used (equivalent to the
                    # previous `o["edits"][0][1:][0]`, but direct).
                    edits = [
                        {"start": start, "end": end, "text": text}
                        for start, end, text in o["edits"][0][1]
                    ]

                    out = {
                        "id": o["id"],
                        "cefr": o["cefr"],
                        "text": o["text"],
                        "edits": edits,
                    }

                    if self.config.name == "wi":
                        # Some W&I records lack a userid; default to "".
                        out["userid"] = o.get("userid", "")

                    yield o["id"], out
|
|