"""MERLIN Written Learner Corpus for Czech, German, Italian 1.1.""" |
|
|
|
|
|
import csv |
|
import json |
|
import os |
|
|
|
import datasets |
|
|
|
_CITATION = """\ |
|
@inproceedings{boyd-etal-2014-merlin, |
|
title = "The {MERLIN} corpus: Learner language and the {CEFR}", |
|
author = {Boyd, Adriane and |
|
Hana, Jirka and |
|
Nicolas, Lionel and |
|
Meurers, Detmar and |
|
Wisniewski, Katrin and |
|
Abel, Andrea and |
|
Sch{\"o}ne, Karin and |
|
{\v{S}}tindlov{\'a}, Barbora and |
|
Vettori, Chiara}, |
|
editor = "Calzolari, Nicoletta and |
|
Choukri, Khalid and |
|
Declerck, Thierry and |
|
Loftsson, Hrafn and |
|
Maegaard, Bente and |
|
Mariani, Joseph and |
|
Moreno, Asuncion and |
|
Odijk, Jan and |
|
Piperidis, Stelios", |
|
booktitle = "Proceedings of the Ninth International Conference on Language Resources and Evaluation ({LREC}'14)", |
|
month = may, |
|
year = "2014", |
|
address = "Reykjavik, Iceland", |
|
publisher = "European Language Resources Association (ELRA)", |
|
url = "http://www.lrec-conf.org/proceedings/lrec2014/pdf/606_Paper.pdf", |
|
pages = "1281--1288", |
|
abstract = "The MERLIN corpus is a written learner corpus for Czech, German,and Italian that has been designed to illustrate the Common European Framework of Reference for Languages (CEFR) with authentic learner data. The corpus contains 2,290 learner texts produced in standardized language certifications covering CEFR levels A1-C1. The MERLIN annotation scheme includes a wide range of language characteristics that enable research into the empirical foundations of the CEFR scales and provide language teachers, test developers, and Second Language Acquisition researchers with concrete examples of learner performance and progress across multiple proficiency levels. For computational linguistics, it provide a range of authentic learner data for three target languages, supporting a broadening of the scope of research in areas such as automatic proficiency classification or native language identification. The annotated corpus and related information will be freely available as a corpus resource and through a freely accessible, didactically-oriented online platform.", |
|
} |
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
The MERLIN corpus is a written learner corpus for Czech, German, and Italian that has been |
|
designed to illustrate the Common European Framework of Reference for Languages (CEFR) with |
|
authentic learner data. The corpus contains learner texts produced in standardized language |
|
certifications covering CEFR levels A1-C1. The MERLIN annotation scheme includes a wide |
|
range of language characteristics that provide researchers with concrete examples of learner |
|
performance and progress across multiple proficiency levels. |
|
""" |
|
|
|
_HOMEPAGE = "https://merlin-platform.eu/" |
|
|
|
_LICENSE = "Creative Commons - Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)" |
|
|
|
_URLS = { |
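    # Note: all four configurations download the same archive; the per-language
    # configurations are realised in ``_split_generators`` by descending into the
    # corresponding subdirectory of the extracted corpus.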
    "multilingual": "https://clarin.eurac.edu/repository/xmlui/bitstream/handle/20.500.12124/6/merlin-text-v1.1.zip",
    "german": "https://clarin.eurac.edu/repository/xmlui/bitstream/handle/20.500.12124/6/merlin-text-v1.1.zip",
    "italian": "https://clarin.eurac.edu/repository/xmlui/bitstream/handle/20.500.12124/6/merlin-text-v1.1.zip",
    "czech": "https://clarin.eurac.edu/repository/xmlui/bitstream/handle/20.500.12124/6/merlin-text-v1.1.zip",
}
|
|
|
class MerlinDataset(datasets.GeneratorBasedBuilder): |
    """MERLIN written learner corpus for Czech, German, and Italian."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="multilingual", version=VERSION, description="MERLIN dataset covering all three languages."),
        datasets.BuilderConfig(name="german", version=VERSION, description="MERLIN dataset, German texts only."),
        datasets.BuilderConfig(name="italian", version=VERSION, description="MERLIN dataset, Italian texts only."),
        datasets.BuilderConfig(name="czech", version=VERSION, description="MERLIN dataset, Czech texts only."),
    ]

def _info(self): |
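        # The overall CEFR level and every rating dimension are modelled as
        # ClassLabel features over A1-C2, so they can be consumed directly as
        # classification targets.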
|
features = datasets.Features( |
|
{ |
|
"author": datasets.Value("string"), |
|
"language": datasets.ClassLabel(num_classes=3, names=["Czech", "German", "Italian"]), |
|
"level": datasets.ClassLabel(num_classes=6, names=['A1', 'A2', 'B1', 'B2', 'C1', 'C2']), |
|
"level_grammar": datasets.ClassLabel(num_classes=6, names=['A1', 'A2', 'B1', 'B2', 'C1', 'C2']), |
|
"level_ortography": datasets.ClassLabel(num_classes=6, names=['A1', 'A2', 'B1', 'B2', 'C1', 'C2']), |
|
"level_vocabulary_range": datasets.ClassLabel(num_classes=6, names=['A1', 'A2', 'B1', 'B2', 'C1', 'C2']), |
|
"level_vocabulary_control": datasets.ClassLabel(num_classes=6, names=['A1', 'A2', 'B1', 'B2', 'C1', 'C2']), |
|
"level_coherence": datasets.ClassLabel(num_classes=6, names=['A1', 'A2', 'B1', 'B2', 'C1', 'C2']), |
|
"level_appropriateness": datasets.ClassLabel(num_classes=6, names=['A1', 'A2', 'B1', 'B2', 'C1', 'C2']), |
|
"text": datasets.Value("string"), |
|
"text_target": datasets.Value("string"), |
|
} |
|
) |
|
|
|
return datasets.DatasetInfo( |
|
description=_DESCRIPTION, |
|
features=features, |
|
homepage=_HOMEPAGE, |
|
license=_LICENSE, |
|
citation=_CITATION, |
|
) |
|
|
|
def _split_generators(self, dl_manager): |
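        # The extracted archive keeps one plain-text file per learner text under
        # merlin-text-v1.1/meta_ltext_THs/<language>/, each bundling the metadata
        # header, the learner text, and the target hypothesis. All texts are exposed
        # as a single "train" split.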
        urls = _URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(urls)
        filepath = os.path.join(data_dir, "merlin-text-v1.1/meta_ltext_THs")
        if self.config.name != "multilingual":
            filepath = os.path.join(filepath, self.config.name)
        print(f"Generating split from {filepath}")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": filepath,
                    "split": "train",
                },
            ),
        ]

def _generate_examples(self, filepath, split): |
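        # Each corpus file is a plain-text record: a header of "key: value" metadata
        # lines (test language, author ID, CEFR ratings), followed by the learner text
        # and the target hypothesis, with sections delimited by "----------------".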
        file_list = []
        for path, _, files in os.walk(filepath):
            for file in files:
                file_list.append(os.path.join(path, file))

        print(f"Reading {len(file_list)} files")

        for f in file_list:
            # Read with an explicit encoding; the learner texts contain non-ASCII characters.
            with open(f, "r", encoding="utf-8") as handle:
                raw_text = handle.read()

            language = re.findall(r'(Test language: )(.*?)(\n)', raw_text)[0][1]
            author_id = re.findall(r'(Author ID: )(.*?)(\n)', raw_text)[0][1]
            level = re.findall(r'(CEFR level of test: )(.*?)(\n)', raw_text)[0][1]
            level_grammar = re.findall(r'(Grammatical accuracy: )(.*?)(\n)', raw_text)[0][1]
            level_ortography = re.findall(r'(Orthography: )(.*?)(\n)', raw_text)[0][1]
            level_vocabulary_range = re.findall(r'(Vocabulary range: )(.*?)(\n)', raw_text)[0][1]
            level_vocabulary_control = re.findall(r'(Vocabulary control: )(.*?)(\n)', raw_text)[0][1]
            level_coherence = re.findall(r'(Coherence/Cohesion: )(.*?)(\n)', raw_text)[0][1]
            level_appropriateness = re.findall(r'(Sociolinguistic appropriateness: )(.*?)(\n)', raw_text)[0][1]
            text = re.findall(r'(Learner text: \n\n)(.*?)(\n\n----------------\n\n)', raw_text, re.DOTALL)[0][1]
            text_target = re.findall(r'(Target hypothesis 1: \n\n)(.*?)(\n\n----------------\n\n)', raw_text, re.DOTALL)[0][1]

            id_ = f'{language}_{author_id}'
            yield id_, {
                "author": author_id,
                "language": language,
                "level": level,
                "level_grammar": level_grammar,
                "level_ortography": level_ortography,
                "level_vocabulary_range": level_vocabulary_range,
                "level_vocabulary_control": level_vocabulary_control,
                "level_coherence": level_coherence,
                "level_appropriateness": level_appropriateness,
                "text": text,
                "text_target": text_target,
            }
|
|
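if __name__ == "__main__":
    # Minimal smoke test, a sketch rather than part of the loader: it assumes this
    # file is run directly and that the CLARIN download URL in _URLS is reachable.
    # Recent releases of ``datasets`` require ``trust_remote_code=True`` when loading
    # script-based datasets; drop the argument on older releases.
    from datasets import load_dataset

    dataset = load_dataset(__file__, "german", split="train", trust_remote_code=True)
    print(dataset)
    print(dataset[0]["level"], dataset[0]["text"][:80])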