# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loading script for the Astrid Education CEFR-annotated sentences dataset."""

import csv

import datasets


_DESCRIPTION = """\
This dataset contains 3370555 sentences, which each have an assigned CEFR level
derived from EFLLex (https://cental.uclouvain.be/cefrlex/efllex/download).
The sentences comes from "the pile books3", which is available on Huggingface
(https://huggingface.co/datasets/the_pile_books3).
The CEFR levels used are A1, A2, B1, B2 and C1, and there are equals number of
sentences for each level.
Assigning each sentence a CEFR level followed is based on the concept of
"shifted frequency distribution", introduced by David Alfter and his paper can
be found at (https://gupea.ub.gu.se/bitstream/2077/66861/4/gupea_2077_66861_4.pdf).
For each word in each sentence, take the CEFR level with the highest "shifted
frequency distribution" in the EFLLex table. After all words have been
processed, the sentence gets annotated with the most frequently appearing CEFR
level from the whole senctence.
"""

# NOTE: original citation was missing the comma between BibTeX fields,
# which makes the entry unparseable by BibTeX tools — fixed here.
_CITATION = """\
@misc{cefr_book_sentences,
  author={Astrid Education AB},
  year={2021}
}
"""

_URL = "https://huggingface.co/datasets/astrideducation/cefr-combined-task/resolve/main/"

_URLS = {
    "test": _URL + "test_dataset_wo_cefr.csv",
}


class CefrDataset(datasets.GeneratorBasedBuilder):
    """Dataset builder for CEFR-level sentence classification.

    Each example is a sentence (``prompt``) paired with a CEFR level string
    (``label``). Only a ``test`` split is exposed, downloaded as a single
    CSV file from the Hugging Face Hub.
    """

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Returns the DatasetInfo describing features and metadata."""
        features = datasets.Features(
            {
                "prompt": datasets.Value("string"),
                "label": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # No canonical (input, target) pairing is declared for this dataset.
            supervised_keys=None,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators.

        Downloads the test CSV (plain file, no archive, so ``download`` rather
        than ``download_and_extract`` is sufficient) and wires it to the TEST
        split.
        """
        data_dir = dl_manager.download(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_dir["test"],
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yields examples as (key, example) tuples.

        Args:
            filepath: Path to the downloaded CSV file. Expected columns:
                prompt, label — with a header row that is skipped.
            split: Split name (unused beyond the datasets API contract).
        """
        with open(filepath, encoding="utf-8") as csv_file:
            csv_reader = csv.reader(
                csv_file,
                quotechar='"',
                delimiter=",",
                quoting=csv.QUOTE_ALL,
                skipinitialspace=True,
            )
            # Skip the header row; `None` default avoids StopIteration on
            # an empty file.
            next(csv_reader, None)
            for id_, row in enumerate(csv_reader):
                prompt, label = row
                yield id_, {"prompt": prompt, "label": label}