|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""The CC-News dataset is based on Common Crawl News Dataset by Sebastian Nagel""" |
|
|
|
import json |
|
import os |
|
|
import textwrap |
|
|
|
import datasets |
|
|
|
|
|
logger = datasets.logging.get_logger(__name__) |
|
|
|
|
|
|
|
|
|
_DESCRIPTION = """\ |
|
""" |
|
|
|
|
|
|
|
|
|
_CITATION = """\ |
|
""" |
|
|
|
|
|
|
|
|
|
|
|
# Relative paths to the tar.gz archive(s) holding each subset's splits; the download
# manager resolves them against the location this script is loaded from.
_DOWNLOAD_URL = {
|
"ax": { |
|
"test": [os.path.join("data", "ax", "test.tar.gz")], |
|
}, |
|
"cola": { |
|
"train": [os.path.join("data", "cola", "train.tar.gz")], |
|
"test": [os.path.join("data", "cola", "test.tar.gz")], |
|
"validation": [os.path.join("data", "cola", "validation.tar.gz")], |
|
}, |
|
"mnli": { |
|
"train": [os.path.join("data", "mnli", "train.tar.gz")], |
|
"test_matched": [os.path.join("data", "mnli", "test_matched.tar.gz")], |
|
"validation_matched": [ |
|
os.path.join("data", "mnli", "validation_matched.tar.gz") |
|
], |
|
"test_mismatched": [os.path.join("data", "mnli", "test_mismatched.tar.gz")], |
|
"validation_mismatched": [ |
|
os.path.join("data", "mnli", "validation_mismatched.tar.gz") |
|
], |
|
}, |
|
"mrpc": { |
|
"train": [os.path.join("data", "mrpc", "train.tar.gz")], |
|
"test": [os.path.join("data", "mrpc", "test.tar.gz")], |
|
"validation": [os.path.join("data", "mrpc", "validation.tar.gz")], |
|
}, |
|
"qnli": { |
|
"train": [os.path.join("data", "qnli", "train.tar.gz")], |
|
"test": [os.path.join("data", "qnli", "test.tar.gz")], |
|
"validation": [os.path.join("data", "qnli", "validation.tar.gz")], |
|
}, |
|
"qqp": { |
|
"train": [os.path.join("data", "qqp", "train.tar.gz")], |
|
"test": [os.path.join("data", "qqp", "test.tar.gz")], |
|
"validation": [os.path.join("data", "qqp", "validation.tar.gz")], |
|
}, |
|
"rte": { |
|
"train": [os.path.join("data", "rte", "train.tar.gz")], |
|
"test": [os.path.join("data", "rte", "test.tar.gz")], |
|
"validation": [os.path.join("data", "rte", "validation.tar.gz")], |
|
}, |
|
"sst2": { |
|
"train": [os.path.join("data", "sst2", "train.tar.gz")], |
|
"test": [os.path.join("data", "sst2", "test.tar.gz")], |
|
"validation": [os.path.join("data", "sst2", "validation.tar.gz")], |
|
}, |
|
"stsb": { |
|
"train": [os.path.join("data", "stsb", "train.tar.gz")], |
|
"test": [os.path.join("data", "stsb", "test.tar.gz")], |
|
"validation": [os.path.join("data", "stsb", "validation.tar.gz")], |
|
}, |
|
"wnli": { |
|
"train": [os.path.join("data", "wnli", "train.tar.gz")], |
|
"test": [os.path.join("data", "wnli", "test.tar.gz")], |
|
"validation": [os.path.join("data", "wnli", "validation.tar.gz")], |
|
}, |
|
"vnrte": { |
|
"train": [os.path.join("data", "vnrte", "train.tar.gz")], |
|
"validation": [os.path.join("data", "vnrte", "validation.tar.gz")], |
|
}, |
|
"vsfc": { |
|
"train": [os.path.join("data", "vsfc", "train.tar.gz")], |
|
"test": [os.path.join("data", "vsfc", "test.tar.gz")], |
|
"validation": [os.path.join("data", "vsfc", "dev.tar.gz")], |
|
}, |
|
"vsmec": { |
|
"train": [os.path.join("data", "vsmec", "train.tar.gz")], |
|
"test": [os.path.join("data", "vsmec", "test.tar.gz")], |
|
"validation": [os.path.join("data", "vsmec", "valid.tar.gz")], |
|
}, |
|
"vtoc": { |
|
"train": [os.path.join("data", "vtoc", "train.tar.gz")], |
|
"validation": [os.path.join("data", "vtoc", "validation.tar.gz")], |
|
}, |
|
} |
|
|
|
# Per-subset builder settings: text feature columns, label classes, source label
# column, citation, and description. Each entry is expanded into a ViGLUEConfig below.
SUBSET_KWARGS = {
|
"ax": { |
|
"name": "ax", |
|
"text_features": ["premise", "hypothesis"], |
|
"label_classes": ["entailment", "neutral", "contradiction"], |
|
"label_column": "", |
|
"citation": "", |
|
"description": textwrap.dedent( |
|
"""\ |
|
A manually-curated evaluation dataset for fine-grained analysis of |
|
system performance on a broad range of linguistic phenomena. This |
|
dataset evaluates sentence understanding through Natural Language |
|
            Inference (NLI) problems. Use a model trained on MultiNLI to produce
|
predictions for this dataset.""" |
|
), |
|
}, |
|
"cola": { |
|
"name": "cola", |
|
"text_features": ["sentence"], |
|
"label_classes": ["unacceptable", "acceptable"], |
|
"label_column": "is_acceptable", |
|
"citation": textwrap.dedent( |
|
"""\ |
|
@article{warstadt2018neural, |
|
title={Neural Network Acceptability Judgments}, |
|
author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R}, |
|
journal={arXiv preprint arXiv:1805.12471}, |
|
year={2018} |
|
}""" |
|
), |
|
"description": textwrap.dedent( |
|
"""\ |
|
The Corpus of Linguistic Acceptability consists of English |
|
acceptability judgments drawn from books and journal articles on |
|
linguistic theory. Each example is a sequence of words annotated |
|
with whether it is a grammatical English sentence.""" |
|
), |
|
}, |
|
"mnli": { |
|
"name": "mnli", |
|
"text_features": ["premise", "hypothesis"], |
|
"label_classes": ["entailment", "neutral", "contradiction"], |
|
"label_column": "gold_label", |
|
"citation": textwrap.dedent( |
|
"""\ |
|
@InProceedings{N18-1101, |
|
author = "Williams, Adina |
|
and Nangia, Nikita |
|
and Bowman, Samuel", |
|
title = "A Broad-Coverage Challenge Corpus for |
|
Sentence Understanding through Inference", |
|
booktitle = "Proceedings of the 2018 Conference of |
|
the North American Chapter of the |
|
Association for Computational Linguistics: |
|
Human Language Technologies, Volume 1 (Long |
|
Papers)", |
|
year = "2018", |
|
publisher = "Association for Computational Linguistics", |
|
pages = "1112--1122", |
|
location = "New Orleans, Louisiana", |
|
url = "http://aclweb.org/anthology/N18-1101" |
|
} |
|
@article{bowman2015large, |
|
title={A large annotated corpus for learning natural language inference}, |
|
author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D}, |
|
journal={arXiv preprint arXiv:1508.05326}, |
|
year={2015} |
|
}""" |
|
), |
|
"description": textwrap.dedent( |
|
"""\ |
|
The Multi-Genre Natural Language Inference Corpus is a crowdsourced |
|
collection of sentence pairs with textual entailment annotations. Given a premise sentence |
|
and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis |
|
(entailment), contradicts the hypothesis (contradiction), or neither (neutral). The premise sentences are |
|
gathered from ten different sources, including transcribed speech, fiction, and government reports. |
|
We use the standard test set, for which we obtained private labels from the authors, and evaluate |
|
on both the matched (in-domain) and mismatched (cross-domain) section. We also use and recommend |
|
the SNLI corpus as 550k examples of auxiliary training data.""" |
|
), |
|
}, |
|
"mrpc": { |
|
"name": "mrpc", |
|
"text_features": ["sentence1", "sentence2"], |
|
"label_classes": ["not_equivalent", "equivalent"], |
|
"label_column": "Quality", |
|
"citation": textwrap.dedent( |
|
"""\ |
|
@inproceedings{dolan2005automatically, |
|
title={Automatically constructing a corpus of sentential paraphrases}, |
|
author={Dolan, William B and Brockett, Chris}, |
|
booktitle={Proceedings of the Third International Workshop on Paraphrasing (IWP2005)}, |
|
year={2005} |
|
}""" |
|
), |
|
"description": textwrap.dedent( |
|
"""\ |
|
The Microsoft Research Paraphrase Corpus (Dolan & Brockett, 2005) is a corpus of |
|
sentence pairs automatically extracted from online news sources, with human annotations |
|
for whether the sentences in the pair are semantically equivalent.""" |
|
), |
|
}, |
|
"qnli": { |
|
"name": "qnli", |
|
"text_features": ["question", "sentence"], |
|
"label_classes": ["entailment", "not_entailment"], |
|
"label_column": "label", |
|
"citation": textwrap.dedent( |
|
"""\ |
|
@article{rajpurkar2016squad, |
|
title={Squad: 100,000+ questions for machine comprehension of text}, |
|
author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy}, |
|
journal={arXiv preprint arXiv:1606.05250}, |
|
year={2016} |
|
}""" |
|
), |
|
"description": textwrap.dedent( |
|
"""\ |
|
The Stanford Question Answering Dataset is a question-answering |
|
dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn |
|
from Wikipedia) contains the answer to the corresponding question (written by an annotator). We |
|
convert the task into sentence pair classification by forming a pair between each question and each |
|
sentence in the corresponding context, and filtering out pairs with low lexical overlap between the |
|
question and the context sentence. The task is to determine whether the context sentence contains |
|
the answer to the question. This modified version of the original task removes the requirement that |
|
the model select the exact answer, but also removes the simplifying assumptions that the answer |
|
is always present in the input and that lexical overlap is a reliable cue.""" |
|
), |
|
}, |
|
"qqp": { |
|
"name": "qqp", |
|
"text_features": ["question1", "question2"], |
|
"label_classes": ["not_duplicate", "duplicate"], |
|
"label_column": "is_duplicate", |
|
"citation": textwrap.dedent( |
|
"""\ |
|
@online{WinNT, |
|
author = {Iyer, Shankar and Dandekar, Nikhil and Csernai, Kornel}, |
|
title = {First Quora Dataset Release: Question Pairs}, |
|
year = {2017}, |
|
url = {https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs}, |
|
urldate = {2019-04-03} |
|
}""" |
|
), |
|
"description": textwrap.dedent( |
|
"""\ |
|
            The Quora Question Pairs dataset is a collection of question pairs from the
|
community question-answering website Quora. The task is to determine whether a |
|
pair of questions are semantically equivalent.""" |
|
), |
|
}, |
|
"rte": { |
|
"name": "rte", |
|
"text_features": ["sentence1", "sentence2"], |
|
"label_classes": ["entailment", "not_entailment"], |
|
"label_column": "label", |
|
"citation": textwrap.dedent( |
|
"""\ |
|
@inproceedings{dagan2005pascal, |
|
title={The PASCAL recognising textual entailment challenge}, |
|
author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo}, |
|
booktitle={Machine Learning Challenges Workshop}, |
|
pages={177--190}, |
|
year={2005}, |
|
organization={Springer} |
|
} |
|
@inproceedings{bar2006second, |
|
title={The second pascal recognising textual entailment challenge}, |
|
author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan}, |
|
booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment}, |
|
volume={6}, |
|
number={1}, |
|
pages={6--4}, |
|
year={2006}, |
|
organization={Venice} |
|
} |
|
@inproceedings{giampiccolo2007third, |
|
title={The third pascal recognizing textual entailment challenge}, |
|
author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill}, |
|
booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing}, |
|
pages={1--9}, |
|
year={2007}, |
|
organization={Association for Computational Linguistics} |
|
} |
|
@inproceedings{bentivogli2009fifth, |
|
title={The Fifth PASCAL Recognizing Textual Entailment Challenge.}, |
|
author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo}, |
|
booktitle={TAC}, |
|
year={2009} |
|
}""" |
|
), |
|
"description": textwrap.dedent( |
|
"""\ |
|
The Recognizing Textual Entailment (RTE) datasets come from a series of annual textual |
|
entailment challenges. We combine the data from RTE1 (Dagan et al., 2006), RTE2 (Bar Haim |
|
            et al., 2006), RTE3 (Giampiccolo et al., 2007), and RTE5 (Bentivogli et al., 2009). Examples are
|
constructed based on news and Wikipedia text. We convert all datasets to a two-class split, where |
|
for three-class datasets we collapse neutral and contradiction into not entailment, for consistency.""" |
|
), |
|
}, |
|
"sst2": { |
|
"name": "sst2", |
|
"text_features": ["sentence"], |
|
"label_classes": ["negative", "positive"], |
|
"label_column": "label", |
|
"citation": textwrap.dedent( |
|
"""\ |
|
@inproceedings{socher2013recursive, |
|
title={Recursive deep models for semantic compositionality over a sentiment treebank}, |
|
author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher}, |
|
booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing}, |
|
pages={1631--1642}, |
|
year={2013} |
|
}""" |
|
), |
|
"description": textwrap.dedent( |
|
"""\ |
|
The Stanford Sentiment Treebank consists of sentences from movie reviews and |
|
human annotations of their sentiment. The task is to predict the sentiment of a |
|
given sentence. We use the two-way (positive/negative) class split, and use only |
|
sentence-level labels.""" |
|
), |
|
}, |
|
"stsb": { |
|
"name": "stsb", |
|
"text_features": ["sentence1", "sentence2"], |
|
"label_classes": None, |
|
"label_column": "score", |
|
"citation": textwrap.dedent( |
|
"""\ |
|
@inproceedings{cer2017semeval, |
|
title={Semeval-2017 task 1: Semantic textual similarity-multilingual and cross-lingual focused evaluation}, |
|
author={Cer, Daniel and Diab, Mona and Agirre, Eneko and Lopez-Gazpio, Inigo and Specia, Lucia}, |
|
booktitle={Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017)}, |
|
pages={1--14}, |
|
year={2017} |
|
}""" |
|
), |
|
"description": textwrap.dedent( |
|
"""\ |
|
The Semantic Textual Similarity Benchmark (Cer et al., 2017) is a collection of |
|
sentence pairs drawn from news headlines, video and image captions, and natural language |
|
            inference data. Each pair is human-annotated with a similarity score from 1 to 5;

            the task is to predict these scores."""
|
), |
|
"process_label": lambda x: float(x), |
|
}, |
|
"wnli": { |
|
"name": "wnli", |
|
"text_features": ["sentence1", "sentence2"], |
|
"label_classes": ["not_entailment", "entailment"], |
|
"label_column": "label", |
|
"citation": textwrap.dedent( |
|
"""\ |
|
@inproceedings{levesque2012winograd, |
|
title={The winograd schema challenge}, |
|
author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora}, |
|
booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning}, |
|
year={2012} |
|
}""" |
|
), |
|
"description": textwrap.dedent( |
|
"""\ |
|
The Winograd Schema Challenge (Levesque et al., 2011) is a reading comprehension task |
|
in which a system must read a sentence with a pronoun and select the referent of that pronoun from |
|
a list of choices. The examples are manually constructed to foil simple statistical methods: Each |
|
one is contingent on contextual information provided by a single word or phrase in the sentence. |
|
To convert the problem into sentence pair classification, we construct sentence pairs by replacing |
|
the ambiguous pronoun with each possible referent. The task is to predict if the sentence with the |
|
pronoun substituted is entailed by the original sentence. We use a small evaluation set consisting of |
|
new examples derived from fiction books that was shared privately by the authors of the original |
|
corpus. While the included training set is balanced between two classes, the test set is imbalanced |
|
between them (65% not entailment). Also, due to a data quirk, the development set is adversarial: |
|
hypotheses are sometimes shared between training and development examples, so if a model memorizes the |
|
            training examples, it will predict the wrong label on the corresponding development set
|
example. As with QNLI, each example is evaluated separately, so there is not a systematic correspondence |
|
between a model's score on this task and its score on the unconverted original task. We |
|
            call the converted dataset WNLI (Winograd NLI)."""
|
), |
|
}, |
|
"vnrte": { |
|
"name": "vnrte", |
|
"text_features": ["sentence1", "sentence2", "topic", "source"], |
|
"label_classes": ["entailment", "not_entailment"], |
|
"label_column": "label", |
|
"citation": textwrap.dedent( |
|
"""\ |
|
""" |
|
), |
|
"description": textwrap.dedent( |
|
"""\ |
|
""" |
|
), |
|
}, |
|
"vsfc": { |
|
"name": "vsfc", |
|
"text_features": ["sentence"], |
|
"label_classes": ["negative", "neutral", "positive"], |
|
"label_column": "label", |
|
"citation": textwrap.dedent( |
|
""" |
|
@inproceedings{van2018uit, |
|
title={UIT-VSFC: Vietnamese students’ feedback corpus for sentiment analysis}, |
|
author={Van Nguyen, Kiet and Nguyen, Vu Duc and Nguyen, Phu XV and Truong, Tham TH and Nguyen, Ngan Luu-Thuy}, |
|
booktitle={2018 10th international conference on knowledge and systems engineering (KSE)}, |
|
pages={19--24}, |
|
year={2018}, |
|
organization={IEEE} |
|
}""" |
|
), |
|
"description": textwrap.dedent( |
|
"""Vietnamese Students' Feedback Corpus (UIT-VSFC), a free and high-quality corpus for research on two different tasks: sentiment-based and topic-based classifications""" |
|
), |
|
}, |
|
"vsmec": { |
|
"name": "vsmec", |
|
"text_features": ["sentence", "raw_sentence", "emotion"], |
|
"label_classes": [ |
|
"Anger", |
|
"Disgust", |
|
"Enjoyment", |
|
"Fear", |
|
"Other", |
|
"Sadness", |
|
"Surprise", |
|
], |
|
"label_column": "label", |
|
"citation": textwrap.dedent( |
|
"""@inproceedings{ho2020emotion, |
|
title={Emotion recognition for vietnamese social media text}, |
|
author={Ho, Vong Anh and Nguyen, Duong Huynh-Cong and Nguyen, Danh Hoang and Pham, Linh Thi-Van and Nguyen, Duc-Vu and Nguyen, Kiet Van and Nguyen, Ngan Luu-Thuy}, |
|
booktitle={Computational Linguistics: 16th International Conference of the Pacific Association for Computational Linguistics, PACLING 2019, Hanoi, Vietnam, October 11--13, 2019, Revised Selected Papers 16}, |
|
pages={319--333}, |
|
year={2020}, |
|
organization={Springer} |
|
}""" |
|
), |
|
"description": textwrap.dedent( |
|
"""a standard Vietnamese Social Media Emotion Corpus (UIT-VSMEC) with exactly 6,927 emotion-annotated sentences, contributing to emotion recognition research in Vietnamese""" |
|
), |
|
}, |
|
"vtoc": { |
|
"name": "vtoc", |
|
"text_features": ["sentence"], |
|
"label_classes": [ |
|
"Automobile", |
|
"Business", |
|
"Digital", |
|
"Education", |
|
"Entertainment", |
|
"Health", |
|
"Law", |
|
"Life", |
|
"News", |
|
"Perspective", |
|
"Relax", |
|
"Science", |
|
"Sports", |
|
"Travel", |
|
"World", |
|
], |
|
"label_column": "label", |
|
"citation": textwrap.dedent(""""""), |
|
"description": textwrap.dedent(""""""), |
|
}, |
|
} |
|
|
|
|
|
_VERSION = datasets.Version("1.2.0", "") |
|
|
|
|
|
class ViGLUEConfig(datasets.BuilderConfig): |
|
"""BuilderConfig for ViGLUE.""" |
|
|
|
def __init__( |
|
self, |
|
text_features, |
|
label_column="", |
|
data_url="", |
|
data_dir="", |
|
citation="", |
|
url="", |
|
label_classes=None, |
|
process_label=lambda x: x, |
|
**kwargs, |
|
): |
|
"""BuilderConfig for VieGLUE. |
|
Args: |
|
text_features: `dict[string, string]`, map from the name of the feature |
|
dict for each text field to the name of the column in the tsv file |
|
label_column: `string`, name of the column in the tsv file corresponding |
|
to the label |
|
data_url: `string`, url to download the zip file from |
|
data_dir: `string`, the path to the folder containing the tsv files in the |
|
downloaded zip |
|
citation: `string`, citation for the data set |
|
url: `string`, url for information about the data set |
|
label_classes: `list[string]`, the list of classes if the label is |
|
categorical. If not provided, then the label will be of type |
|
`datasets.Value('float32')`. |
|
process_label: `Function[string, any]`, function taking in the raw value |
|
of the label and processing it to the form required by the label feature |
|
**kwargs: keyword arguments forwarded to super. |
|
""" |
|
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
|
self.text_features = text_features |
|
self.label_column = label_column |
|
self.label_classes = label_classes |
|
self.data_url = data_url |
|
self.data_dir = data_dir |
|
self.citation = citation |
|
self.url = url |
|
self.process_label = process_label |
|
|
|
|
|
class VNExpress(datasets.GeneratorBasedBuilder): |
|
"""""" |
|
|
|
VERSION = _VERSION |
|
DEFAULT_CONFIG_NAME = "mnli" |
|
|
|
BUILDER_CONFIGS = [ViGLUEConfig(**config) for config in SUBSET_KWARGS.values()] |
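    # One BuilderConfig per entry in SUBSET_KWARGS; a subset is selected via the
    # `name` argument of `load_dataset` (e.g. name="sst2").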
|
|
|
def _info(self): |
|
features = {f: datasets.Value("string") for f in self.config.text_features} |
|
if self.config.label_classes: |
|
features["label"] = datasets.features.ClassLabel( |
|
names=self.config.label_classes |
|
) |
|
else: |
|
features["label"] = datasets.Value("float32") |
|
features["idx"] = datasets.Value("int32") |
|
return datasets.DatasetInfo( |
|
description=_DESCRIPTION, |
|
features=datasets.Features(features), |
|
homepage=self.config.url, |
|
citation=self.config.citation + "\n" + _CITATION, |
|
) |
|
|
|
def _split_generators(self, dl_manager): |
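        # Map the raw split names used in _DOWNLOAD_URL onto canonical datasets.Split
        # values; MNLI's matched/mismatched evaluation splits keep their own names.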
|
_SPLIT_MAPPING = { |
|
"train": datasets.Split.TRAIN, |
|
"training": datasets.Split.TRAIN, |
|
"test": datasets.Split.TEST, |
|
"testing": datasets.Split.TEST, |
|
"val": datasets.Split.VALIDATION, |
|
"validation": datasets.Split.VALIDATION, |
|
"valid": datasets.Split.VALIDATION, |
|
"dev": datasets.Split.VALIDATION, |
|
"test_matched": "test_matched", |
|
"test_mismatched": "test_mismatched", |
|
"validation_matched": "validation_matched", |
|
"validation_mismatched": "validation_mismatched", |
|
} |
|
|
|
name = self.config.name |
|
download_url = _DOWNLOAD_URL[name] |
|
filepath = dl_manager.download_and_extract(download_url) |
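        # download_and_extract preserves the nested structure of its input, so
        # `filepath` maps each split name to a list of extracted archive directories.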
|
|
|
return_datasets = [] |
|
for split in download_url: |
|
return_datasets.append( |
|
datasets.SplitGenerator( |
|
name=_SPLIT_MAPPING[split], |
|
gen_kwargs={ |
|
"files": filepath[split], |
|
"urls": download_url[split], |
|
"stage": split, |
|
"config": self.config, |
|
}, |
|
) |
|
) |
|
|
|
return return_datasets |
|
|
|
def _generate_examples(self, files, urls, stage, config): |
|
id_ = 0 |
|
features = config.text_features |
|
|
|
if not isinstance(files, list): |
|
files = [files] |
|
for path, url in zip(files, urls): |
|
for file in os.listdir(path): |
|
if file.startswith("._"): |
|
continue |
|
file_path = os.path.join(path, file) |
|
if not os.path.isfile(file_path): |
|
continue |
|
with open(file_path, encoding="utf-8") as f: |
|
all_samples = json.load(f) |
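                    # Each JSON file is expected to hold a list of records containing this
                    # subset's text features plus a "label" field; empty or missing labels
                    # are mapped to -1 below (the conventional value for unlabeled examples).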
|
for sample in all_samples: |
|
if sample["label"] is None or sample["label"] == "": |
|
sample["label"] = -1 |
|
yield id_, { |
|
"idx": id_, |
|
"label": sample["label"], |
|
**{f: sample[f] for f in features}, |
|
} |
|
|
|
id_ += 1 |
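

# A minimal usage sketch (the dataset path below is a placeholder for wherever this
# script and its `data/` archives are hosted, not a confirmed repository id):
#
#     from datasets import load_dataset
#     dataset = load_dataset("path/to/viglue", name="mnli")
#     print(dataset["train"][0])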
|
|