melisa / melisa.py
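# Hugging Face `datasets` loading script for MeLiSA, a collection of Mercado
# Libre reviews in Spanish and Portuguese for sentiment classification.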
import datasets
import csv
_LICENSE = """
TO DO: License
"""
with open("README.md", "r") as f:
    lines = iter(f.readlines())
    for line in lines:
        if "### Dataset Summary" in line:
            break
    next(lines)
    _DESCRIPTION = next(lines)
_CITATION = """
TO DO: Citation
"""
_LANGUAGES = {
    "es": "Spanish",
    "pt": "Portuguese"
}
_ALL_LANGUAGES = "all_languages"
_VERSION = "1.0.0"
_HOMEPAGE_URL = "https://github.com/lpsc-fiuba/MeLiSA"
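# Data files are expected to live next to this script, one folder per language
# code, each holding train/validation/test CSV files (e.g. ./es/train.csv).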
_DOWNLOAD_URL = "./{lang}/{split}.csv"


class MeLiSAConfig(datasets.BuilderConfig):
    """BuilderConfig for MeLiSA."""

    def __init__(self, languages=None, **kwargs):
        """Constructs a MeLiSAConfig.

        Args:
            languages: language codes to include in this configuration.
            **kwargs: keyword arguments forwarded to super.
        """
        super(MeLiSAConfig, self).__init__(version=datasets.Version(_VERSION, ""), **kwargs)
        self.languages = languages


class MeLiSA(datasets.GeneratorBasedBuilder):
    """MeLiSA dataset."""

    BUILDER_CONFIGS = [
        MeLiSAConfig(
            name=_ALL_LANGUAGES,
            languages=_LANGUAGES,
            description="A collection of Mercado Libre reviews specifically designed to aid research in Spanish and Portuguese sentiment classification.",
        )
    ] + [
        MeLiSAConfig(
            name=lang,
            languages=[lang],
            description=f"{_LANGUAGES[lang]} examples from a collection of Mercado Libre reviews specifically designed to aid research in sentiment classification.",
        )
        for lang in _LANGUAGES
    ]
    BUILDER_CONFIG_CLASS = MeLiSAConfig
    DEFAULT_CONFIG_NAME = _ALL_LANGUAGES
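
    # The features below mirror the CSV columns: every example is one review
    # with its country, product category, text, title, and numeric rating.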
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "country": datasets.Value("string"),
                    "category": datasets.Value("string"),
                    "review_content": datasets.Value("string"),
                    "review_title": datasets.Value("string"),
                    "review_rate": datasets.Value("int32"),
                }
            ),
            supervised_keys=None,
            license=_LICENSE,
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
        )
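
    # One relative CSV path is built per requested language and per split; the
    # download manager resolves those paths inside the dataset repository.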
    def _split_generators(self, dl_manager):
        train_urls = [_DOWNLOAD_URL.format(split="train", lang=lang) for lang in self.config.languages]
        dev_urls = [_DOWNLOAD_URL.format(split="validation", lang=lang) for lang in self.config.languages]
        test_urls = [_DOWNLOAD_URL.format(split="test", lang=lang) for lang in self.config.languages]

        train_paths = dl_manager.download_and_extract(train_urls)
        dev_paths = dl_manager.download_and_extract(dev_urls)
        test_paths = dl_manager.download_and_extract(test_urls)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"file_paths": train_paths}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"file_paths": dev_paths}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"file_paths": test_paths}),
        ]

    def _generate_examples(self, file_paths):
        """Generate examples from the downloaded CSV files.

        Args:
            file_paths: paths to the csv files, one per requested language.

        Yields:
            The features.
        """
        # Use a single running key so that examples from different language
        # files never collide when the "all_languages" config is selected.
        key = 0
        for file_path in file_paths:
            with open(file_path, "r", encoding="utf-8") as csvfile:
                reader = csv.DictReader(csvfile)
                for row in reader:
                    yield key, row
                    key += 1
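

# A minimal usage sketch (not part of the original script), assuming the
# per-language CSV folders described above sit next to this file and that the
# installed `datasets` version still supports script-based loading. The config
# name "es" is one of the per-language configs defined in BUILDER_CONFIGS;
# "pt" or "all_languages" would work the same way.
if __name__ == "__main__":
    melisa = datasets.load_dataset("melisa.py", "es")
    print(melisa)
    print(melisa["train"][0])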