# Copyright 2020 The HuggingFace Datasets Authors and the current dataset
# script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BPSAD -- Brazilian Portuguese Sentiment Analysis Datasets."""

import csv
import os
import sys

import datasets
from datasets import ClassLabel

# Reviews can be long, so raise the CSV field size limit to the platform maximum.
csv.field_size_limit(sys.maxsize)

_HOMEPAGE = """\
https://www.kaggle.com/datasets/fredericods/ptbr-sentiment-analysis-datasets"""

_DESCRIPTION = """\
The Brazilian Portuguese Sentiment Analysis Dataset (BPSAD) is the
concatenation of 5 different sources (Olist, B2W Digital, Buscapé, UTLC-Apps
and UTLC-Movies), each one composed of review sentences classified according
to polarity (0: negative; 1: positive) and rating (1, 2, 3, 4 and 5 stars)."""

_CITATION = """\
@inproceedings{souza2021sentiment,
  author={Souza, Frederico Dias and Baptista de Oliveira e Souza Filho, João},
  booktitle={2021 IEEE Latin American Conference on Computational Intelligence (LA-CCI)},
  title={Sentiment Analysis on Brazilian Portuguese User Reviews},
  year={2021},
  pages={1-6},
  doi={10.1109/LA-CCI48322.2021.9769838}
}
"""

_VERSION = datasets.Version("1.0.0")

_LICENSE = ""


class BPSAD(datasets.GeneratorBasedBuilder):
    """BPSAD dataset."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="polarity", description="Polarity classification dataset."
        ),
        datasets.BuilderConfig(
            name="rating", description="Rating classification dataset."
        ),
    ]

    @property
    def manual_download_instructions(self):
        return (
            "To use this dataset you have to download it manually:\n"
            f" 1. Download the `concatenated` file from `{_HOMEPAGE}`.\n"
            " 2. Extract the file inside `[PATH_TO_FILE]`.\n"
            " 3. Load the dataset using the command:\n"
            "    datasets.load_dataset("
            "\"lm4pt/bpsad\", name=..., data_dir=\"[PATH_TO_FILE]\")\n\n"
            "Possible names are: `polarity` and `rating`."
        )

    def _info(self):
        # Note:
        # DEFAULT_CONFIG_NAME is not working and returns the value `default`,
        # so it is better to set the config name explicitly.
        if self.config.name not in ("polarity", "rating"):
            raise ValueError(
                f"`{self.config.name}` is not a valid config name. Possible "
                "values are `polarity` and `rating`. Make sure to pass it via "
                "`datasets.load_dataset('lm4pt/bpsad', name=...)`."
            )

        if self.config.name == "polarity":
            features = datasets.Features({
                "review_text": datasets.Value("string"),
                "polarity": ClassLabel(
                    num_classes=2, names=["negative", "positive"]
                ),
            })
        else:
            features = datasets.Features({
                "review_text": datasets.Value("string"),
                "rating": datasets.Value("int8"),
            })

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
            version=_VERSION,
        )

    def _split_generators(self, dl_manager):
        data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))

        # Validate that the dataset folder exists.
        if not os.path.exists(data_dir):
            raise FileNotFoundError(
                f"{data_dir} does not exist. Make sure to pass the "
                "parameter `data_dir` via `datasets.load_dataset`.\n"
                "Manual download instructions:\n"
                + self.manual_download_instructions
            )
        data_file = os.path.join(data_dir, "concatenated.csv")

        # Check that the dataset file exists.
        if not os.path.exists(data_file):
            raise FileNotFoundError(
                f"{data_file} does not exist.\n"
                + self.manual_download_instructions
            )

        # Folds 1-8 feed the train split, fold 9 the validation split and
        # fold 10 the test split.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_file,
                    "split": "train",
                    "kfold_min": 1,
                    "kfold_max": 8,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": data_file,
                    "split": "dev",
                    "kfold_min": 9,
                    "kfold_max": 9,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_file,
                    "split": "test",
                    "kfold_min": 10,
                    "kfold_max": 10,
                },
            ),
        ]

    def _generate_examples(self, filepath, split, kfold_min, kfold_max):
        # CSV columns, as indexed below. The file is assumed to carry an
        # unnamed index column in position 0, which is why every named
        # column is shifted by one:
        # 0 - (unnamed) index,
        # 1 - original_index,
        # 2 - review_text,
        # 3 - review_text_processed,
        # 4 - review_text_tokenized,
        # 5 - polarity,
        # 6 - rating,
        # 7 - kfold_polarity,
        # 8 - kfold_rating
        with open(filepath, encoding="utf-8") as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=",")
            # Skip the header row.
            _ = next(csv_reader)
            _id = 0
            if self.config.name == "polarity":
                for row in csv_reader:
                    # Keep only rows whose polarity fold falls in this split.
                    kfold = int(row[7])
                    if kfold_min <= kfold <= kfold_max:
                        yield _id, {
                            "review_text": row[2],
                            "polarity": int(float(row[5])),
                        }
                        _id += 1
            else:
                for row in csv_reader:
                    # Keep only rows whose rating fold falls in this split.
                    kfold = int(row[8])
                    if kfold_min <= kfold <= kfold_max:
                        yield _id, {
                            "review_text": row[2],
                            "rating": int(float(row[6])),
                        }
                        _id += 1
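

# The block below is an optional, minimal usage sketch; it is not required by
# the `datasets` loading machinery and only runs when this script is executed
# directly. It assumes `concatenated.csv` was extracted into a local
# `./bpsad_data` folder, a hypothetical path used only for illustration (see
# `manual_download_instructions` above).
if __name__ == "__main__":
    for config_name in ("polarity", "rating"):
        dataset = datasets.load_dataset(
            "lm4pt/bpsad", name=config_name, data_dir="./bpsad_data"
        )
        # Print the number of examples per split for a quick sanity check.
        print(config_name, {split: len(ds) for split, ds in dataset.items()})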