import csv
import os

import datasets

logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """ """

_URLS = {
    "ccd": "https://drive.google.com/u/0/uc?id=13UYcJ6BcojsCKy-yc9qgHTyBMCCWB-w1&export=download",
    "clothing": "https://drive.google.com/u/0/uc?id=1BwDS30xzFEDqP-z9adj4wVSlsSjOIGqc&export=download",
    "clothing_binary": "https://drive.google.com/u/0/uc?id=1P5aPKD0wU1NWUlh2QiIYwSPN-S3wD3ua&export=download",
    "electronics": "https://drive.google.com/u/0/uc?id=1ztIUsraLPJSKkkle_uTrd78hYsSKKmur&export=download",
    "electronics_binary": "https://drive.google.com/u/0/uc?id=103kJN6snOc2sSMH9ojd_PNohVQ1g4XUJ&export=download",
    "office": "https://drive.google.com/u/0/uc?id=1DbrvS02d75sXoxaaPh90bJdRvr15fUS5&export=download",
    "office_binary": "https://drive.google.com/u/0/uc?id=1ED4JnoTFu_4H80jBUJlqRrEor-taZ2Qz&export=download",
    "toxicity": "https://drive.google.com/u/0/uc?id=1iATGRaGuOqiUrj31jYjAzns8iS_BJh1h&export=download",
}

# Column layouts of the source CSV files (the files are expected to have no header row).
_FIELDS = {
    "amazon": ["date", "rating", "reviewText", "summary"],
    "ccd": ["date", "product", "subproduct", "issue", "subissue", "text"],
    "toxicity": ["rev_id", "toxicity", "date", "comment", "sample"],
}

# Label sets per configuration; values are kept as strings so they match the raw CSV values.
_LABELS = {
    "amazon": ["1", "2", "3", "4", "5"],
    "amazon_binary": ["0", "1"],
    "ccd": [
        "Checking or savings account",
        "Credit card or prepaid card",
        "Credit reporting, credit repair services, or other personal consumer reports",
        "Debt collection",
        "Money transfer, virtual currency, or money service",
        "Mortgage",
        "Payday loan, title loan, or personal loan",
        "Student loan",
        "Vehicle loan or lease",
    ],
    "toxicity": ["0", "1"],
}


class QBConfig(datasets.BuilderConfig):
    """BuilderConfig holding the CSV layout, label set, and download URL of one subset."""

    def __init__(
        self,
        csv_fields,
        label_classes,
        label_column,
        text_column,
        url,
        **kwargs,
    ):
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.csv_fields = csv_fields
        self.label_classes = label_classes
        self.label_column = label_column
        self.text_column = text_column
        self.url = url


class QB(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        QBConfig(
            name="ccd",
            description="Consumer Complaints Database",
            url=_URLS["ccd"],
            csv_fields=_FIELDS["ccd"],
            label_classes=_LABELS["ccd"],
            label_column="product",
            text_column="text",
        ),
        QBConfig(
            name="clothing",
            description="Amazon Reviews (Clothing)",
            url=_URLS["clothing"],
            csv_fields=_FIELDS["amazon"],
            label_classes=_LABELS["amazon"],
            label_column="rating",
            text_column="reviewText",
        ),
        QBConfig(
            name="clothing_binary",
            description="Amazon Reviews (Clothing) with binary labels",
            url=_URLS["clothing_binary"],
            csv_fields=_FIELDS["amazon"],
            label_classes=_LABELS["amazon_binary"],
            label_column="rating",
            text_column="reviewText",
        ),
        QBConfig(
            name="electronics",
            description="Amazon Reviews (Electronics)",
            url=_URLS["electronics"],
            csv_fields=_FIELDS["amazon"],
            label_classes=_LABELS["amazon"],
            label_column="rating",
            text_column="reviewText",
        ),
        QBConfig(
            name="electronics_binary",
            description="Amazon Reviews (Electronics) with binary labels",
            url=_URLS["electronics_binary"],
            csv_fields=_FIELDS["amazon"],
            label_classes=_LABELS["amazon_binary"],
            label_column="rating",
            text_column="reviewText",
        ),
        QBConfig(
            name="office",
            description="Amazon Reviews (Office)",
            url=_URLS["office"],
            csv_fields=_FIELDS["amazon"],
            label_classes=_LABELS["amazon"],
            label_column="rating",
            text_column="reviewText",
        ),
        QBConfig(
            name="office_binary",
            description="Amazon Reviews (Office) with binary labels",
            url=_URLS["office_binary"],
            csv_fields=_FIELDS["amazon"],
            label_classes=_LABELS["amazon_binary"],
            label_column="rating",
            text_column="reviewText",
        ),
        QBConfig(
            name="toxicity",
            description="Wikipedia toxicity data set",
            url=_URLS["toxicity"],
            csv_fields=_FIELDS["toxicity"],
            label_classes=_LABELS["toxicity"],
            label_column="toxicity",
            text_column="comment",
        ),
    ]

    def _info(self):
        features = {
            "date": datasets.Value("string"),
            "id": datasets.Value("int32"),
            "label": datasets.features.ClassLabel(names=self.config.label_classes),
            "text": datasets.Value("string"),
        }
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
        )

    def _split_generators(self, dl_manager):
        # The downloaded archive is expected to extract to a directory containing
        # train.csv and test.csv.
        downloaded_files = dl_manager.download_and_extract(self.config.url)
        logger.info(str(downloaded_files))
        train_filepath = os.path.join(downloaded_files, "train.csv")
        test_filepath = os.path.join(downloaded_files, "test.csv")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": train_filepath},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": test_filepath},
            ),
        ]

    def _generate_examples(self, filepath):
        logger.info(f"generating examples from {filepath}")
        idx = 0
        with open(filepath, encoding="utf-8") as f:
            # Field names are supplied explicitly, so the CSVs are assumed to be headerless.
            reader = csv.DictReader(f, fieldnames=self.config.csv_fields)
            for row in reader:
                yield idx, {
                    "date": row["date"],
                    "id": idx,
                    "label": row[self.config.label_column],
                    "text": row[self.config.text_column],
                }
                idx += 1