Tasks: Text Classification
Modalities: Text
Sub-tasks: sentiment-classification
Languages: English
Size: 100K - 1M
Tags: amazon review
License:
from datasets import ClassLabel, Value
import datasets
_SNAP10K_CITATION = """\
"""

_SNAP10K_DESCRIPTION = """\
SNAP10K is a collection of English Amazon product reviews for sentiment
classification. Each configuration covers one product domain, and every example
is a review sentence labelled POS or NEG.
See https://github.com/ws719547997/LNB-DA for more details.
"""

class SNAP10KConfig(datasets.BuilderConfig):
    """BuilderConfig for one SNAP10K domain."""

    def __init__(
        self,
        text_features,
        label_column,
        data_url,
        data_dir,
        citation,
        url,
        label_classes=None,
        process_label=lambda x: x,
        **kwargs,
    ):
        super(SNAP10KConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.text_features = text_features
        self.label_column = label_column
        self.label_classes = label_classes
        self.data_url = data_url
        self.data_dir = data_dir
        self.citation = citation
        self.url = url
        self.process_label = process_label

class SNAP10K(datasets.GeneratorBasedBuilder):
    domain_list = [
        'Automotive_5', 'Electronics_5', 'Industrial_and_Scientific_5', 'Kindle_Store_5',
        'Cell_Phones_and_Accessories_5', 'Musical_Instruments_5', 'Office_Products_5',
        'Patio_Lawn_and_Garden_5', 'Sports_and_Outdoors_5', 'Luxury_Beauty_5',
        'Grocery_and_Gourmet_Food_5', 'Digital_Music_5', 'Tools_and_Home_Improvement_5',
        'Pet_Supplies_5', 'Prime_Pantry_5', 'Toys_and_Games_5', 'Movies_and_TV_5',
        'Home_and_Kitchen_5', 'Arts_Crafts_and_Sewing_5', 'Video_Games_5', 'CDs_and_Vinyl_5',
    ]

    BUILDER_CONFIGS = [
        SNAP10KConfig(
            name=domain_name,
            description=f'comments of {domain_name}.',
            text_features={'sentence': 'sentence', 'domain': 'domain'},
            label_classes=['POS', 'NEG'],
            label_column='label',
            citation="",
            data_dir="",
            data_url="https://huggingface.co/datasets/kuroneko3578/snap21/resolve/main/",
            url='https://github.com/ws719547997/LNB-DA',
        )
        for domain_name in domain_list
    ]
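
    # Each entry in domain_list becomes its own dataset configuration. As an
    # illustration (the repository path below is taken from data_url above and is
    # an assumption about where this script is hosted), the available config
    # names can be listed without downloading any data:
    #   from datasets import get_dataset_config_names
    #   get_dataset_config_names("kuroneko3578/snap21")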

    def _info(self):
        features = {
            'id': Value(dtype='int32', id=None),
            'domain': Value(dtype='string', id=None),
            'label': ClassLabel(num_classes=2, names=['POS', 'NEG'], names_file=None, id=None),
            'rank': Value(dtype='string', id=None),
            'sentence': Value(dtype='string', id=None),
        }
        return datasets.DatasetInfo(
            description=_SNAP10K_DESCRIPTION,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation + "\n" + _SNAP10K_CITATION,
        )

    def _split_generators(self, dl_manager):
        test_file = f'{self.config.data_url}test/{self.config.name}.txt'
        dev_file = f'{self.config.data_url}dev/{self.config.name}.txt'
        train_file = f'{self.config.data_url}train/{self.config.name}.txt'
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_file": dl_manager.download(test_file),
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_file": dl_manager.download(dev_file),
                    "split": "dev",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_file": dl_manager.download(train_file),
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, data_file, split):
        with open(data_file, 'r', encoding='utf-8') as f:
            for line in f:
                lin = line.strip()
                if not lin:
                    continue
                # Each line is a tab-separated record: id, domain, label, rank, sentence.
                lin_sp = lin.split('\t')
                if len(lin_sp) < 5:
                    continue
                yield lin_sp[0], {
                    'sentence': lin_sp[4],
                    'domain': lin_sp[1],
                    'label': lin_sp[2],
                    'id': lin_sp[0],
                    'rank': lin_sp[3],
                }
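

# --- Usage sketch (illustrative; not part of the original loading script) ---
# Assuming this script is hosted in the "kuroneko3578/snap21" dataset repository
# referenced by data_url above (the exact repository path is an assumption), each
# domain configuration can be loaded by name. Recent versions of `datasets` may
# additionally require trust_remote_code=True for script-based datasets.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("kuroneko3578/snap21", "Automotive_5")
    print(ds)              # DatasetDict with train / validation / test splits
    print(ds["train"][0])  # one example: id, domain, label, rank, sentence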