File size: 4,207 Bytes
8b359ff
3116335
8b359ff
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3116335
6707cdf
8b359ff
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3116335
820e93e
 
 
8b359ff
 
b782bf9
8b359ff
 
 
 
b782bf9
8b359ff
 
 
 
b782bf9
8b359ff
 
 
 
 
 
 
 
 
 
 
 
 
3116335
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104

from datasets import Value, ClassLabel
import datasets


_JD21_CITATION = """\

"""

_JD21_DESCRIPTION = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""

class JD21Config(datasets.BuilderConfig):
    """BuilderConfig for a single JD21 product domain.

    Bundles the column layout, label set, and download location used to
    build one domain-specific configuration of the JD21 review corpus.
    """

    def __init__(
        self,
        text_features,
        label_column,
        data_url,
        data_dir,
        citation,
        url,
        label_classes=None,
        process_label=lambda x: x,
        **kwargs,
    ):
        # Every JD21 config is pinned to the same dataset version.
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.text_features = text_features
        self.label_column = label_column
        self.label_classes = label_classes
        self.data_url = data_url
        self.data_dir = data_dir
        self.citation = citation
        self.url = url
        self.process_label = process_label

class JD21(datasets.GeneratorBasedBuilder):
    """Builder for the JD21 corpus: JD.com review sentences with binary
    POS/NEG sentiment labels, one configuration per product domain."""

    # The 21 product domains; each becomes a named builder config that
    # selects which per-domain files are downloaded.
    domain_list = ['褪黑素', '维生素', '无线耳机', '蛋白粉', '游戏机', '电视', 'MacBook', '洗面奶', '智能手表', '吹风机', '小米手机', '红米手机', '护肤品',
            '电动牙刷', 'iPhone', '海鲜', '酒', '平板电脑', '修复霜', '运动鞋', '智能手环']

    BUILDER_CONFIGS = [
        JD21Config(name=domain_name,
                    description=f'comments of JD {domain_name}.',
                    text_features={'sentence': 'sentence', 'domain': 'domain'},
                    label_classes=['POS', 'NEG'],
                    label_column='label',
                    citation="",
                    data_dir="",
                    data_url=r"https://huggingface.co/datasets/kuroneko5943/jd21/resolve/main/",
                    url='https://github.com/ws719547997/LNB-DA')
        for domain_name in domain_list
    ]

    def _info(self):
        """Declare the feature schema and dataset-level metadata."""
        features = {
            'id': Value(dtype='int32', id=None),
            'domain': Value(dtype='string', id=None),
            'label': ClassLabel(num_classes=2, names=['POS', 'NEG'], names_file=None, id=None),
            'rank': Value(dtype='int32', id=None),
            'sentence': Value(dtype='string', id=None),
        }

        return datasets.DatasetInfo(
            description=_JD21_DESCRIPTION,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation + "\n" + _JD21_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the per-domain train/dev/test files and bind each to a split.

        Files live under ``<data_url>/<split>/<config name>.txt``.
        """
        test_file = rf'{self.config.data_url}test/{self.config.name}.txt'
        dev_file = rf'{self.config.data_url}dev/{self.config.name}.txt'
        train_file = rf'{self.config.data_url}train/{self.config.name}.txt'
        return [datasets.SplitGenerator(name=datasets.Split.TEST,
                                        gen_kwargs={
                                            "data_file": dl_manager.download(test_file),
                                            "split": "test",
                                        },),
                datasets.SplitGenerator(name=datasets.Split.VALIDATION,
                                        gen_kwargs={
                                            "data_file": dl_manager.download(dev_file),
                                            "split": "dev",
                                        },),
                datasets.SplitGenerator(name=datasets.Split.TRAIN,
                                        gen_kwargs={
                                            "data_file": dl_manager.download(train_file),
                                            "split": "train",
                                        },)]

    def _generate_examples(self, data_file, split):
        """Yield ``(key, example)`` pairs from a tab-separated review file.

        Each line is expected to hold at least five tab-separated fields:
        ``id \\t domain \\t label \\t rank \\t sentence``. Blank lines and
        lines with fewer than five fields are skipped.
        """
        with open(data_file, 'r', encoding='utf-8') as f:
            for line in f:
                lin = line.strip()
                if not lin:
                    continue
                lin_sp = lin.split('\t')
                if len(lin_sp) < 5:
                    continue
                # Cast id/rank to int so yielded examples match the int32
                # features declared in _info() instead of relying on the
                # library's implicit string-to-int casting.
                yield lin_sp[0], {
                    'sentence': lin_sp[4],
                    'domain': lin_sp[1],
                    'label': lin_sp[2],
                    'id': int(lin_sp[0]),
                    'rank': int(lin_sp[3]),
                }