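"""Loading script for the AresEkb/test dataset on the Hugging Face Hub.

Each builder config corresponds to one pre-built parquet file in the dataset
repository (for example 'domains.parquet'), which is downloaded and yielded
as a single Arrow table.
"""
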
import datasets
import pyarrow.parquet as pq

_CITATION = ''

_DESCRIPTION = ''

_HOMEPAGE = ''

_LICENSE = ''

_BASE_URL = 'https://huggingface.co/datasets/AresEkb/test/resolve/main/'

_FEATURES = {
    'domains': datasets.Features({
        'reg_number': datasets.Value('string'),
        'standard_name': datasets.Value('string'),
        'name': datasets.Value('string'),
        'purpose': datasets.Value('string'),
        'embeddings': datasets.Sequence(datasets.Value('float32')),
    }),
}

class ProfStandardsDatasetBuilder(datasets.ArrowBasedBuilder):
    """Arrow-based builder that serves pre-built parquet files, one per config."""

    VERSION = datasets.Version('0.0.1')

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name='domains', version=VERSION),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_FEATURES[self.config.name],
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Each config maps to a single parquet file hosted in the dataset
        # repository, e.g. 'domains.parquet'.
        url = _BASE_URL + self.config.name + '.parquet'
        file_path = dl_manager.download(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={'file_path': file_path},
            ),
        ]

    def _generate_tables(self, file_path):
        # In streaming mode the download manager returns the original URL
        # rather than a local cache path; strip the base URL so the parquet
        # file is resolved relative to the dataset repository instead.
        if file_path.startswith(_BASE_URL):
            file_path = file_path[len(_BASE_URL):]
        yield self.config.name, pq.read_table(file_path)
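
# Usage sketch (illustrative, not part of the builder itself): once this
# script is uploaded as the dataset's loading script, the 'domains' config
# can be loaded like any other Hub dataset. The repo id 'AresEkb/test' is
# taken from _BASE_URL above; recent versions of the datasets library
# require trust_remote_code=True for script-based datasets.
if __name__ == '__main__':
    import pprint

    ds = datasets.load_dataset('AresEkb/test', 'domains', split='train',
                               trust_remote_code=True)
    pprint.pprint(ds.features)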