mpkato committed on
Commit
1d58a65
1 Parent(s): 4a20f8a

Delete loading script

Browse files
Files changed (1) hide show
  1. miracl-japanese-small-corpus.py +0 -75
miracl-japanese-small-corpus.py DELETED
@@ -1,75 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the 'License');
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an 'AS IS' BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import json
17
- import datasets
18
-
19
- _CITATION = '''
20
- '''
21
-
22
- _DESCRIPTION = 'dataset load script for MIRACL Japanese Small Corpus'
23
-
24
- _DATASET_URLS = {
25
- 'train': 'https://huggingface.co/datasets/mpkato/miracl-japanese-small-corpus/resolve/main/miracl-japanese-small-docs.jsonl.gz'
26
- }
27
-
28
-
29
class MIRACLJapaneseSmallCorpus(datasets.GeneratorBasedBuilder):
    """Loading script for the MIRACL Japanese Small corpus.

    Downloads a gzipped JSONL file (one document per line) from the
    Hugging Face Hub and exposes it as a single 'train' split with
    string columns: docid, title, text.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            version=datasets.Version('1.0.0'),
            # Fix: was an f-string with no placeholders (ruff F541);
            # a plain string literal is equivalent.
            description='MIRACL Japanese Small dataset.'
        )
    ]

    def _info(self):
        """Return dataset metadata: schema, description, and citation."""
        features = datasets.Features({
            'docid': datasets.Value('string'),
            'title': datasets.Value('string'),
            'text': datasets.Value('string'),
        })

        return datasets.DatasetInfo(
            # Description shown on the datasets page.
            description=_DESCRIPTION,
            # Column schema shared by all configurations.
            features=features,
            supervised_keys=None,
            # Homepage / license left blank in the original script.
            homepage='',
            license='',
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download (and decompress) the corpus; declare the single split.

        Args:
            dl_manager: datasets download manager used to fetch _DATASET_URLS.

        Returns:
            A list with one SplitGenerator for the 'train' split.
        """
        downloaded_files = dl_manager.download_and_extract(_DATASET_URLS)

        return [
            datasets.SplitGenerator(
                name='train',
                gen_kwargs={
                    'filepath': downloaded_files['train'],
                },
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs from a JSONL file.

        Args:
            filepath: path to the decompressed JSONL corpus file.

        Yields:
            (docid, record) tuples; the docid doubles as the unique
            example key, and the full parsed record is the example.
        """
        with open(filepath, encoding="utf-8") as f:
            for line in f:
                data = json.loads(line)
                yield data['docid'], data