Convert dataset to Parquet

#1
README.md CHANGED
@@ -1,8 +1,27 @@
 ---
+language:
+- ja
 license: apache-2.0
 task_categories:
 - text-retrieval
-language:
-- ja
 pretty_name: a
----
+dataset_info:
+  features:
+  - name: docid
+    dtype: string
+  - name: title
+    dtype: string
+  - name: text
+    dtype: string
+  splits:
+  - name: train
+    num_bytes: 82583007
+    num_examples: 129260
+  download_size: 44837491
+  dataset_size: 82583007
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+---
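
With the Parquet shard and the `configs` metadata above, the dataset loads through the built-in Parquet builder and no longer needs a custom script. A minimal sketch, assuming the repo id `mpkato/miracl-japanese-small-corpus` taken from the deleted script's download URL:

```python
from datasets import load_dataset

# Loads via the Hub's Parquet builder; no loading script is executed.
# Repo id is assumed from the deleted script's _DATASET_URLS entry.
ds = load_dataset("mpkato/miracl-japanese-small-corpus", split="train")

print(ds.features)  # docid, title, text (all strings, per dataset_info)
print(ds.num_rows)  # 129260, per the README metadata
```
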
create_miracl_japanese_small.py DELETED
@@ -1,30 +0,0 @@
-import pandas as pd
-import json
-from tqdm import tqdm
-from datasets import load_dataset
-
-DEV_QREL_FILEPATH = "./qrels.miracl-v1.0-ja-dev.tsv"
-OUTPUT_FILEPATH = "./miracl-japanese-small-docs.jsonl"
-
-def extract_doc_ids(filepath):
-    dev_qrel = pd.read_csv(filepath, delimiter='\t',
-                           names=['query_id', 'ph', 'doc_pas_id', 'rel'])
-    doc_ids = set([int(dp_id.split('#')[0]) for dp_id in dev_qrel.doc_pas_id])
-    return doc_ids
-
-
-if __name__ == '__main__':
-    dev_doc_ids = extract_doc_ids(DEV_QREL_FILEPATH)
-    doc_ids = dev_doc_ids
-    print("# of docids in dev", len(dev_doc_ids))
-
-    new_dataset = []
-    seen_doc_ids = set()
-    dataset = load_dataset("miracl/miracl-corpus", "ja")
-    for data in tqdm(dataset['train']):
-        docid = int(data["docid"].split("#")[0])
-        if docid in doc_ids:
-            new_dataset.append(data)
-    with open(OUTPUT_FILEPATH, 'w', encoding='utf-8') as f:
-        for data in new_dataset:
-            f.write(json.dumps(data) + '\n')
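
For reference, the JSONL-to-Parquet conversion this PR applies can be reproduced with `datasets` alone; a rough sketch, assuming the uncompressed JSONL written by the deleted script above (the actual conversion appears to have been done by the Hub's automated tooling):

```python
from datasets import Dataset

# Read the JSONL corpus produced by the deleted script and write it back
# out as a single Parquet shard matching the new repo layout.
ds = Dataset.from_json("miracl-japanese-small-docs.jsonl")
ds.to_parquet("data/train-00000-of-00001.parquet")
```
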
miracl-japanese-small-docs.jsonl.gz → data/train-00000-of-00001.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:53fd4b33ac6c1b1a7ab9d7af0c2da9f91aa7af0760da1724725184468bc49846
-size 32917772
+oid sha256:8b89f5a20e5fd08d4a181728f66eec6810b60387bc87c37b7aeab80b1d10e499
+size 44837491
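
The LFS pointer now tracks the Parquet shard instead of the gzipped JSONL. After a local checkout with `git lfs pull`, the shard can also be read directly; a sketch assuming `pandas` with a Parquet engine such as `pyarrow` installed:

```python
import pandas as pd

# Read the single Parquet shard into a DataFrame (columns: docid, title, text).
df = pd.read_parquet("data/train-00000-of-00001.parquet")
print(len(df))  # 129260 rows, matching num_examples in the README
```
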
miracl-japanese-small-corpus.py DELETED
@@ -1,75 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the 'License');
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an 'AS IS' BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import json
-import datasets
-
-_CITATION = '''
-'''
-
-_DESCRIPTION = 'dataset load script for MIRACL Japanese Small Corpus'
-
-_DATASET_URLS = {
-    'train': 'https://huggingface.co/datasets/mpkato/miracl-japanese-small-corpus/resolve/main/miracl-japanese-small-docs.jsonl.gz'
-}
-
-
-class MIRACLJapaneseSmallCorpus(datasets.GeneratorBasedBuilder):
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            version=datasets.Version('1.0.0'),
-            description=f'MIRACL Japanese Small dataset.'
-        )
-    ]
-
-    def _info(self):
-        features = datasets.Features({
-            'docid': datasets.Value('string'),
-            'title': datasets.Value('string'),
-            'text': datasets.Value('string'),
-        })
-
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # This defines the different columns of the dataset and their types
-            features=features,  # Here we define them above because they are different between the two configurations
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage='',
-            # License for the dataset if available
-            license='',
-            # Citation for the dataset
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        downloaded_files = dl_manager.download_and_extract(_DATASET_URLS)
-
-        splits = [
-            datasets.SplitGenerator(
-                name='train',
-                gen_kwargs={
-                    'filepath': downloaded_files['train'],
-                },
-            ),
-        ]
-        return splits
-
-    def _generate_examples(self, filepath):
-        with open(filepath, encoding="utf-8") as f:
-            for line in f:
-                data = json.loads(line)
-                yield data['docid'], data
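
This loader is superseded by the Parquet data files plus the `dataset_info`/`configs` metadata in the README, which declare the same features and train split. Before this PR, loading executed the script above; on recent `datasets` releases that path requires an explicit opt-in. A sketch of the old, script-based load (version-dependent):

```python
from datasets import load_dataset

# Script-based loading (pre-PR): downloads miracl-japanese-small-docs.jsonl.gz
# and runs _generate_examples; newer `datasets` versions require the
# trust_remote_code opt-in to run repo scripts like this one.
ds = load_dataset("mpkato/miracl-japanese-small-corpus", trust_remote_code=True)
```
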
qrels.miracl-v1.0-ja-dev.tsv DELETED
The diff for this file is too large to render.