# Copyright 2020 The HuggingFace Datasets Authors.
# Copyright 2023 Masatoshi Suzuki (@singletongue).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
from typing import Iterator, List, Tuple

import datasets
import pyarrow as pa
import pyarrow.json as paj  # pyarrow's JSON reader must be imported explicitly

_DESCRIPTION = (
    "A dataset of Japanese Wikipedia paragraphs used in the book "
    "『大規模言語モデル入門』 (Introduction to Large Language Models). "
    "It is built from the dataset published in the GitHub repository "
    "singletongue/wikipedia-utils."
)

_HOMEPAGE = "https://github.com/singletongue/wikipedia-utils"

_LICENSE = (
    "The Wikipedia content used in this dataset is distributed under the "
    "Creative Commons Attribution-ShareAlike 3.0 license (CC BY-SA 3.0) "
    "and the GNU Free Documentation License (GFDL)."
)

_URL = "https://github.com/singletongue/wikipedia-utils/releases/download/2023-04-03/paragraphs-jawiki-20230403.json.gz"


class JaWikiParagraphs(datasets.ArrowBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    def _info(self) -> datasets.DatasetInfo:
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "pageid": datasets.Value("int64"),
                "revid": datasets.Value("int64"),
                "paragraph_index": datasets.Value("int64"),
                "title": datasets.Value("string"),
                "section": datasets.Value("string"),
                "text": datasets.Value("string"),
                "html_tag": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            features=features,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        # Download the gzipped JSON Lines file and decompress it.
        filepath = dl_manager.download_and_extract(_URL)
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": filepath})]

    def _generate_tables(self, filepath: str, chunksize: int = 10 << 20) -> Iterator[Tuple[int, pa.Table]]:
        # Stream the file in ~10 MiB chunks and parse each chunk into an Arrow
        # table, so the whole file never has to be held in memory at once.
        # cf. https://github.com/huggingface/datasets/blob/2.12.0/src/datasets/packaged_modules/json/json.py
        with open(filepath, "rb") as f:
            batch_idx = 0
            block_size = max(chunksize // 32, 16 << 10)
            while True:
                batch = f.read(chunksize)
                if not batch:
                    break
                # Extend the chunk to the next newline so that no JSON record
                # is split across two batches.
                batch += f.readline()
                pa_table = paj.read_json(
                    io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                )
                yield batch_idx, pa_table
                batch_idx += 1
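

# A minimal usage sketch: running this file directly exercises the builder by
# loading the script itself through `datasets.load_dataset`. This assumes a
# datasets 2.x environment in which local loading scripts are supported; it is
# an illustrative test harness, not part of the builder itself.
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__, split="train")
    # Each record is one Wikipedia paragraph plus its page/revision metadata.
    print(dataset[0])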