qboodp committed on
Commit b6f487c
1 Parent(s): f8792e0
.gitattributes CHANGED
@@ -53,3 +53,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.jpg filter=lfs diff=lfs merge=lfs -text
 *.jpeg filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
+*.jsonl.gz filter=lfs diff=lfs merge=lfs -text
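
Net effect: gzipped JSONL shards are now tracked by Git LFS, which is why each data file added below appears as an LFS pointer (version/oid/size) rather than raw bytes. The conventional command that appends such a rule is likely the following (an assumption; the exact invocation is not recorded in this commit):

git lfs track "*.jsonl.gz"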
MNBVC-core.py ADDED
@@ -0,0 +1,101 @@
+"""
+MNBVC: Massive Never-ending BT Vast Chinese corpus
+"""
+
+import json
+import datasets
+import numpy as np
+import traceback
+from .meta import MNBVC_META
+from .features import Features
+
+
+_CITATION = """\
+"""
+
+_DESCRIPTION = """\
+MNBVC-core: core split of Massive Never-ending BT Vast Chinese corpus
+"""
+
+_HOMEPAGE = "https://github.com/esbatmop/MNBVC"
+
+_LICENSE = "MIT"
+
+
+class MNBVC(datasets.GeneratorBasedBuilder):
+
+    # One builder configuration per corpus defined in MNBVC_META
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(
+            name=key, version=datasets.Version("0.0.1"), description=value['description']) for key, value in MNBVC_META.items()]
+
+    def _info(self):
+        """
+        Key metadata describing the dataset
+        """
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=Features[MNBVC_META[self.config.name]['feature_type']],
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        """
+        Download the data files and organize the splits
+        """
+        data_dir = dl_manager.download_and_extract(MNBVC_META[self.config.name]['files'])
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "data_files": data_dir,
+                },
+            ),
+        ]
+
+    # Generator yielding (id, example) tuples
+    def _generate_examples(self, data_files):
+        id = 0
+        features = self._info().features
+        feature_keys = set(features.keys())
+
+        def _drop_unused_keys(data):
+            rm_keys = []
+            for key in data.keys():
+                if key not in feature_keys:
+                    rm_keys.append(key)
+            for key in rm_keys:
+                del data[key]
+            return data
+
+        try:
+            for file_i, data_file in enumerate(data_files):
+                with open(data_file, encoding="utf-8") as f:
+                    for line_i, line in enumerate(f):
+                        id += 1
+                        data = json.loads(line)
+                        if self.config.name == 'law_judgement':
+                            text = data['详情']
+                            del data['详情']
+                            yield id, {
+                                "text": text,
+                                "meta": json.dumps(data, ensure_ascii=False),
+                            }
+                        else:
+                            data = _drop_unused_keys(data)
+                            if 'simhash' in data:  # for issue https://github.com/huggingface/datasets/issues/6007
+                                data['simhash'] = str(data['simhash'])
+
+                            yield id, data
+        except Exception as e:
+            error_msg = 'oops, we found an error when loading the dataset\n'
+            error_msg += f'Dataset: {self.config.name}\n'
+            error_msg += f'Data File: {file_i} {data_file}\n'
+            error_msg += f'Row: {line_i}'
+            print(error_msg)
+            traceback.print_exc()
+
+            raise e
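
For reference, a minimal sketch of how this builder is consumed once the repository is on the Hub; the repo id comes from base_url in meta.py below, and the config name is one of the keys of MNBVC_META (the choice here is illustrative):

from datasets import load_dataset

# "gov_report" uses the TEXT_CORPUS_LEGACY schema, so each example carries a
# "text" field plus a JSON-encoded "meta" field; the script defines a single
# train split.
ds = load_dataset("qbo-odp/MNBVC-core", "gov_report", split="train")
print(ds[0]["text"][:200])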
features.py ADDED
@@ -0,0 +1,91 @@
+import datasets
+
+Features = {}
+
+# Dataset fields used early in the project; to be phased out gradually in later updates
+Features['TEXT_CORPUS_LEGACY'] = datasets.Features(
+    {
+        "text": datasets.Value("string"),
+        "meta": datasets.Value("string")
+    }
+)
+
+# Schema used by text datasets
+Features['TEXT_CORPUS'] = datasets.Features(
+    {
+        "文件名": datasets.Value("string"),
+        "是否待查文件": datasets.Value("bool"),
+        "是否重复文件": datasets.Value("bool"),
+        "文件大小": datasets.Value("int64"),
+        "simhash": datasets.Value("string"),  # for issue https://github.com/huggingface/datasets/issues/6007
+        "最长段落长度": datasets.Value("int64"),
+        "段落数": datasets.Value("int64"),
+        "去重段落数": datasets.Value("int64"),
+        "低质量段落数": datasets.Value("int64"),
+        "段落": [
+            datasets.Features(
+                {
+                    "行号": datasets.Value("int64"),
+                    "是否重复": datasets.Value("bool"),
+                    "是否跨文件重复": datasets.Value("bool"),
+                    "md5": datasets.Value("string"),
+                    "内容": datasets.Value("string"),
+                }
+            )
+        ]
+    }
+)
+
+# Schema used by QA data
+Features['QA_CORPUS'] = datasets.Features(
+    {
+        "id": datasets.Value("string"),
+        "问": datasets.Value("string"),
+        "答": datasets.Value("string"),
+        "来源": datasets.Value("string"),
+        "元数据": {
+            "create_time": datasets.Value("string"),
+            "问题明细": datasets.Value("string"),
+            "回答明细": datasets.Value("string"),
+            "扩展字段": datasets.Value("string"),
+        }
+    }
+)
+
+# Schema used by code data
+Features['CODE_CORPUS'] = datasets.Features(
+    {
+        "来源": datasets.Value("string"),
+        "仓库名": datasets.Value("string"),
+        "path": datasets.Value("string"),
+        "文件名": datasets.Value("string"),
+        "ext": datasets.Value("string"),
+        "size": datasets.Value("int64"),
+        "原始编码": datasets.Value("string"),
+        "md5": datasets.Value("string"),
+        "text": datasets.Value("string")
+    }
+)
+
+# Forum dialogue corpus
+Features['FORUM_DIALOGUE'] = datasets.Features(
+    {
+        "ID": datasets.Value('int32'),
+        "主题": datasets.Value("string"),
+        "来源": datasets.Value("string"),
+        "回复": [
+            datasets.Features(
+                {
+                    "楼ID": datasets.Value("string"),
+                    "回复": datasets.Value("string"),
+                    "扩展字段": datasets.Value("string"),
+                }
+            )
+        ],
+        "元数据": {
+            "发帖时间": datasets.Value("string"),
+            "回复数": datasets.Value("int32"),
+            "扩展字段": datasets.Value("string")
+        }
+    }
+)
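
To make the nested schemas concrete, a hypothetical record matching Features['QA_CORPUS'] is sketched below; all values are invented for illustration, and the top-level import assumes the snippet runs next to features.py:

from features import Features

# Invented example record; encode_example() casts it against the declared types
# and raises if a field is missing or has an incompatible type.
record = {
    "id": "0",
    "问": "什么是MNBVC?",
    "答": "一个大规模中文语料项目。",
    "来源": "example",
    "元数据": {
        "create_time": "2023-01-01 00:00:00",
        "问题明细": "",
        "回答明细": "",
        "扩展字段": "{}",
    },
}
Features['QA_CORPUS'].encode_example(record)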
gov/GovReport.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:94b48a3b2887717763f6cc22fd515df5f1d2e601b0ba6b56df57f25713a3ce9a
+size 13833331
gov/XueXiQiangGuo.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27ea4e9f7a7e4ac54330fabb3e519765b89162494e5a8d8a6c169a1eabd4f979
+size 419641134
meta.py ADDED
@@ -0,0 +1,61 @@
+
+MNBVC_META = {}
+
+base_url = 'https://huggingface.co/datasets/qbo-odp/MNBVC-core/resolve/main'
+def _flatten_list(items):
+    res = []
+    for i in items:
+        if isinstance(i, list):
+            res += _flatten_list(i)
+        else:
+            res.append(i)
+    return res
+
+
+# =============================
+# gov_xuexiqiangguo
+MNBVC_META['gov_xuexiqiangguo'] = {}
+MNBVC_META['gov_xuexiqiangguo']['files'] = [f'{base_url}/gov/XueXiQiangGuo.jsonl.gz']
+MNBVC_META['gov_xuexiqiangguo']['feature_type'] = "TEXT_CORPUS"
+MNBVC_META['gov_xuexiqiangguo']['description'] = "Texts from government files (XueXiQiangGuo)"
+
+
+# =============================
+# gov_report
+MNBVC_META['gov_report'] = {}
+MNBVC_META['gov_report']['files'] = [f'{base_url}/gov/GovReport.jsonl.gz']
+MNBVC_META['gov_report']['feature_type'] = "TEXT_CORPUS_LEGACY"
+MNBVC_META['gov_report']['description'] = "Texts from government files (Gov Report)"
+
+# =============================
+# qa_mfa
+MNBVC_META['qa_mfa'] = {}
+MNBVC_META['qa_mfa']['files'] = [
+    [f'{base_url}/qa/mfa/0.jsonl.gz']
+]
+MNBVC_META['qa_mfa']['files'] = _flatten_list(MNBVC_META['qa_mfa']['files'])
+MNBVC_META['qa_mfa']['feature_type'] = "QA_CORPUS"
+MNBVC_META['qa_mfa']['description'] = "QA data from Ministry of Foreign Affairs"
+
+
+# =============================
+# news_peoples_daily
+MNBVC_META['news_peoples_daily'] = {}
+MNBVC_META['news_peoples_daily']['files'] = [
+    [f'{base_url}/news/peoples_daily/{i}.jsonl.gz' for i in range(12)]
+]
+MNBVC_META['news_peoples_daily']['files'] = _flatten_list(MNBVC_META['news_peoples_daily']['files'])
+MNBVC_META['news_peoples_daily']['feature_type'] = "TEXT_CORPUS"
+MNBVC_META['news_peoples_daily']['description'] = "News data from People's Daily"
+
+# =============================
+# qa_stackexchange
+MNBVC_META['qa_stackexchange'] = {}
+MNBVC_META['qa_stackexchange']['files'] = []
+
+MNBVC_META['qa_stackexchange']['files'] += [
+    [f'{base_url}/qa/stackexchange/{i}.jsonl.gz' for i in range(1, 2)]
+]
+MNBVC_META['qa_stackexchange']['files'] = _flatten_list(MNBVC_META['qa_stackexchange']['files'])
+MNBVC_META['qa_stackexchange']['feature_type'] = "QA_CORPUS"
+MNBVC_META['qa_stackexchange']['description'] = "QA data from StackExchange"
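
As a sketch of how a new corpus would be registered, the entry below follows the same pattern; the "law_judgement" name matches the config that MNBVC-core.py special-cases, but the file path and shard count are assumptions, not part of this commit:

# Hypothetical registration; the law/judgement path and shard count are assumptions.
MNBVC_META['law_judgement'] = {}
MNBVC_META['law_judgement']['files'] = _flatten_list([
    [f'{base_url}/law/judgement/{i}.jsonl.gz' for i in range(2)]
])
MNBVC_META['law_judgement']['feature_type'] = "TEXT_CORPUS_LEGACY"
MNBVC_META['law_judgement']['description'] = "Texts from Chinese court judgements"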
news/peoples_daily/0.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7117c40faba186aca81a307ecebbbcc59f6d420c2c6cc606401d316c91190e99
+size 173145247
news/peoples_daily/1.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f28651da19d9df0f73dc3a30e8247377e5fbaec222c6be3ba87b0bd2f0d662ab
+size 173604769
news/peoples_daily/10.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39ab154fbdd682514beaa19c58b8939507edb37cce5439c54ddd921969bca9c6
+size 174046061
news/peoples_daily/11.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:89ca302549e6004f4e0a498991186737bc04edafce65c347a85479db54f306b9
+size 27017403
news/peoples_daily/2.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c3d8b5c11a36ead95fda905da50c33cc9323e5ab602ce7fd3ac2531cfa8949dd
+size 173523914
news/peoples_daily/3.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6c97947d6ce5a1920572eae11c76f37fcbd2d9b58e1b71ac320d14db36c11157
+size 173033658
news/peoples_daily/4.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:639b23bba42a66927c4b15b071b096d5d9dab545eba533c2b8f4c1b0aa847d84
+size 173417851
news/peoples_daily/5.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:85d787c0e946eef2438664a67ebc5fa874fb502e12f008f4456c0a93b47e1c2c
+size 173478837
news/peoples_daily/6.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bce62ec1cb3d14676b2ced90a26a20b7fae3c01e773e8dac9b50075f56c22b5b
+size 173841841
news/peoples_daily/7.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e9d179b19536e104e869f803e2ead9d5756adbd57c4a69dccad2858224f5108
+size 173575647
news/peoples_daily/8.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfe27c76bc4c0f1f54212b6d2e6909542c73bb95f34f855399e93a5c3d1157cf
+size 173193115
news/peoples_daily/9.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d3a08ee1eac2d8a11e6e51c5b27f1390471b3e111b88357866674e52e22933a
+size 173191766
qa/mfa/0.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:20c5db18b9747773277c1b287a41ce72c6649980fdb6a868d64684d70533ae83
+size 3728313
qa/stackexchange/1.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:407c3ebf887fcd0176560848641c62f9dcb05b3793c149d4bc0a6a78cc13694d
+size 46358610
qa/stackexchange/1000.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:77c122dd64b06358a47b6c9d95c97324f10462d5476cb4df5533fefe8f480f9d
+size 490927
test.ipynb ADDED
@@ -0,0 +1,150 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5d3c22fa-b62f-4828-bb55-60640e1a393c",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!pip install datasets"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9f6e89ca-43c5-409c-987e-94b6a1c89082",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Test the dataset script for correctness\n",
+    "!datasets-cli test /workspace/data/MNBVC-core --data_dir=/workspace/data/MNBVC-core --save_info --all_configs"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "id": "2cb2a5cf-43b1-4372-8583-bd37455406a0",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from datasets import load_from_disk,load_dataset_builder, get_dataset_split_names, get_dataset_config_names\n",
+    "\n",
+    "DATASET='/workspace/data/MNBVC-core'\n",
+    "CONFIGURATION='qa_mfa'"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 15,
+   "id": "ddadbe3a-5b78-4b22-8d5a-fb82b42dd01f",
+   "metadata": {},
+   "outputs": [
+    {
+     "ename": "FileNotFoundError",
+     "evalue": "Directory /workspace/data/MNBVC-core is neither a `Dataset` directory nor a `DatasetDict` directory.",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)",
+      "Cell \u001b[0;32mIn[15], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m ds \u001b[38;5;241m=\u001b[39m \u001b[43mload_from_disk\u001b[49m\u001b[43m(\u001b[49m\u001b[43mDATASET\u001b[49m\u001b[43m)\u001b[49m\n",
+      "File \u001b[0;32m/opt/conda/envs/envd/lib/python3.10/site-packages/datasets/load.py:2252\u001b[0m, in \u001b[0;36mload_from_disk\u001b[0;34m(dataset_path, fs, keep_in_memory, storage_options)\u001b[0m\n\u001b[1;32m 2250\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m DatasetDict\u001b[38;5;241m.\u001b[39mload_from_disk(dataset_path, keep_in_memory\u001b[38;5;241m=\u001b[39mkeep_in_memory, storage_options\u001b[38;5;241m=\u001b[39mstorage_options)\n\u001b[1;32m 2251\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 2252\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mFileNotFoundError\u001b[39;00m(\n\u001b[1;32m 2253\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mDirectory \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mdataset_path\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m is neither a `Dataset` directory nor a `DatasetDict` directory.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 2254\u001b[0m )\n",
+      "\u001b[0;31mFileNotFoundError\u001b[0m: Directory /workspace/data/MNBVC-core is neither a `Dataset` directory nor a `DatasetDict` directory."
+     ]
+    }
+   ],
+   "source": [
+    "ds = load_from_disk(DATASET)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 14,
+   "id": "5fe8f56e-019f-475d-811a-f707450934e2",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Help on function load_from_disk in module datasets.load:\n",
+      "\n",
+      "load_from_disk(dataset_path: str, fs='deprecated', keep_in_memory: Optional[bool] = None, storage_options: Optional[dict] = None) -> Union[datasets.arrow_dataset.Dataset, datasets.dataset_dict.DatasetDict]\n",
+      " Loads a dataset that was previously saved using [`~Dataset.save_to_disk`] from a dataset directory, or\n",
+      " from a filesystem using any implementation of `fsspec.spec.AbstractFileSystem`.\n",
+      " \n",
+      " Args:\n",
+      " dataset_path (`str`):\n",
+      " Path (e.g. `\"dataset/train\"`) or remote URI (e.g.\n",
+      " `\"s3://my-bucket/dataset/train\"`) of the [`Dataset`] or [`DatasetDict`] directory where the dataset will be\n",
+      " loaded from.\n",
+      " fs (`~filesystems.S3FileSystem` or `fsspec.spec.AbstractFileSystem`, *optional*):\n",
+      " Instance of the remote filesystem used to download the files from.\n",
+      " \n",
+      " <Deprecated version=\"2.9.0\">\n",
+      " \n",
+      " `fs` was deprecated in version 2.9.0 and will be removed in 3.0.0.\n",
+      " Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`.\n",
+      " \n",
+      " </Deprecated>\n",
+      " \n",
+      " keep_in_memory (`bool`, defaults to `None`):\n",
+      " Whether to copy the dataset in-memory. If `None`, the dataset\n",
+      " will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to\n",
+      " nonzero. See more details in the [improve performance](../cache#improve-performance) section.\n",
+      " \n",
+      " storage_options (`dict`, *optional*):\n",
+      " Key/value pairs to be passed on to the file-system backend, if any.\n",
+      " \n",
+      " <Added version=\"2.9.0\"/>\n",
+      " \n",
+      " Returns:\n",
+      " [`Dataset`] or [`DatasetDict`]:\n",
+      " - If `dataset_path` is a path of a dataset directory: the dataset requested.\n",
+      " - If `dataset_path` is a path of a dataset dict directory, a [`DatasetDict`] with each split.\n",
+      " \n",
+      " Example:\n",
+      " \n",
+      " ```py\n",
+      " >>> from datasets import load_from_disk\n",
+      " >>> ds = load_from_disk('path/to/dataset/directory')\n",
+      " ```\n",
+      "\n"
+     ]
+    }
+   ],
+   "source": [
+    "help(load_from_disk)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6a2a08b1-0f99-4944-bc6d-98d288f6e366",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.13"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
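
The FileNotFoundError captured in the notebook is expected rather than a data problem: load_from_disk() only reads directories produced by Dataset.save_to_disk(), while this repository is a script-based dataset. A minimal sketch of the call that matches this layout, reusing the notebook's variables:

from datasets import load_dataset

DATASET = '/workspace/data/MNBVC-core'   # directory containing MNBVC-core.py
CONFIGURATION = 'qa_mfa'

# load_dataset() executes the loading script and builds the requested
# configuration; load_from_disk() is not applicable to this layout.
ds = load_dataset(DATASET, CONFIGURATION, split='train')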