Modalities: Text
Formats: parquet
Languages: English
ArXiv: 1609.07843
Libraries: Datasets, Dask
License: cc-by-sa-4.0
Commit 1598c59 by albertvillanova (HF staff)
Parent: 9f7c666

Delete legacy dataset_infos.json
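For context, the metadata recorded in the deleted file is not lost: the 🤗 Datasets library now reads dataset metadata from the repository's README YAML and Parquet files rather than from a legacy dataset_infos.json. A minimal sketch of retrieving the same DatasetInfo fields programmatically (assuming the dataset still resolves under the `wikitext` repo name):

```python
from datasets import load_dataset_builder

# The builder's .info carries the same fields the deleted file held:
# description, license, and per-split byte/example counts.
builder = load_dataset_builder("wikitext", "wikitext-103-v1")

print(builder.info.license)
for name, split in builder.info.splits.items():
    print(f"{name}: {split.num_examples} examples, {split.num_bytes} bytes")
```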

Files changed (1):
  1. dataset_infos.json (+0 -178)
dataset_infos.json DELETED
@@ -1,178 +0,0 @@
-{
-    "wikitext-103-v1": {
-        "description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike\n License.\n",
-        "citation": "@misc{merity2016pointer,\n title={Pointer Sentinel Mixture Models},\n author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},\n year={2016},\n eprint={1609.07843},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n",
-        "homepage": "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/",
-        "license": "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)",
-        "features": {
-            "text": {
-                "dtype": "string",
-                "_type": "Value"
-            }
-        },
-        "builder_name": "wikitext",
-        "dataset_name": "wikitext",
-        "config_name": "wikitext-103-v1",
-        "version": {
-            "version_str": "1.0.0",
-            "major": 1,
-            "minor": 0,
-            "patch": 0
-        },
-        "splits": {
-            "test": {
-                "name": "test",
-                "num_bytes": 1295575,
-                "num_examples": 4358,
-                "dataset_name": null
-            },
-            "train": {
-                "name": "train",
-                "num_bytes": 545141915,
-                "num_examples": 1801350,
-                "dataset_name": null
-            },
-            "validation": {
-                "name": "validation",
-                "num_bytes": 1154751,
-                "num_examples": 3760,
-                "dataset_name": null
-            }
-        },
-        "download_size": 313093838,
-        "dataset_size": 547592241,
-        "size_in_bytes": 860686079
-    },
-    "wikitext-2-v1": {
-        "description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike\n License.\n",
-        "citation": "@misc{merity2016pointer,\n title={Pointer Sentinel Mixture Models},\n author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},\n year={2016},\n eprint={1609.07843},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n",
-        "homepage": "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/",
-        "license": "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)",
-        "features": {
-            "text": {
-                "dtype": "string",
-                "_type": "Value"
-            }
-        },
-        "builder_name": "wikitext",
-        "dataset_name": "wikitext",
-        "config_name": "wikitext-2-v1",
-        "version": {
-            "version_str": "1.0.0",
-            "major": 1,
-            "minor": 0,
-            "patch": 0
-        },
-        "splits": {
-            "test": {
-                "name": "test",
-                "num_bytes": 1270947,
-                "num_examples": 4358,
-                "dataset_name": null
-            },
-            "train": {
-                "name": "train",
-                "num_bytes": 10918118,
-                "num_examples": 36718,
-                "dataset_name": null
-            },
-            "validation": {
-                "name": "validation",
-                "num_bytes": 1134123,
-                "num_examples": 3760,
-                "dataset_name": null
-            }
-        },
-        "download_size": 7371282,
-        "dataset_size": 13323188,
-        "size_in_bytes": 20694470
-    },
-    "wikitext-103-raw-v1": {
-        "description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike\n License.\n",
-        "citation": "@misc{merity2016pointer,\n title={Pointer Sentinel Mixture Models},\n author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},\n year={2016},\n eprint={1609.07843},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n",
-        "homepage": "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/",
-        "license": "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)",
-        "features": {
-            "text": {
-                "dtype": "string",
-                "_type": "Value"
-            }
-        },
-        "builder_name": "wikitext",
-        "dataset_name": "wikitext",
-        "config_name": "wikitext-103-raw-v1",
-        "version": {
-            "version_str": "1.0.0",
-            "major": 1,
-            "minor": 0,
-            "patch": 0
-        },
-        "splits": {
-            "test": {
-                "name": "test",
-                "num_bytes": 1305088,
-                "num_examples": 4358,
-                "dataset_name": null
-            },
-            "train": {
-                "name": "train",
-                "num_bytes": 546500949,
-                "num_examples": 1801350,
-                "dataset_name": null
-            },
-            "validation": {
-                "name": "validation",
-                "num_bytes": 1159288,
-                "num_examples": 3760,
-                "dataset_name": null
-            }
-        },
-        "download_size": 315466397,
-        "dataset_size": 548965325,
-        "size_in_bytes": 864431722
-    },
-    "wikitext-2-raw-v1": {
-        "description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike\n License.\n",
-        "citation": "@misc{merity2016pointer,\n title={Pointer Sentinel Mixture Models},\n author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},\n year={2016},\n eprint={1609.07843},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n",
-        "homepage": "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/",
-        "license": "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)",
-        "features": {
-            "text": {
-                "dtype": "string",
-                "_type": "Value"
-            }
-        },
-        "builder_name": "wikitext",
-        "dataset_name": "wikitext",
-        "config_name": "wikitext-2-raw-v1",
-        "version": {
-            "version_str": "1.0.0",
-            "major": 1,
-            "minor": 0,
-            "patch": 0
-        },
-        "splits": {
-            "test": {
-                "name": "test",
-                "num_bytes": 1305088,
-                "num_examples": 4358,
-                "dataset_name": null
-            },
-            "train": {
-                "name": "train",
-                "num_bytes": 11061717,
-                "num_examples": 36718,
-                "dataset_name": null
-            },
-            "validation": {
-                "name": "validation",
-                "num_bytes": 1159288,
-                "num_examples": 3760,
-                "dataset_name": null
-            }
-        },
-        "download_size": 7747362,
-        "dataset_size": 13526093,
-        "size_in_bytes": 21273455
-    }
-}
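
As a quick cross-check of the figures in the deleted file, one can load a config and compare per-split row counts. This is a sketch, assuming Hub access and that the current revision of the data still matches the counts above (the wikitext-2-raw-v1 numbers are copied from the deleted JSON):

```python
from datasets import load_dataset

# Expected example counts, copied from the deleted dataset_infos.json above.
expected = {"train": 36718, "validation": 3760, "test": 4358}

ds = load_dataset("wikitext", "wikitext-2-raw-v1")
for split, n in expected.items():
    assert ds[split].num_rows == n, f"{split}: {ds[split].num_rows} != {n}"
print("Split sizes match the deleted metadata.")
```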