Commit c8a1c90 (parent: de54f13), committed by albertvillanova (HF staff)

Add wikitext-2-v1 data files
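Once these files are on the Hub, the new config loads like any other; a minimal sketch with the datasets library (the config and split names are taken from the diffs below):

from datasets import load_dataset

# Resolves the wikitext-2-v1 data_files patterns added in this commit
# to the three Parquet shards (train / validation / test).
dataset = load_dataset("wikitext", "wikitext-2-v1")

# Expected row counts per the split metadata: 36718 / 3760 / 4358.
print(dataset)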
README.md CHANGED
@@ -77,16 +77,16 @@ dataset_info:
       dtype: string
   splits:
   - name: test
-    num_bytes: 1270951
+    num_bytes: 1270947
     num_examples: 4358
   - name: train
-    num_bytes: 10918134
+    num_bytes: 10918118
     num_examples: 36718
   - name: validation
-    num_bytes: 1134127
+    num_bytes: 1134123
     num_examples: 3760
-  download_size: 4475746
-  dataset_size: 13323212
+  download_size: 7371282
+  dataset_size: 13323188
 configs:
 - config_name: wikitext-103-v1
   data_files:
@@ -96,6 +96,14 @@ configs:
     path: wikitext-103-v1/train-*
   - split: validation
     path: wikitext-103-v1/validation-*
+- config_name: wikitext-2-v1
+  data_files:
+  - split: test
+    path: wikitext-2-v1/test-*
+  - split: train
+    path: wikitext-2-v1/train-*
+  - split: validation
+    path: wikitext-2-v1/validation-*
 ---
 
 # Dataset Card for "wikitext"
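Since the data_files globs above point at plain Parquet shards, a split can also be read without the loader script; a sketch using pandas, assuming the Hub's standard resolve/main URL layout (an assumption, not part of this commit):

import pandas as pd

# Read the validation shard directly over HTTPS.
url = ("https://huggingface.co/datasets/wikitext/resolve/main/"
       "wikitext-2-v1/validation-00000-of-00001.parquet")
df = pd.read_parquet(url)
print(len(df))  # expected: 3760 rows, matching num_examples above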
dataset_infos.json CHANGED
@@ -51,18 +51,14 @@
     "features": {
       "text": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       }
     },
-    "post_processed": null,
-    "supervised_keys": null,
-    "task_templates": null,
     "builder_name": "wikitext",
+    "dataset_name": "wikitext",
     "config_name": "wikitext-2-v1",
     "version": {
       "version_str": "1.0.0",
-      "description": null,
       "major": 1,
       "minor": 0,
       "patch": 0
@@ -70,33 +66,26 @@
     "splits": {
       "test": {
         "name": "test",
-        "num_bytes": 1270951,
+        "num_bytes": 1270947,
         "num_examples": 4358,
-        "dataset_name": "wikitext"
+        "dataset_name": null
      },
       "train": {
         "name": "train",
-        "num_bytes": 10918134,
+        "num_bytes": 10918118,
         "num_examples": 36718,
-        "dataset_name": "wikitext"
+        "dataset_name": null
       },
       "validation": {
         "name": "validation",
-        "num_bytes": 1134127,
+        "num_bytes": 1134123,
         "num_examples": 3760,
-        "dataset_name": "wikitext"
-      }
-    },
-    "download_checksums": {
-      "https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip": {
-        "num_bytes": 4475746,
-        "checksum": "92675f1d63015c1c8b51f1656a52d5bdbc33aafa60cc47a218a66e7ee817488c"
+        "dataset_name": null
       }
     },
-    "download_size": 4475746,
-    "post_processing_size": null,
-    "dataset_size": 13323212,
-    "size_in_bytes": 17798958
+    "download_size": 7371282,
+    "dataset_size": 13323188,
+    "size_in_bytes": 20694470
   },
   "wikitext-103-raw-v1": {
     "description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike\n License.\n",
wikitext-2-v1/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e6b3913da714b63a60a571698b20ff15441fb015783ea1b5285f707d4f2f00a9
+size 685430
wikitext-2-v1/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dfc27e4360c639dc1fba1e403bfffd53af4a5c75d5363b5724d49bf12d07cce6
+size 6068114
wikitext-2-v1/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:717de9a0c1c0b0b1dfdd8f1e6ad8a30ece618bbde81f5da8207277547d324215
+size 617738
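Each ADDED file above is stored as a Git LFS pointer, so a downloaded shard can be verified against the pointer's oid and size; a sketch for the test shard (the local path is an assumption):

import hashlib
from pathlib import Path

path = Path("wikitext-2-v1/test-00000-of-00001.parquet")
data = path.read_bytes()
assert len(data) == 685430  # size line of the LFS pointer
assert hashlib.sha256(data).hexdigest() == (
    "e6b3913da714b63a60a571698b20ff15441fb015783ea1b5285f707d4f2f00a9"
)
print("shard matches its LFS pointer")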