carlosejimenez committed
Commit 127d428
1 Parent(s): 29ae553

Upload dataset_infos.json with huggingface_hub

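For context, a commit with this message is typically produced by a single call to `HfApi.upload_file`. The sketch below is an assumption about how this upload was made, not a record of it; the repo id is inferred from the config key visible in the removed line of the diff.

```python
from huggingface_hub import HfApi

# Minimal sketch of how this commit was likely created. The repo id is
# an assumption based on the old config key in the diff below.
api = HfApi()
api.upload_file(
    path_or_fileobj="dataset_infos.json",
    path_in_repo="dataset_infos.json",
    repo_id="carlosejimenez/wikipedia-20220301.en-block-size-1024",
    repo_type="dataset",
    commit_message="Upload dataset_infos.json with huggingface_hub",
)
```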
Files changed (1)
  1. dataset_infos.json +40 -1
dataset_infos.json CHANGED
@@ -1 +1,40 @@
- {"carlosejimenez--wikipedia-20220301.en-block-size-1024": {"description": "Wikipedia dataset containing cleaned articles of all languages.\nThe datasets are built from the Wikipedia dump\n(https://dumps.wikimedia.org/) with one split per language. Each example\ncontains the content of one full Wikipedia article with cleaning to strip\nmarkdown and unwanted sections (references, etc.).\n", "citation": "@ONLINE {wikidump,\n author = {Wikimedia Foundation},\n title = {Wikimedia Downloads},\n url = {https://dumps.wikimedia.org}\n}\n", "homepage": "https://dumps.wikimedia.org", "license": "", "features": {"url": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "title": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "id": {"dtype": "int64", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": null, "config_name": null, "version": null, "splits": {"validation": {"name": "validation", "num_bytes": 5329560, "num_examples": 221, "dataset_name": "wikipedia-20220301.en-block-size-1024"}, "train": {"name": "train", "num_bytes": 1166278251, "num_examples": 48382, "dataset_name": "wikipedia-20220301.en-block-size-1024"}}, "download_checksums": null, "download_size": 282037240, "post_processing_size": null, "dataset_size": 1171607811, "size_in_bytes": 1453645051}}
+ {"default": {
+   "description": "Wikipedia dataset containing cleaned articles of all languages.\nThe datasets are built from the Wikipedia dump\n(https://dumps.wikimedia.org/) with one split per language. Each example\ncontains the content of one full Wikipedia article with cleaning to strip\nmarkdown and unwanted sections (references, etc.).",
+   "citation": "@ONLINE {wikidump,\n author = {Wikimedia Foundation},\n title = {Wikimedia Downloads},\n url = {https://dumps.wikimedia.org}\n}",
+   "homepage": "https://dumps.wikimedia.org",
+   "license": "",
+   "features": {
+     "tokens": {
+       "feature": {
+         "dtype": "string",
+         "_type": "Value"
+       },
+       "_type": "Sequence"
+     },
+     "id": {
+       "dtype": "int64",
+       "_type": "Value"
+     },
+     "text": {
+       "dtype": "string",
+       "_type": "Value"
+     }
+   },
+   "splits": {
+     "validation": {
+       "name": "validation",
+       "num_bytes": 301864191,
+       "num_examples": 21817,
+       "dataset_name": null
+     },
+     "train": {
+       "name": "train",
+       "num_bytes": 60558566627,
+       "num_examples": 4368542,
+       "dataset_name": null
+     }
+   },
+   "download_size": 20321590769,
+   "dataset_size": 60860430818,
+   "size_in_bytes": 81182021587
+ }}
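The new file replaces the old namespaced config key with a `default` config, drops the `url` and `title` sequence features, and records much larger splits (4,368,542 train examples versus 48,382 before). The uploaded metadata can be sanity-checked without downloading the data; the sketch below assumes the repo id `carlosejimenez/wikipedia-20220301.en-block-size-1024` taken from the config key on the deleted line.

```python
from datasets import load_dataset_builder

# Sketch: inspect the metadata this commit uploads without fetching the
# ~20 GB download. The repo id is an assumption inferred from the old
# config key in the removed line of this diff.
builder = load_dataset_builder("carlosejimenez/wikipedia-20220301.en-block-size-1024")
info = builder.info

print(info.features)  # expect: tokens (Sequence of string), id (int64), text (string)
for name, split in info.splits.items():
    # e.g. train: 4,368,542 examples / 60,558,566,627 bytes per this file
    print(f"{name}: {split.num_examples:,} examples, {split.num_bytes:,} bytes")
```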