Commit 6fa1fd1 by albertvillanova (HF staff)
1 parent: 89e866d

Convert dataset to Parquet.

README.md CHANGED
@@ -1,22 +1,30 @@
 ---
-paperswithcode_id: null
 pretty_name: TinyShakespeare
 dataset_info:
   features:
   - name: text
     dtype: string
   splits:
-  - name: test
-    num_bytes: 55780
-    num_examples: 1
   - name: train
-    num_bytes: 1003864
+    num_bytes: 1003858
     num_examples: 1
   - name: validation
-    num_bytes: 55780
+    num_bytes: 55774
+    num_examples: 1
+  - name: test
+    num_bytes: 55774
     num_examples: 1
-  download_size: 1115394
-  dataset_size: 1115424
+  download_size: 706067
+  dataset_size: 1115406
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: validation
+    path: data/validation-*
+  - split: test
+    path: data/test-*
 ---
 
 # Dataset Card for "tiny_shakespeare"
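The new `configs` block in the card metadata points the default config at the Parquet shards under `data/`, so the splits load directly from Parquet. A minimal loading sketch, assuming the repository id is `tiny_shakespeare` and a recent `datasets` release is installed:

```python
from datasets import load_dataset

# Repo id assumed; the data_files patterns in the card
# (data/train-*, data/validation-*, data/test-*) resolve to the Parquet shards.
ds = load_dataset("tiny_shakespeare")

print(ds)                            # DatasetDict with train/validation/test
print(ds["train"][0]["text"][:100])  # each split is a single long text example
```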
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3657411dc127ce923291c3543582228841254121e57333441f64d45652e04d5b
+size 36537
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b5418159a0b1995e455ba9f03d6e2e8a7d0cf0bccba8d5589bcd6eb5dba936b0
+size 633757
data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff6d2dffe1afa2e43ec0c3496ef8396e1ee3145ec6695a1e7bc8879c247b48c7
+size 35773
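The files added above are Git LFS pointers; the Parquet shards themselves sit in LFS storage. A sketch of fetching and inspecting one shard directly, assuming the dataset repository id `tiny_shakespeare` and that `huggingface_hub` and `pyarrow` are installed:

```python
import pyarrow.parquet as pq
from huggingface_hub import hf_hub_download

# Fetch one shard from the dataset repo (repo id assumed).
path = hf_hub_download(
    repo_id="tiny_shakespeare",
    filename="data/train-00000-of-00001.parquet",
    repo_type="dataset",
)

table = pq.read_table(path)
print(table.schema)    # one string column: text
print(table.num_rows)  # a single row, holding the whole train split
```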
dataset_infos.json CHANGED
@@ -1 +1,46 @@
-{"default": {"description": "40,000 lines of Shakespeare from a variety of Shakespeare's plays. Featured in Andrej Karpathy's blog post 'The Unreasonable Effectiveness of Recurrent Neural Networks': http://karpathy.github.io/2015/05/21/rnn-effectiveness/.\n\nTo use for e.g. character modelling:\n\n```\nd = datasets.load_dataset(name='tiny_shakespeare')['train']\nd = d.map(lambda x: datasets.Value('strings').unicode_split(x['text'], 'UTF-8'))\n# train split includes vocabulary for other splits\nvocabulary = sorted(set(next(iter(d)).numpy()))\nd = d.map(lambda x: {'cur_char': x[:-1], 'next_char': x[1:]})\nd = d.unbatch()\nseq_len = 100\nbatch_size = 2\nd = d.batch(seq_len)\nd = d.batch(batch_size)\n```\n", "citation": "@misc{\n author={Karpathy, Andrej},\n title={char-rnn},\n year={2015},\n howpublished={\\url{https://github.com/karpathy/char-rnn}}\n}", "homepage": "https://github.com/karpathy/char-rnn/blob/master/data/tinyshakespeare/input.txt", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "tiny_shakespeare", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 55780, "num_examples": 1, "dataset_name": "tiny_shakespeare"}, "train": {"name": "train", "num_bytes": 1003864, "num_examples": 1, "dataset_name": "tiny_shakespeare"}, "validation": {"name": "validation", "num_bytes": 55780, "num_examples": 1, "dataset_name": "tiny_shakespeare"}}, "download_checksums": {"https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt": {"num_bytes": 1115394, "checksum": "86c4e6aa9db7c042ec79f339dcb96d42b0075e16b8fc2e86bf0ca57e2dc565ed"}}, "download_size": 1115394, "dataset_size": 1115424, "size_in_bytes": 2230818}}
+{
+  "default": {
+    "description": "40,000 lines of Shakespeare from a variety of Shakespeare's plays. Featured in Andrej Karpathy's blog post 'The Unreasonable Effectiveness of Recurrent Neural Networks': http://karpathy.github.io/2015/05/21/rnn-effectiveness/.\n\nTo use for e.g. character modelling:\n\n```\nd = datasets.load_dataset(name='tiny_shakespeare')['train']\nd = d.map(lambda x: datasets.Value('strings').unicode_split(x['text'], 'UTF-8'))\n# train split includes vocabulary for other splits\nvocabulary = sorted(set(next(iter(d)).numpy()))\nd = d.map(lambda x: {'cur_char': x[:-1], 'next_char': x[1:]})\nd = d.unbatch()\nseq_len = 100\nbatch_size = 2\nd = d.batch(seq_len)\nd = d.batch(batch_size)\n```\n",
+    "citation": "@misc{\n author={Karpathy, Andrej},\n title={char-rnn},\n year={2015},\n howpublished={\\url{https://github.com/karpathy/char-rnn}}\n}",
+    "homepage": "https://github.com/karpathy/char-rnn/blob/master/data/tinyshakespeare/input.txt",
+    "license": "",
+    "features": {
+      "text": {
+        "dtype": "string",
+        "_type": "Value"
+      }
+    },
+    "builder_name": "tiny_shakespeare",
+    "dataset_name": "tiny_shakespeare",
+    "config_name": "default",
+    "version": {
+      "version_str": "1.0.0",
+      "major": 1,
+      "minor": 0,
+      "patch": 0
+    },
+    "splits": {
+      "train": {
+        "name": "train",
+        "num_bytes": 1003858,
+        "num_examples": 1,
+        "dataset_name": null
+      },
+      "validation": {
+        "name": "validation",
+        "num_bytes": 55774,
+        "num_examples": 1,
+        "dataset_name": null
+      },
+      "test": {
+        "name": "test",
+        "num_bytes": 55774,
+        "num_examples": 1,
+        "dataset_name": null
+      }
+    },
+    "download_size": 706067,
+    "dataset_size": 1115406,
+    "size_in_bytes": 1821473
+  }
+}
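The split sizes recorded in `dataset_infos.json` should also be visible through the builder's info object; a quick check, again assuming the repository id `tiny_shakespeare`:

```python
from datasets import load_dataset_builder

# Repo id assumed; info is populated from the exported dataset metadata.
builder = load_dataset_builder("tiny_shakespeare")
info = builder.info

# Mirrors the splits, download_size, and dataset_size fields above.
for name, split in info.splits.items():
    print(name, split.num_bytes, split.num_examples)
print(info.download_size, info.dataset_size)
```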