philschmid (HF staff) committed on
Commit 61f715d
1 Parent(s): 0ddd59a

Upload dataset_infos.json with huggingface_hub

Files changed (1)
  1. dataset_infos.json +67 -0
dataset_infos.json ADDED
@@ -0,0 +1,67 @@
+ {"philschmid--processed_bert_dataset": {
+     "description": "Books are a rich source of both fine-grained information, how a character, an object or a scene looks like, as well as high-level semantics, what someone is thinking, feeling and how these states evolve through a story. This work aims to align books to their movie releases in order to provide rich descriptive explanations for visual content that go semantically far beyond the captions available in current datasets. \n\nWikipedia dataset containing cleaned articles of all languages.\nThe datasets are built from the Wikipedia dump\n(https://dumps.wikimedia.org/) with one split per language. Each example\ncontains the content of one full Wikipedia article with cleaning to strip\nmarkdown and unwanted sections (references, etc.).",
+     "citation": "@InProceedings{Zhu_2015_ICCV,\n title = {Aligning Books and Movies: Towards Story-Like Visual Explanations by Watching Movies and Reading Books},\n author = {Zhu, Yukun and Kiros, Ryan and Zemel, Rich and Salakhutdinov, Ruslan and Urtasun, Raquel and Torralba, Antonio and Fidler, Sanja},\n booktitle = {The IEEE International Conference on Computer Vision (ICCV)},\n month = {December},\n year = {2015}\n}\n\n\n@ONLINE {wikidump,\n author = {Wikimedia Foundation},\n title = {Wikimedia Downloads},\n url = {https://dumps.wikimedia.org}\n}",
+     "homepage": "https://yknzhu.wixsite.com/mbweb\n\nhttps://dumps.wikimedia.org",
+     "license": "",
+     "features": {
+         "input_ids": {
+             "feature": {
+                 "dtype": "int32",
+                 "id": null,
+                 "_type": "Value"
+             },
+             "length": -1,
+             "id": null,
+             "_type": "Sequence"
+         },
+         "token_type_ids": {
+             "feature": {
+                 "dtype": "int8",
+                 "id": null,
+                 "_type": "Value"
+             },
+             "length": -1,
+             "id": null,
+             "_type": "Sequence"
+         },
+         "attention_mask": {
+             "feature": {
+                 "dtype": "int8",
+                 "id": null,
+                 "_type": "Value"
+             },
+             "length": -1,
+             "id": null,
+             "_type": "Sequence"
+         },
+         "special_tokens_mask": {
+             "feature": {
+                 "dtype": "int8",
+                 "id": null,
+                 "_type": "Value"
+             },
+             "length": -1,
+             "id": null,
+             "_type": "Sequence"
+         }
+     },
+     "post_processed": null,
+     "supervised_keys": null,
+     "task_templates": null,
+     "builder_name": null,
+     "config_name": null,
+     "version": null,
+     "splits": {
+         "train": {
+             "name": "train",
+             "num_bytes": 24027300000.0,
+             "num_examples": 6674250,
+             "dataset_name": "processed_bert_dataset"
+         }
+     },
+     "download_checksums": null,
+     "download_size": 5731320582,
+     "post_processing_size": null,
+     "dataset_size": 24027300000.0,
+     "size_in_bytes": 29758620582.0
+ }}
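
For context, the "features" block above maps directly onto `datasets.Features`: each column is a `Sequence` (length -1, i.e. variable length) of integer values, with `int32` token ids and `int8` masks. Below is a minimal sketch of consuming this dataset, assuming the hub repo id `philschmid/processed_bert_dataset` implied by the top-level config key; the exact id is an assumption, not confirmed by this commit.

```python
from datasets import Features, Sequence, Value, load_dataset

# Features object equivalent to the "features" block in dataset_infos.json.
features = Features({
    "input_ids": Sequence(Value("int32")),
    "token_type_ids": Sequence(Value("int8")),
    "attention_mask": Sequence(Value("int8")),
    "special_tokens_mask": Sequence(Value("int8")),
})

# Repo id assumed from the "philschmid--processed_bert_dataset" key above.
ds = load_dataset("philschmid/processed_bert_dataset", split="train")

print(ds.features)  # should match the Features object sketched above
print(ds.num_rows)  # 6674250 per the "splits" metadata
```

The split-level numbers are internally consistent: 24,027,300,000 bytes over 6,674,250 examples is exactly 3,600 bytes per example, which would fit fixed-length 512-token records (512 × 4-byte input_ids plus three 512 × 1-byte masks = 3,584 bytes) plus a small per-row overhead, though the sequence length itself is not recorded in this file. Likewise, `size_in_bytes` equals `dataset_size` + `download_size`.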