elricwan committed
Commit
a5ef790
1 Parent(s): 941faff

Upload dataset_infos.json

Files changed (1)
  1. dataset_infos.json +1 -0
dataset_infos.json ADDED
+ {"elricwan--roberta-data": {"description": "Books are a rich source of both fine-grained information, how a character, an object or a scene looks like, as well as high-level semantics, what someone is thinking, feeling and how these states evolve through a story.This work aims to align books to their movie releases in order to providerich descriptive explanations for visual content that go semantically farbeyond the captions available in current datasets. \n\nWikipedia dataset containing cleaned articles of all languages.\nThe datasets are built from the Wikipedia dump\n(https://dumps.wikimedia.org/) with one split per language. Each example\ncontains the content of one full Wikipedia article with cleaning to strip\nmarkdown and unwanted sections (references, etc.).\n\n\nCC-News containing news articles from news sites all over the world The data is available on AWS S3 in the Common Crawl bucket at /crawl-data/CC-NEWS/. This version of the dataset has 708241 articles. It represents a small portion of English language subset of the CC-News dataset created using news-please(Hamborg et al.,2017) to collect and extract English language portion of CC-News.\n\n\nAn open-source replication of the WebText dataset from OpenAI.\n", "citation": "@InProceedings{Zhu_2015_ICCV,\n title = {Aligning Books and Movies: Towards Story-Like Visual Explanations by Watching Movies and Reading Books},\n author = {Zhu, Yukun and Kiros, Ryan and Zemel, Rich and Salakhutdinov, Ruslan and Urtasun, Raquel and Torralba, Antonio and Fidler, Sanja},\n booktitle = {The IEEE International Conference on Computer Vision (ICCV)},\n month = {December},\n year = {2015}\n}\n\n\n@ONLINE {wikidump,\n author = \"Wikimedia Foundation\",\n title = \"Wikimedia Downloads\",\n url = \"https://dumps.wikimedia.org\"\n}\n\n\n@InProceedings{Hamborg2017,\n author = {Hamborg, Felix and Meuschke, Norman and Breitinger, Corinna and Gipp, Bela},\n title = {news-please: A Generic News Crawler and Extractor},\n year = {2017},\n booktitle = {Proceedings of the 15th International Symposium of Information Science},\n location = {Berlin},\n doi = {10.5281/zenodo.4120316},\n pages = {218--223},\n month = {March}\n}\n\n\n@misc{Gokaslan2019OpenWeb,\n title={OpenWebText Corpus},\n author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},\n howpublished{\\url{http://Skylion007.github.io/OpenWebTextCorpus}},\n year={2019}\n}\n", "homepage": "https://yknzhu.wixsite.com/mbweb\n\nhttps://dumps.wikimedia.org\n\nhttps://commoncrawl.org/2016/10/news-dataset-available/\n\nhttps://skylion007.github.io/OpenWebTextCorpus/", "license": "", "features": {"attention_mask": {"feature": {"dtype": "int8", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "input_ids": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "labels": {"feature": {"dtype": "int64", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": null, "config_name": null, "version": null, "splits": {"train": {"name": "train", "num_bytes": 194589557896, "num_examples": 29182397, "dataset_name": "roberta-data"}}, "download_checksums": null, "download_size": 51330686491, "post_processing_size": null, "dataset_size": 194589557896, "size_in_bytes": 245920244387}}