Datasets:
RWKV
/

Modalities:
Text
Formats:
json
Languages:
English
ArXiv:
Libraries:
Datasets
Dask
License:
m8than's picture
data
5194068
|
raw
history blame
8.53 kB
---
annotations_creators:
- no-annotation
language_creators:
- crowdsourced
license:
- cc-by-sa-4.0
task_categories:
- text-generation
- fill-mask
task_ids:
- language-modeling
- masked-language-modeling
source_datasets:
- original
language:
- en
configs:
- config_name: default
data_files:
- split: final
path: "data"
- config_name: chunk0
data_files:
- split: final
path: "data/dataset_chunk_0/*"
- config_name: chunk1
data_files:
- split: final
path: "data/dataset_chunk_1/*"
- config_name: chunk2
data_files:
- split: final
path: "data/dataset_chunk_2/*"
- config_name: chunk3
data_files:
- split: final
path: "data/dataset_chunk_3/*"
- config_name: chunk4
data_files:
- split: final
path: "data/dataset_chunk_4/*"
- config_name: chunk5
data_files:
- split: final
path: "data/dataset_chunk_5/*"
- config_name: chunk6
data_files:
- split: final
path: "data/dataset_chunk_6/*"
- config_name: chunk7
data_files:
- split: final
path: "data/dataset_chunk_7/*"
- config_name: chunk8
data_files:
- split: final
path: "data/dataset_chunk_8/*"
- config_name: chunk9
data_files:
- split: final
path: "data/dataset_chunk_9/*"
pretty_name: EagleX-v2-WorldContinued
---
# Dataset Card for EagleX v2 Dataset
This dataset was used to train RWKV Eagle 7B with a continued pretraining of approximately 1.1T tokens (boosting it to 2.25T total), with the final model being released as [RWKV EagleX v2](https://huggingface.co/RWKV/v5-EagleX-v2-7B-HF).
## Dataset Details
### Dataset Description
EagleX-WorldContinued is a pretraining dataset built from many of our datasets over at Recursal AI + a few others.
- **Curated by:** M8than, KaraKaraWitch, Darok
- **Funded by [optional]:** Recursal.ai
- **Shared by [optional]:** M8than
- **Language(s) (NLP):** English, Chinese, Russian + 100 others
- **License:** cc-by-sa-4.0
### Format
Dataset files are JSONL, with each line representing one conversation. Each entry contains a single key holding the full text of the conversation.
### Data Splits
- final
- Contains full conversations.
The config names correspond to dataset chunks 0 to 9. The chunks may vary slightly in size because the dataset was split by document count rather than by token or word length.
### Dataset Curators
M8than. (If something is wrong, `@m8than` on discord.)
### Licensing Information
This release contains content from europarl.
Recursal Waifus (The banner image) are licensed under CC-BY-SA.
They do not represent the related websites in any official capacity unless otherwise stated or announced by the website.
You may use them as a banner image. However, you must always link back to the dataset.
### Citation Information
```latex
@software{penedo2024fineweb,
author = {Penedo, Guilherme and Kydlíček, Hynek and von Werra, Leandro and Wolf, Thomas},
title = {FineWeb},
month = apr,
year = 2024,
doi = { 10.57967/hf/2092 },
url = {https://huggingface.co/datasets/HuggingFaceFW/fineweb}
}
@article{gao2020pile,
title={The {P}ile: An 800{GB} dataset of diverse text for language modeling},
author={Gao, Leo and Biderman, Stella and Black, Sid and Golding, Laurence and Hoppe, Travis and Foster, Charles and Phang, Jason and He, Horace and Thite, Anish and Nabeshima, Noa and others},
journal={arXiv preprint arXiv:2101.00027},
year={2020}
}
@misc{cerebras2023slimpajama,
author = {Soboleva, Daria and Al-Khateeb, Faisal and Myers, Robert and Steeves, Jacob R and Hestness, Joel and Dey, Nolan},
title = {{SlimPajama: A 627B token cleaned and deduplicated version of RedPajama}},
month = jun,
year = 2023,
howpublished = {\url{https://www.cerebras.net/blog/slimpajama-a-627b-token-cleaned-and-deduplicated-version-of-redpajama}},
url = {https://huggingface.co/datasets/cerebras/SlimPajama-627B},
}
@misc{kudugunta2023madlad400,
title={MADLAD-400: A Multilingual And Document-Level Large Audited Dataset},
author={Sneha Kudugunta and Isaac Caswell and Biao Zhang and Xavier Garcia and Christopher A. Choquette-Choo and Katherine Lee and Derrick Xin and Aditya Kusupati and Romi Stella and Ankur Bapna and Orhan Firat},
year={2023},
eprint={2309.04662},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
@misc{lozhkov2024starcoder,
title={StarCoder 2 and The Stack v2: The Next Generation},
author={Anton Lozhkov and Raymond Li and Loubna Ben Allal and Federico Cassano and Joel Lamy-Poirier and Nouamane Tazi and Ao Tang and Dmytro Pykhtar and Jiawei Liu and Yuxiang Wei and Tianyang Liu and Max Tian and Denis Kocetkov and Arthur Zucker and Younes Belkada and Zijian Wang and Qian Liu and Dmitry Abulkhanov and Indraneil Paul and Zhuang Li and Wen-Ding Li and Megan Risdal and Jia Li and Jian Zhu and Terry Yue Zhuo and Evgenii Zheltonozhskii and Nii Osae Osae Dade and Wenhao Yu and Lucas Krauß and Naman Jain and Yixuan Su and Xuanli He and Manan Dey and Edoardo Abati and Yekun Chai and Niklas Muennighoff and Xiangru Tang and Muhtasham Oblokulov and Christopher Akiki and Marc Marone and Chenghao Mou and Mayank Mishra and Alex Gu and Binyuan Hui and Tri Dao and Armel Zebaze and Olivier Dehaene and Nicolas Patry and Canwen Xu and Julian McAuley and Han Hu and Torsten Scholak and Sebastien Paquet and Jennifer Robinson and Carolyn Jane Anderson and Nicolas Chapados and Mostofa Patwary and Nima Tajbakhsh and Yacine Jernite and Carlos Muñoz Ferrandis and Lingming Zhang and Sean Hughes and Thomas Wolf and Arjun Guha and Leandro von Werra and Harm de Vries},
year={2024},
eprint={2402.19173},
archivePrefix={arXiv},
primaryClass={cs.SE}
}
@ONLINE{europarl-translation-instruct,
title = {europarl-translation-instruct},
author = {M8than, recursal.ai},
year = {2024},
howpublished = {\url{https://huggingface.co/datasets/recursal/europarl-translation-instruct}},
}
@ONLINE{europarl-conversation,
title = {europarl-conversation},
author = {M8than, recursal.ai},
year = {2024},
howpublished = {\url{https://huggingface.co/datasets/recursal/europarl-conversation}},
}
@ONLINE{recursalberg,
title = {Recursalberg},
author = {KaraKaraWitch, recursal.ai},
year = {2024},
howpublished = {\url{https://huggingface.co/datasets/recursal/Recursalberg}},
}
@ONLINE{lecturegratuits,
title = {LectureGratuits},
author = {Darok, KaraKaraWitch, recursal.ai},
year = {2024},
howpublished = {\url{https://huggingface.co/datasets/recursal/LectureGratuits}},
}
@ONLINE{arxiv-cc0,
title = {arxiv-CC0-v0.5},
author = {M8than, recursal.ai},
year = {2024},
howpublished = {\url{https://huggingface.co/datasets/recursal/arxiv-CC0-v0.5}},
}
@misc{StackingExchange,
title = {Stacking Exchange},
author = {KaraKaraWitch, recursal.ai},
year = {2024},
howpublished = {\url{https://huggingface.co/datasets/recursal/StackingExchange}},
}
@misc{MDN,
title = {MDN},
author = {KaraKaraWitch, recursal.ai},
year = {2024},
howpublished = {\url{https://huggingface.co/datasets/recursal/MDN}},
}
@misc{scp-recursal,
title = {SCP-Recursal},
author = {Darok, KaraKaraWitch, recursal.ai},
year = {2024},
howpublished = {\url{https://huggingface.co/datasets/recursal/SCP-RECURSAL}},
}
@misc{superwiki,
title = {SuperWIKI-1.5},
author = {KaraKaraWitch, recursal.ai},
year = {2024},
howpublished = {\url{https://huggingface.co/datasets/recursal/SuperWiki-1.5}},
}
@misc{Devopedia,
title = {Devopedia},
author = {KaraKaraWitch, recursal.ai},
year = {2024},
howpublished = {\url{https://huggingface.co/datasets/recursal/Devopedia}},
}
@ONLINE{fantaticfandom,
title = {FanaticFandom},
author = {KaraKaraWitch, recursal.ai},
year = {2024},
howpublished = {\url{https://huggingface.co/datasets/recursal/FanaticFandom}},
}
@ONLINE{superwiki-next,
title = {SuperWikiNEXT-32B},
author = {KaraKaraWitch, recursal.ai},
year = {2024},
howpublished = {\url{https://huggingface.co/datasets/recursal/SuperWikipedia-NEXT}},
}
```