coyotte508, FalconLLM, slippylolo, and guipenedo committed
Commit c735840
0 parent(s)

Squashing commit


Co-authored-by: FalconLLM <FalconLLM@users.noreply.huggingface.co>
Co-authored-by: slippylolo <slippylolo@users.noreply.huggingface.co>
Co-authored-by: guipenedo <guipenedo@users.noreply.huggingface.co>

This view is limited to 50 files because the commit contains too many changes.

Files changed (50)
  1. .gitattributes +54 -0
  2. README.md +177 -0
  3. data/train-00000-of-05534-b8fc5348cbe605a5.parquet +3 -0
  4. data/train-00001-of-05534-9bca3ce859516338.parquet +3 -0
  5. data/train-00002-of-05534-01680948bd81de83.parquet +3 -0
  6. data/train-00003-of-05534-b7806bb8ca893c23.parquet +3 -0
  7. data/train-00004-of-05534-cbe1137f523084a7.parquet +3 -0
  8. data/train-00005-of-05534-e4a5eae6c1419c9b.parquet +3 -0
  9. data/train-00006-of-05534-5bc49be138fd315b.parquet +3 -0
  10. data/train-00007-of-05534-c185da59aece723d.parquet +3 -0
  11. data/train-00008-of-05534-8ed029166c6795cf.parquet +3 -0
  12. data/train-00009-of-05534-f0954d1648b16e88.parquet +3 -0
  13. data/train-00010-of-05534-5373970b3705bfc5.parquet +3 -0
  14. data/train-00011-of-05534-7d7970b7bb98922c.parquet +3 -0
  15. data/train-00012-of-05534-acf3fb5fee2b7e50.parquet +3 -0
  16. data/train-00013-of-05534-a2a70ea6a8536f67.parquet +3 -0
  17. data/train-00014-of-05534-2628ae1bc45a28c9.parquet +3 -0
  18. data/train-00015-of-05534-0c056156dc81ffc2.parquet +3 -0
  19. data/train-00016-of-05534-0e0c9b82ed6ccecc.parquet +3 -0
  20. data/train-00017-of-05534-81d837539550c007.parquet +3 -0
  21. data/train-00018-of-05534-6d374c09460d8143.parquet +3 -0
  22. data/train-00019-of-05534-724cfbde611d86f5.parquet +3 -0
  23. data/train-00020-of-05534-442ffd4b989fb05d.parquet +3 -0
  24. data/train-00021-of-05534-8ada25673bef5f61.parquet +3 -0
  25. data/train-00022-of-05534-77181f7417ee27cb.parquet +3 -0
  26. data/train-00023-of-05534-68a7ffc65bb0793d.parquet +3 -0
  27. data/train-00024-of-05534-fd6960561baf3ed9.parquet +3 -0
  28. data/train-00025-of-05534-364c4d103a621e0f.parquet +3 -0
  29. data/train-00026-of-05534-031de1cf0c1afcb0.parquet +3 -0
  30. data/train-00027-of-05534-bddc08d9377fbb66.parquet +3 -0
  31. data/train-00028-of-05534-09ffe38fe3bb6c59.parquet +3 -0
  32. data/train-00029-of-05534-fd33dccd28c51349.parquet +3 -0
  33. data/train-00030-of-05534-bfbcdce619c0f298.parquet +3 -0
  34. data/train-00031-of-05534-ee852b432a95e05a.parquet +3 -0
  35. data/train-00032-of-05534-50284d5c130b0be0.parquet +3 -0
  36. data/train-00033-of-05534-1f8fe78125424cc4.parquet +3 -0
  37. data/train-00034-of-05534-06a9248bcdf8d5ee.parquet +3 -0
  38. data/train-00035-of-05534-4af1fa8b5ec251d2.parquet +3 -0
  39. data/train-00036-of-05534-75f410e082912165.parquet +3 -0
  40. data/train-00037-of-05534-378fea92f8c1f986.parquet +3 -0
  41. data/train-00038-of-05534-422efc900cd120fe.parquet +3 -0
  42. data/train-00039-of-05534-9b84156534f2e079.parquet +3 -0
  43. data/train-00040-of-05534-22ecdb900499b2bf.parquet +3 -0
  44. data/train-00041-of-05534-4b0e16289c2c41a8.parquet +3 -0
  45. data/train-00042-of-05534-6e461fee85cdb6c2.parquet +3 -0
  46. data/train-00043-of-05534-6711ba24927f3382.parquet +3 -0
  47. data/train-00044-of-05534-5c643be3cd561476.parquet +3 -0
  48. data/train-00045-of-05534-fb61b8a4dae74c99.parquet +3 -0
  49. data/train-00046-of-05534-bbd7d5cc9a0dbc0d.parquet +3 -0
  50. data/train-00047-of-05534-dbeffe2b7bddff3a.parquet +3 -0
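
The full extract spans 5,534 parquet shards (only the first 50 changed files are listed above). If you only need a quick look at the data, recent versions of 🤗 `datasets` should let you load just a subset of shards by pointing `data_files` at the naming pattern shown in the list; a sketch:

```python
from datasets import load_dataset

# Load only the first ten shards (train-00000-of-05534-* through train-00009-of-05534-*).
subset = load_dataset(
    "tiiuae/falcon-refinedweb",
    data_files={"train": "data/train-0000*-of-05534-*.parquet"},
    split="train",
)
print(subset)
```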
.gitattributes ADDED
@@ -0,0 +1,54 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ # Audio files - uncompressed
+ *.pcm filter=lfs diff=lfs merge=lfs -text
+ *.sam filter=lfs diff=lfs merge=lfs -text
+ *.raw filter=lfs diff=lfs merge=lfs -text
+ # Audio files - compressed
+ *.aac filter=lfs diff=lfs merge=lfs -text
+ *.flac filter=lfs diff=lfs merge=lfs -text
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
+ *.ogg filter=lfs diff=lfs merge=lfs -text
+ *.wav filter=lfs diff=lfs merge=lfs -text
+ # Image files - uncompressed
+ *.bmp filter=lfs diff=lfs merge=lfs -text
+ *.gif filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
+ *.tiff filter=lfs diff=lfs merge=lfs -text
+ # Image files - compressed
+ *.jpg filter=lfs diff=lfs merge=lfs -text
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
+ *.webp filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,177 @@
+ ---
+ dataset_info:
+   features:
+   - name: content
+     dtype: string
+   - name: url
+     dtype: string
+   - name: timestamp
+     dtype: timestamp[s]
+   - name: dump
+     dtype: string
+   - name: segment
+     dtype: string
+   - name: image_urls
+     sequence:
+       sequence: string
+   splits:
+   - name: train
+     num_bytes: 2766953721769
+     num_examples: 968000015
+   download_size: 466888198663
+   dataset_size: 2766953721769
+ license: odc-by
+ task_categories:
+ - text-generation
+ language:
+ - en
+ pretty_name: Falcon RefinedWeb
+ size_categories:
+ - 100B<n<1T
+ ---
+
+ # 📀 Falcon RefinedWeb
+
+ **Falcon RefinedWeb is a massive English web dataset built by [TII](https://www.tii.ae) and released under an ODC-By 1.0 license.**
+
+ See the 📓 [paper on arXiv](https://arxiv.org/abs/2306.01116) for more details.
+
+ RefinedWeb is built through stringent filtering and large-scale deduplication of CommonCrawl; we found models trained on RefinedWeb to achieve performance in line with or better than models trained on curated datasets, while relying on web data only.
+
+ RefinedWeb is also "multimodal-friendly": it contains links and alt texts for the images in processed samples.
+
+ This public extract should contain 500-650GT depending on the tokenizer you use, and can be enhanced with the curated corpora of your choosing. It is about 500GB to download and requires 2.8TB of local storage once unpacked.
+
+ ```python
+ from datasets import load_dataset
+ rw = load_dataset("tiiuae/falcon-refinedweb")
+ ```
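+
+ If you only want to inspect a few samples without downloading the full extract, streaming should also work; a minimal sketch using the streaming mode of 🤗 `datasets`:
+
+ ```python
+ from datasets import load_dataset
+
+ # Stream samples instead of downloading all 5,534 parquet shards locally.
+ rw_stream = load_dataset("tiiuae/falcon-refinedweb", split="train", streaming=True)
+
+ for i, sample in enumerate(rw_stream):
+     print(sample["url"], len(sample["content"]))
+     if i == 4:  # stop after a handful of samples
+         break
+ ```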
+
+ RefinedWeb is the main dataset we have used for training the [Falcon LLM](https://falconllm.tii.ae) models:
+
+ * It was used in conjunction with curated corpora to train Falcon-[7B](https://huggingface.co/tiiuae/falcon-7b)/[40B](https://huggingface.co/tiiuae/falcon-40b), two state-of-the-art open-source models.
+ * It was also used to train Falcon-RW-[1B](https://huggingface.co/tiiuae/falcon-rw-1b)/[7B](https://huggingface.co/tiiuae/falcon-rw-7b), two models trained on 350 billion tokens of RefinedWeb alone to demonstrate its quality compared to curated corpora.
+
+
+ # Dataset card for Falcon RefinedWeb
+
+ ## Dataset Description
+
+ * **Homepage:** [falconllm.tii.ae](https://falconllm.tii.ae)
+ * **Paper:** [https://arxiv.org/abs/2306.01116](https://arxiv.org/abs/2306.01116)
+ * **Point of Contact:** [falconllm@tii.ae](mailto:falconllm@tii.ae)
+
+ ### Dataset Summary
+
+ Falcon RefinedWeb was created to serve as a large-scale English dataset for the pretraining of large language models. It may be used on its own, or augmented with curated sources (e.g., Wikipedia, StackOverflow).
+
+ It was built on top of CommonCrawl, leveraging stringent filtering and extensive deduplication.
+
+ ### Supported Tasks and Leaderboards
+
+ RefinedWeb is intended to be used primarily as a pretraining dataset for large language models. Practitioners may leverage it for upstream evaluation with a validation loss, but we do not provide any canonical split.
+
+ ### Languages
+
+ RefinedWeb primarily contains English.
+
+
+ ## Dataset Structure
+
+ ### Data Instances
+
+ Each data instance corresponds to an individual web page which has been crawled, processed, and deduplicated against all other instances.
+
+ This public extract of RefinedWeb contains about 1B instances (968M individual web pages), for a total of 2.8TB of clean text data.
+
+ ### Data Fields
+
+ * `content`: the processed and cleaned text contained in the page;
+ * `url`: the URL of the webpage crawled to produce the sample;
+ * `timestamp`: timestamp of when the webpage was crawled by CommonCrawl;
+ * `dump`: the CommonCrawl dump the sample is a part of;
+ * `segment`: the CommonCrawl segment the sample is a part of;
+ * `image_urls`: a list of [`image_url`, `image_alt_text`] pairs for all the images found in the content of the sample (see the example below).
+
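+ As a short, illustrative example of consuming the `image_urls` field (reusing the streaming setup sketched above; alt texts may be empty for some images):
+
+ ```python
+ from datasets import load_dataset
+
+ rw_stream = load_dataset("tiiuae/falcon-refinedweb", split="train", streaming=True)
+ sample = next(iter(rw_stream))
+
+ # Each entry of `image_urls` is an [image_url, image_alt_text] pair.
+ for image_url, image_alt_text in sample["image_urls"]:
+     print(image_url, "--", image_alt_text)
+ ```
+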
+ ### Data Splits
+
+ We do not provide any canonical splits for RefinedWeb.
+
+
+ ## Dataset Creation
+
+ ### Curation Rationale
+
+ Falcon RefinedWeb is built on top of [CommonCrawl](https://commoncrawl.org), using the Macrodata Refinement Pipeline, which combines content extraction, filtering heuristics, and deduplication.
+
+ In designing RefinedWeb, we abided by the following philosophy:
+
+ * (1) **Scale first.** We intend MDR to produce datasets to be used to train 40-200B parameter models, thus requiring trillions of tokens [(Hoffmann et al., 2022)](https://arxiv.org/abs/2203.15556). For English-only RefinedWeb, we target a size of 3-6 trillion tokens. Accordingly, we eschew any labour-intensive human curation process, and focus on CommonCrawl instead of disparate single-domain sources.
+ * (2) **Strict deduplication.** Inspired by the work of [Lee et al., 2021](https://arxiv.org/abs/2107.06499), which demonstrated the value of deduplication for large language models, we implement a rigorous deduplication pipeline. We combine both exact and fuzzy deduplication, and use strict settings leading to removal rates far higher than those reported for other datasets.
+ * (3) **Neutral filtering.** To avoid introducing further undesirable biases into the model, we avoid using ML-based filtering outside of language identification ([Dodge et al., 2021](https://arxiv.org/abs/2104.08758); [Welbl et al., 2021](https://arxiv.org/abs/2109.07445)). We stick to simple rules and heuristics, and use only URL filtering for adult content.
+
+ During its development, we iterated on RefinedWeb by measuring the zero-shot performance of models trained on development versions of the dataset. Our main goal was to maximize the performance obtained, bridging the gap between curated and web data. We also manually audited samples to identify potential filtering improvements.
+
+ ### Source Data
+
+ RefinedWeb is built from [CommonCrawl](https://commoncrawl.org) dumps. These dumps are constructed from crawling publicly available web pages.
+
+ ### Data Collection and Preprocessing
+
+ We applied extensive preprocessing and cleaning of the data, using our Macrodata Refinement Pipeline.
+
+ We first filter URLs to remove adult content using a blocklist and a scoring system; we then use `trafilatura` to extract content from pages, and perform language identification with the `fastText` classifier from CCNet ([Wenzek et al., 2019](https://arxiv.org/abs/1911.00359)). After this first preprocessing stage, we filter data using heuristics from MassiveWeb ([Rae et al., 2021](https://arxiv.org/abs/2112.11446)), and our own line-wise corrections.
+
+ Finally, we run extensive deduplication, removing URLs revisited across dumps and then performing fuzzy and exact substring deduplication.
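+
+ For intuition only, the ordering of these stages can be sketched as below; this is not the actual Macrodata Refinement implementation, and the filtering helpers are hypothetical placeholders (only the `trafilatura` call reflects a real dependency mentioned above):
+
+ ```python
+ from typing import Optional
+
+ import trafilatura  # content-extraction library referenced above
+
+ def url_is_allowed(url: str) -> bool:
+     # Placeholder for the blocklist + scoring URL filter.
+     return "blocked.example" not in url
+
+ def is_english(text: str) -> bool:
+     # Placeholder for fastText language identification (CCNet-style).
+     return True
+
+ def passes_heuristics(text: str) -> bool:
+     # Placeholder for MassiveWeb-style filters and line-wise corrections.
+     return len(text.split()) > 50
+
+ def process_page(url: str, html: str) -> Optional[str]:
+     """Illustrative per-document stages; deduplication runs corpus-wide afterwards."""
+     if not url_is_allowed(url):
+         return None
+     text = trafilatura.extract(html)  # main-content extraction
+     if text is None or not is_english(text) or not passes_heuristics(text):
+         return None
+     return text
+ ```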
+
+ ### Annotations
+
+ We provide automatically collected annotations for the source `url`, the `timestamp` of the crawl, the original CommonCrawl `dump` and `segment` in which the document was found, and the `image_urls` contained in the page.
+
+
+ ### Personal and Sensitive Information
+
+ As RefinedWeb is built upon publicly available web pages, it may contain sensitive information such as emails, phone numbers, or IP addresses. We believe that deduplication may have helped reduce the prevalence of PII in the dataset, but practitioners working with RefinedWeb should take care.
+
+ ## Considerations for Using the Data
+
+ ### Social Impact of Dataset
+
+ With the open-source release of Falcon RefinedWeb, we aim to increase access to high-quality web data, which has typically been held private by model developers. We believe this release will in turn improve the accessibility and the spread of performant large language models.
+
+ ### Discussion of Biases
+
+ As toxic or biased data is prevalent on the internet, it is likely that our dataset contains such content. Notably, using the Perspective API, we estimated the prevalence of toxic content in the dataset to be similar to that of The Pile.
+
+ ### Other Known Limitations
+
+ Despite our best efforts to filter content that does not qualify as natural language, and to deduplicate documents, our pipeline may let through documents that are erroneous or redundant.
+
+ ## Additional Information
+
+ ### Licensing Information
+
+ This public extract is made available under an [ODC-By 1.0](https://opendatacommons.org/licenses/by/1-0/) license; users should also abide by the [CommonCrawl ToU](https://commoncrawl.org/terms-of-use/).
+
+ ### Citation Information
+
+ ```
+ @article{refinedweb,
+   title={The {R}efined{W}eb dataset for {F}alcon {LLM}: outperforming curated corpora with web data, and web data only},
+   author={Guilherme Penedo and Quentin Malartic and Daniel Hesslow and Ruxandra Cojocaru and Alessandro Cappelli and Hamza Alobeidli and Baptiste Pannier and Ebtesam Almazrouei and Julien Launay},
+   journal={arXiv preprint arXiv:2306.01116},
+   eprint={2306.01116},
+   eprinttype={arXiv},
+   url={https://arxiv.org/abs/2306.01116},
+   year={2023}
+ }
+ ```
+
+ ### Opt-out request
+
+ RefinedWeb is based on [CommonCrawl](https://commoncrawl.org/). Their crawler honors opt-out requests in `robots.txt`; see the [CC FAQ](https://commoncrawl.org/big-picture/frequently-asked-questions/) for details.
+
+ To remove a document from RefinedWeb, please message falconllm@tii.ae.
+
+ ### Contact
+ falconllm@tii.ae
data/train-00000-of-05534-b8fc5348cbe605a5.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:450a61452d52f40beb95e5a6c804751d8739b737cb7af17466396f35de987b43
+ size 313806000
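
Each parquet shard is stored with Git LFS, so the commit only adds a small pointer file per shard, recording the LFS spec version, the SHA-256 (`oid`) of the actual parquet file, and its size in bytes. As a small sketch of how such a pointer can be used to verify a locally downloaded shard (the local `data/` path is an assumption):

```python
import hashlib
from pathlib import Path

def verify_against_pointer(pointer_text: str, local_path: Path) -> bool:
    """Check a downloaded file against its Git LFS pointer (oid sha256 + size)."""
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    if local_path.stat().st_size != expected_size:
        return False
    digest = hashlib.sha256()
    with open(local_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:450a61452d52f40beb95e5a6c804751d8739b737cb7af17466396f35de987b43
size 313806000"""
print(verify_against_pointer(pointer, Path("data/train-00000-of-05534-b8fc5348cbe605a5.parquet")))
```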
data/train-00001-of-05534-9bca3ce859516338.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c34b5492555ee602799b84c8b034d8de48d9e2b6ecae6a858cb094a57110e2de
+ size 301113599
data/train-00002-of-05534-01680948bd81de83.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ebc37d1b56dd56b3045cb33cb4981222973be0c970f575fdb5854d6d79e08557
+ size 296473228
data/train-00003-of-05534-b7806bb8ca893c23.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:acfdcbce6853684f27acf7c75709c13149a6675ee13d7b37ea925c721dcf983e
+ size 301936212
data/train-00004-of-05534-cbe1137f523084a7.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ae8c18421382844730c3da25ffb9290b173f922a397c5633429a5f16394cb6d4
+ size 308239446
data/train-00005-of-05534-e4a5eae6c1419c9b.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a44569345fe16572e2e54ccc8d97061085ccc57979895d69ad47e54fe04c7339
+ size 305459417
data/train-00006-of-05534-5bc49be138fd315b.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4e8d76c969c6d720135c812a91a07fea57670b3e1433b953999d9bf478511f6b
+ size 299980414
data/train-00007-of-05534-c185da59aece723d.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0271a457c0c1f47baf226191e88a8ba407078a3a010572eecd05979a07ed21d1
+ size 294929488
data/train-00008-of-05534-8ed029166c6795cf.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8cb2e228362922f65b1dc9670df6aafdf48e2f15a3b3f19e24f007d2dcac6914
+ size 290907720
data/train-00009-of-05534-f0954d1648b16e88.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca6f8b027789a2ebd749be660295d4b9cf8bedd3d98d00027e1458edc2ae970d
+ size 300800582
data/train-00010-of-05534-5373970b3705bfc5.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:309dcbe5537cf7ad2771008e13d4d9a7589e44ed00db6ddad98206727c71d319
+ size 294289446
data/train-00011-of-05534-7d7970b7bb98922c.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a6fabfdecbd7e0f2f88bb605a5a3ad986bb8408c0f893aa520c287da7bacac7
+ size 303766398
data/train-00012-of-05534-acf3fb5fee2b7e50.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8f2906a2bf00074a9fec61ea0bdb32f245c10a62df70e691c14176bd256819b6
+ size 320725771
data/train-00013-of-05534-a2a70ea6a8536f67.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:91dfef4f189c461cbf17dc511ba6ed6548a55c6df175b71700191e4922ebfe70
+ size 294565944
data/train-00014-of-05534-2628ae1bc45a28c9.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fbfd49a3d0e60ebfa3c89389342ec9b4037e8caa24dcf7234471d5507b69c0fd
+ size 301159279
data/train-00015-of-05534-0c056156dc81ffc2.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:022b12dc381c8b699db408dd6680cbc1f4eeaf1dd61f286b258465d74b45051d
+ size 306326423
data/train-00016-of-05534-0e0c9b82ed6ccecc.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c1fa3113049c20c88e9889e4a7ce152c9af71a4157030fea42da1abcb12fcbae
+ size 303774604
data/train-00017-of-05534-81d837539550c007.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:48262fd490801052f86c386c4841cf5cad4a221377401a5cc253e2c087d5aa0e
+ size 324168890
data/train-00018-of-05534-6d374c09460d8143.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0559dc7ac90903a39d0e4a19eb7bd14dae1cdadb51b822ff3b3487993956dc70
+ size 320678826
data/train-00019-of-05534-724cfbde611d86f5.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e0d6a4444fee63dc7c816ce87553df896b7f6aa771fcdc8fcef1f62e152f50de
+ size 310850995
data/train-00020-of-05534-442ffd4b989fb05d.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c89acae8b641bf646bb1816df45ca856b0f59a33afe0f1dc924dd26f7d0bd52f
+ size 299073286
data/train-00021-of-05534-8ada25673bef5f61.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:62438c368c42a021ed3f16910457efa513ee44d9a0aa9772c8f1329877de7496
+ size 304733037
data/train-00022-of-05534-77181f7417ee27cb.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a9de27ec71bed4dd4d8ceb515693832c211b7990743af5969d9b143c232c4f9c
+ size 302305176
data/train-00023-of-05534-68a7ffc65bb0793d.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d5108137d4294eab8d53cd3de07ab40198ea32f229337b205e295c82cacc4fe1
+ size 308768017
data/train-00024-of-05534-fd6960561baf3ed9.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82e7ad536ced24c8113a285cbbbf62afad335ceb7818d62f7a8e7a4216ca8ed0
+ size 298887665
data/train-00025-of-05534-364c4d103a621e0f.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b8f0e5e5fede41448c25bdcc73d7472c2b50b178d52f818d5c237273610b294d
+ size 309236583
data/train-00026-of-05534-031de1cf0c1afcb0.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ab78ca0edfce4e08f510e58a7d4a1800ff292d92690c71acf2407a6a3bb2395
+ size 292882816
data/train-00027-of-05534-bddc08d9377fbb66.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:395f237eae362d46814acc8265d6cc3941155bad5a39058dc66f140ed61e4f37
+ size 301620665
data/train-00028-of-05534-09ffe38fe3bb6c59.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a91ff0d2bc507be12788605609b9b09540e09b1e16cfdce85abd5f2aaabfd439
+ size 288012963
data/train-00029-of-05534-fd33dccd28c51349.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:32b22ab8dd19f91af7a19217c8391f878bff0f2a2cf3ac3f1251341a361cd2a0
+ size 293319857
data/train-00030-of-05534-bfbcdce619c0f298.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ae728f23034d897e21c8e7222a6fc5ca18b3744347bd3750f0bcf16a3d4175d
+ size 301761724
data/train-00031-of-05534-ee852b432a95e05a.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fbbf823da20875a1df3ca17be7379392bf33d497ab4c8cbae35e32f03b94d5e6
+ size 310458170
data/train-00032-of-05534-50284d5c130b0be0.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:74bc1a9857767ae2e2b0e63633c81c3a32c658459ec41c46732cd9876fa11ae3
+ size 309100947
data/train-00033-of-05534-1f8fe78125424cc4.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08aee6df4485964e96bd91c970710c377af30286586bbf1fdbbc718c2a00d1d0
+ size 284676442
data/train-00034-of-05534-06a9248bcdf8d5ee.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a2ee470a83317690e81714504e32f130f7b03e28fea025499b8d1be25c098f6d
+ size 309617806
data/train-00035-of-05534-4af1fa8b5ec251d2.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:732b87b9e5b9047511c055a9a40678913a48730aae60efb80972beda12ad0630
+ size 300346065
data/train-00036-of-05534-75f410e082912165.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3b9e4999b3dc03e52e8fc2edecc89ecacd88976a69df088d109a01e93df97510
+ size 314570278
data/train-00037-of-05534-378fea92f8c1f986.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fcb5e4bfdb2c5931e1cdd07bef8a0dac580d0d393af2124c567f4a4270427777
+ size 308116800
data/train-00038-of-05534-422efc900cd120fe.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7af96450484a8049986223112f1074a0b8801482823f0e1ced68b55f2662beb6
+ size 321330215
data/train-00039-of-05534-9b84156534f2e079.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5d1bbccc20e264c7b8b93ff205c08772b633482e478c45601b608c2fb8153e3a
+ size 302639643
data/train-00040-of-05534-22ecdb900499b2bf.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8720222773f1a74a970a4546c9dc328b697f8902a28712e1ba835e0552010f6c
+ size 305841347
data/train-00041-of-05534-4b0e16289c2c41a8.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:60fd6f57fe309d3d13e324bd73137ce9fedc2f8e26cb3c4a6947ca6e131d54ea
+ size 302834076
data/train-00042-of-05534-6e461fee85cdb6c2.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e27b825880b35d9624641159121d38ae55c0de1600b74fb0ae90e25012efd6a
+ size 304578162
data/train-00043-of-05534-6711ba24927f3382.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ebb76ceb8469a2ea265aab94207299816317dec6cf9e707cb9ac0f92621bc10b
+ size 307488703
data/train-00044-of-05534-5c643be3cd561476.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:85133effcc916739a31c2c05d68b6d230c85d4715bffa5d724e35a2eb7f7c5c1
+ size 310349298
data/train-00045-of-05534-fb61b8a4dae74c99.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:06b52cac8c99f3485ca3b4776f7303063be80cc213617a4a3654038dfae50524
+ size 301456068
data/train-00046-of-05534-bbd7d5cc9a0dbc0d.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e987c74b9e7efd9245be2dd94104e9d52b2b07146b8a76624eff410c0344c448
+ size 295084948
data/train-00047-of-05534-dbeffe2b7bddff3a.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:45fd37a865498635924a2892a1182e86940f79e9169505c9cc5819e5be975cf2
+ size 293300532