KaraKaraWitch committed on
Commit
b9f7d10
0 Parent(s):

Super-squash branch 'main' using huggingface_hub

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +60 -0
  2. README.md +452 -0
  3. data/chunk_00/0xcert.jsonl +3 -0
  4. data/chunk_00/11mike10.jsonl +3 -0
  5. data/chunk_00/13ReasonsWhy.jsonl +3 -0
  6. data/chunk_00/196_butOnlyPorn.jsonl +3 -0
  7. data/chunk_00/1inch.jsonl +3 -0
  8. data/chunk_00/1stPersonAnimations.jsonl +3 -0
  9. data/chunk_00/209fuck.jsonl +3 -0
  10. data/chunk_00/20k.jsonl +3 -0
  11. data/chunk_00/20questions.jsonl +3 -0
  12. data/chunk_00/21savage.jsonl +3 -0
  13. data/chunk_00/21stCenturyQuotes.jsonl +3 -0
  14. data/chunk_00/2d20games.jsonl +3 -0
  15. data/chunk_00/2healthbars.jsonl +3 -0
  16. data/chunk_00/360Waves.jsonl +3 -0
  17. data/chunk_00/3dprintingdms.jsonl +3 -0
  18. data/chunk_00/3dshomebrew.jsonl +3 -0
  19. data/chunk_00/410freaks.jsonl +3 -0
  20. data/chunk_00/5050.jsonl +3 -0
  21. data/chunk_00/5thgen4runners.jsonl +3 -0
  22. data/chunk_00/806MensRoom.jsonl +3 -0
  23. data/chunk_00/8rack.jsonl +3 -0
  24. data/chunk_00/90DayFianceUK.jsonl +3 -0
  25. data/chunk_00/911Calls.jsonl +3 -0
  26. data/chunk_00/AAPL.jsonl +3 -0
  27. data/chunk_00/ABBA.jsonl +3 -0
  28. data/chunk_00/ACNHGardening.jsonl +3 -0
  29. data/chunk_00/AIK.jsonl +3 -0
  30. data/chunk_00/AI_Music.jsonl +3 -0
  31. data/chunk_00/ALevelChemistry.jsonl +3 -0
  32. data/chunk_00/AMCPlus.jsonl +3 -0
  33. data/chunk_00/AMERICANLEAN.jsonl +3 -0
  34. data/chunk_00/AMLCompliance.jsonl +3 -0
  35. data/chunk_00/ANVILVaultBreaker.jsonl +3 -0
  36. data/chunk_00/AO3.jsonl +3 -0
  37. data/chunk_00/API3.jsonl +3 -0
  38. data/chunk_00/AQuietPlace.jsonl +3 -0
  39. data/chunk_00/ASRT_stock.jsonl +3 -0
  40. data/chunk_00/AVINOC.jsonl +3 -0
  41. data/chunk_00/AZguns.jsonl +3 -0
  42. data/chunk_00/A_Cups.jsonl +3 -0
  43. data/chunk_00/AbandonedPorn.jsonl +3 -0
  44. data/chunk_00/AbbyBerner_fr.jsonl +3 -0
  45. data/chunk_00/Acadiana.jsonl +3 -0
  46. data/chunk_00/AcalaNetwork.jsonl +3 -0
  47. data/chunk_00/Actingclass.jsonl +3 -0
  48. data/chunk_00/ActualWomen.jsonl +3 -0
  49. data/chunk_00/AdamCarolla.jsonl +3 -0
  50. data/chunk_00/Adjuncts.jsonl +3 -0
.gitattributes ADDED
@@ -0,0 +1,60 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ # Audio files - uncompressed
+ *.pcm filter=lfs diff=lfs merge=lfs -text
+ *.sam filter=lfs diff=lfs merge=lfs -text
+ *.raw filter=lfs diff=lfs merge=lfs -text
+ # Audio files - compressed
+ *.aac filter=lfs diff=lfs merge=lfs -text
+ *.flac filter=lfs diff=lfs merge=lfs -text
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
+ *.ogg filter=lfs diff=lfs merge=lfs -text
+ *.wav filter=lfs diff=lfs merge=lfs -text
+ # Image files - uncompressed
+ *.bmp filter=lfs diff=lfs merge=lfs -text
+ *.gif filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
+ *.tiff filter=lfs diff=lfs merge=lfs -text
+ # Image files - compressed
+ *.jpg filter=lfs diff=lfs merge=lfs -text
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
+ *.webp filter=lfs diff=lfs merge=lfs -text
+ # Video files - compressed
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
+ *.webm filter=lfs diff=lfs merge=lfs -text
+ # Jsonl
+ *.jsonl filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,452 @@
+ ---
+ size_categories:
+ - 100M<n<1B
+ configs:
+ - config_name: default
+   data_files:
+   - split: train
+     path: data/chunk_*/*.jsonl
+ pretty_name: OKReddit (Alpha)
+ task_categories:
+ - text-generation
+ - fill-mask
+ task_ids:
+ - language-modeling
+ - masked-language-modeling
+ source_datasets:
+ - original
+ language:
+ - en
+ ---
+
+ # OKReddit α (Alpha)
+
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/633e85093a17ab61de8d9073/iIT11kzCFgbKSc0E-p5S4.png)
+
+ # Dataset Summary
+
+ OKReddit is a filtered collection of **5TiB** of Reddit submissions and comments from 2005 to 2023. This dataset has been prepared for research or archival purposes.
+
+ This dataset (obviously) includes only a filtered list of subreddits.
+
+ - **Curated by:** KaraKaraWitch
+ - **Funded by:** Recursal.ai
+ - **Shared by:** KaraKaraWitch
+ - **Language(s) (NLP):** Mainly English. Other languages are present in smaller quantities.
+ - **License:** ~~The `Scripts` folder is Apache 2.0~~ (not available in alpha!). Refer to [Licensing Information](#licensing-information) for the data license.
+
+ **NOTE**: While the dataset is currently usable, it is marked as alpha:
+
+ - Some stray filters that should have been added are not included in this release.
+ - The current data structure is rather poor for processing.
+ - Some [deleted] users' text is empty.
+ - Some subreddits were not processed properly/fully due to Python exceptions.
+
+ We are currently addressing these issues by re-running the fixed script; they will be resolved in the next release.
+
+ ### Dataset Sources
+
+ - **Source Data:** [Academic Torrents](https://academictorrents.com/details/9c263fc85366c1ef8f5bb9da0203f4c8c8db75f4) by stuck_in_the_matrix, Watchful1, RaiderBDev and the pushshift folks.
+
+ ## Supported Tasks and Leaderboards
+
+ The dataset may be used for a variety of natural language processing (NLP) tasks, including:
+
+ - Text Classification: Classifying comments and posts into categories based on sentiment, topic, or subreddit.
+
+ - Language Modeling: Training language models to understand and generate conversational text.
+
+ - Sentiment Analysis: Analyzing the sentiment of comments and posts across different subreddits and topics.
+
+ - Topic Modeling: Identifying and modeling topics discussed in the posts and comments.
+
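+ A minimal loading sketch (assuming the Hugging Face `datasets` library; streaming is recommended given the ~5TiB size, and the record fields shown are described under [Dataset Structure](#dataset-structure)):
+
+ ```py
+ from datasets import load_dataset
+
+ # Stream the single "train" split defined in the YAML config above,
+ # so the full corpus is never downloaded at once.
+ ds = load_dataset("KaraKaraWitch/OKReddit", split="train", streaming=True)
+
+ for thread in ds:
+     # One record per submission thread; see "Dataset Structure" below.
+     print(thread["subreddit"], thread["thread_id"])
+     break
+ ```
+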
+ ## Languages
+
+ The primary language of the dataset is English, as the majority of Reddit users write in English. However, posts in other languages may also be present in smaller quantities.
+
+ ## Dataset Structure
+
+ ### Data Instances
+
+ Each data instance represents a submission thread within a subreddit.
+
+ - `thread_id`: The submission thread ID, inclusive of the `t3_` prefix that Reddit uses to mark an ID as a thread: `https://reddit.com/r/<SUBREDDIT>/comments/<THREAD_ID>/`
+ - `subreddit`: The name of the subreddit. Case-insensitive; Reddit simply redirects you to the correctly-cased subreddit.
+ - `namedconversation`: An OpenAI-"compatible" conversation:
+   - `from`: The author username that posted the content. **It is not `user`, `system` or `model`!**
+   - `content`: The Reddit markdown posted.
+   - The first value of `namedconversation` is the submission. The rest are replies.
+   - If a submission is marked as NSFW / Mature, an `[R-18]` tag is prepended to the title.
+ - `submission` / `comments`: The raw submission and comments respectively.
+
+ Unsure or confused? We have provided a real sample below.
+
+ ### Data Sample
+
+ <details>
+ <summary>Sample Thread</summary>
+ <pre>
+ <code class="language-json">
+ {
+   "thread_id": "t3_of7h2",
+   "subreddit": "Gaben",
+   "namedconversation": [
+     {
+       "from": "[deleted]",
+       "content": "[13 Jan 2012, 07:01:07] TIL Half-Life 2's source code was hacked because the hacker guessed Gabe's password, which was \"gaben\"\n\nLink: half-life.wikia.com"
+     },
+     {
+       "from": "clydethefrog",
+       "content": "[15 Jan 2012, 18:01:06] That's my password too"
+     },
+     {
+       "from": "Dunge",
+       "content": "[29 Feb 2012, 02:02:34] \"Gembe was led into believing that Valve wanted to employ him as an in-house security auditor. He was to be offered a flight to the USA and was to be arrested on arrival by the FBI.\"\n\nWow that's sad"
+     },
+     {
+       "from": "captainregularr",
+       "content": "[13 Jan 2012, 14:01:14] Did you know gaben makes me gaben my gaben?"
+     },
+     {
+       "from": "Turellio",
+       "content": "[13 Jan 2012, 17:01:53] that's what gaben gaben"
+     },
+     {
+       "from": "captainregularr",
+       "content": "[13 Jan 2012, 17:01:05] I gaben to gaben's demands."
+     },
+     {
+       "from": "RagingRetard",
+       "content": "[13 Jan 2012, 17:01:49] Oh, quit your incessant gaben."
+     }
+   ],
+   "submission": {
+     "sub": {
+       "name": "Gaben",
+       "id": "2scx1",
+       "subs": null,
+       "type": null
+     },
+     "author": null,
+     "title": "TIL Half-Life 2's source code was hacked because the hacker guessed Gabe's password, which was \"gaben\"",
+     "score": 23,
+     "created": 1326440407.0,
+     "id": "of7h2",
+     "flags": "",
+     "link_flair": null,
+     "url": "http://half-life.wikia.com/wiki/Half-Life_2_Beta#Source_code_leak",
+     "text": "",
+     "removed": [],
+     "cross": []
+   },
+   "comments": [
+     {
+       "sub": {
+         "name": "Gaben",
+         "id": "2scx1",
+         "subs": -1,
+         "type": ""
+       },
+       "author": {
+         "name": "clydethefrog",
+         "uid": "",
+         "create": -1,
+         "flair": null,
+         "patreon": false,
+         "premium": false
+       },
+       "text": "That's my password too",
+       "score": 1,
+       "created": "1326652326",
+       "id": "c3hge04",
+       "parent_id": "t3_of7h2",
+       "thread_id": "t3_of7h2",
+       "flags": "A",
+       "children": []
+     },
+     {
+       "sub": {
+         "name": "Gaben",
+         "id": "2scx1",
+         "subs": -1,
+         "type": ""
+       },
+       "author": {
+         "name": "Dunge",
+         "uid": "",
+         "create": -1,
+         "flair": null,
+         "patreon": false,
+         "premium": false
+       },
+       "text": "\"Gembe was led into believing that Valve wanted to employ him as an in-house security auditor. He was to be offered a flight to the USA and was to be arrested on arrival by the FBI.\"\n\nWow that's sad",
+       "score": 3,
+       "created": "1330483894",
+       "id": "c3w2ulz",
+       "parent_id": "t3_of7h2",
+       "thread_id": "t3_of7h2",
+       "flags": "A",
+       "children": []
+     },
+     {
+       "sub": {
+         "name": "Gaben",
+         "id": "2scx1",
+         "subs": -1,
+         "type": ""
+       },
+       "author": {
+         "name": "captainregularr",
+         "uid": "",
+         "create": -1,
+         "flair": null,
+         "patreon": false,
+         "premium": false
+       },
+       "text": "Did you know gaben makes me gaben my gaben?",
+       "score": 5,
+       "created": "1326463514",
+       "id": "c3gsfkx",
+       "parent_id": "t3_of7h2",
+       "thread_id": "t3_of7h2",
+       "flags": "A",
+       "children": [
+         {
+           "sub": {
+             "name": "Gaben",
+             "id": "2scx1",
+             "subs": -1,
+             "type": ""
+           },
+           "author": {
+             "name": "Turellio",
+             "uid": "",
+             "create": -1,
+             "flair": null,
+             "patreon": false,
+             "premium": false
+           },
+           "text": "that's what gaben gaben",
+           "score": 3,
+           "created": "1326476873",
+           "id": "c3guihp",
+           "parent_id": "t1_c3gsfkx",
+           "thread_id": "t3_of7h2",
+           "flags": "A",
+           "children": [
+             {
+               "sub": {
+                 "name": "Gaben",
+                 "id": "2scx1",
+                 "subs": -1,
+                 "type": ""
+               },
+               "author": {
+                 "name": "captainregularr",
+                 "uid": "",
+                 "create": -1,
+                 "flair": null,
+                 "patreon": false,
+                 "premium": false
+               },
+               "text": "I gaben to gaben's demands.",
+               "score": 5,
+               "created": "1326477005",
+               "id": "c3guje0",
+               "parent_id": "t1_c3guihp",
+               "thread_id": "t3_of7h2",
+               "flags": "AE",
+               "children": [
+                 {
+                   "sub": {
+                     "name": "Gaben",
+                     "id": "2scx1",
+                     "subs": -1,
+                     "type": ""
+                   },
+                   "author": {
+                     "name": "RagingRetard",
+                     "uid": "",
+                     "create": -1,
+                     "flair": null,
+                     "patreon": false,
+                     "premium": false
+                   },
+                   "text": "Oh, quit your incessant gaben.",
+                   "score": 2,
+                   "created": "1326477409",
+                   "id": "c3gulzh",
+                   "parent_id": "t1_c3guje0",
+                   "thread_id": "t3_of7h2",
+                   "flags": "A",
+                   "children": []
+                 }
+               ]
+             }
+           ]
+         }
+       ]
+     }
+   ]
+ }
+ </code>
+ </pre>
+ </details>
+
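+ As a rough sketch (this helper is hypothetical and not shipped with the dataset), the `namedconversation` list can be flattened into a plain-text transcript for language-modeling use:
+
+ ```py
+ def named_conversation_to_text(thread: dict) -> str:
+     """Flatten a thread's `namedconversation` into one plain-text transcript."""
+     lines = []
+     for turn in thread["namedconversation"]:
+         # `from` is a Reddit username (or "[deleted]"), not a chat role.
+         lines.append(f'{turn["from"]}: {turn["content"]}')
+     return "\n\n".join(lines)
+ ```
+
+ Applied to the sample above, the transcript would start with the `[deleted]` submission turn, followed by each reply in order.
+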
+ # Dataset Creation
+
+ ### Curation Rationale
+
+ Reddit has graced the world with its unique design and way of handling comments (extremely nested comment chains).
+ However, we have noted that it is possible to flatten comment chains into one long conversation without the conversation looking too strange or out of place.
+
+ Additionally, since Reddit goes back to 2005, it has a lot of data that is waiting to be explored and used.
+ (Plus, recent Large Language Models have been using Reddit data for quite some time!)
+
+ After reviewing UpVoteWeb's curation practices, we have taken it upon ourselves to develop a more open dataset.
+ Recognising that variety is the spice of life, we only pruned subreddits that do not contain useful data, based on three metrics:
+
+ 1. Engagement (how active submissions are relative to the comments received: total comments / total submissions)
+ 2. Richness (the ratio of media submissions to all submissions, squared)
+ 3. Diversity (the number of unique comment and submission authors over the total number of submissions)
+
+ In practice, it looks something like this:
+
+ ```py
+ # ...
+
+ engagement = comment_data["comments"] / submission_data["submissions"]
+ richness = (submission_data["media"] / submission_data["submissions"]) ** 2
+ diversity = (
+     comment_data["authors"] + submission_data["authors"]
+ ) / submission_data["submissions"]
+ ```
+
+ We additionally employ some baseline thresholds, such as a minimum number of submissions, submission authors, comments and comment authors.
+
+ In practice:
+
+ ```py
+ if (
+     stats_data["submission"]["authors"] < 70  # Total unique authors
+     or stats_data["comment"]["authors"] < 20  # Total unique commenters
+     or stats_data["submission"]["submissions"] < 450  # Total submissions count
+     or stats_data["comment"]["comments"] < 585  # Total comments count
+ ):
+     ...  # Skip the subreddit
+ ```
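+
+ Putting the baseline thresholds and the three metrics together, the subreddit-level decision presumably looks roughly like the sketch below. This is not the actual code from `RedditScoring.py`; the metric cut-offs are placeholders, not the values actually used:
+
+ ```py
+ def keep_subreddit(stats_data: dict,
+                    min_engagement: float = 1.0,
+                    min_richness: float = 0.0,
+                    min_diversity: float = 0.0) -> bool:
+     """Hypothetical combination of the baseline and metric checks above."""
+     submission_data = stats_data["submission"]
+     comment_data = stats_data["comment"]
+
+     # Baseline thresholds (values taken from the snippet above).
+     if (
+         submission_data["authors"] < 70
+         or comment_data["authors"] < 20
+         or submission_data["submissions"] < 450
+         or comment_data["comments"] < 585
+     ):
+         return False
+
+     # The three quality metrics, as defined earlier.
+     engagement = comment_data["comments"] / submission_data["submissions"]
+     richness = (submission_data["media"] / submission_data["submissions"]) ** 2
+     diversity = (
+         comment_data["authors"] + submission_data["authors"]
+     ) / submission_data["submissions"]
+
+     # Placeholder cut-offs; the real values live in RedditScoring.py.
+     return (
+         engagement >= min_engagement
+         and richness >= min_richness
+         and diversity >= min_diversity
+     )
+ ```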
+
+ With the baseline and these three metrics, we filter out a host of low-quality subreddits. By this stage, we have selected ~62K subreddits that are of good to high quality.
+
+ After filtering subreddits, we then filter submissions and comments by the following rules (a rough sketch is given further below):
+
+ 1. We skip submission threads with fewer than 5 comments.
+ 2. We prune comments with a score below -4 (the default Reddit threshold).
+ 3. For submissions with more than 50 comments, we drop all comments at a nesting depth of 6 (inspired by a RES filter).
+ 4. If a comment chain's score drops below 0, we prune the rest of the chain.
+ 5. Child comments whose parent was pruned by rules 2-4 are pruned as well.
+
+ For more information, refer to the scripts provided alongside this repo: specifically, `RedditScoring.py` for subreddit filtering and `RedditThreader.py` for per-thread filtering.
+
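+ The per-thread logic lives in `RedditThreader.py` (not shipped with this alpha). A rough, hypothetical sketch of rules 1-5, assuming each comment carries its `score`, `parent_id` and a pre-computed nesting `depth`, might look like:
+
+ ```py
+ def filter_thread(comments: list[dict]) -> list[dict] | None:
+     """Hypothetical application of rules 1-5 to one submission thread."""
+     # Rule 1: skip threads with fewer than 5 comments.
+     if len(comments) < 5:
+         return None
+
+     kept: dict[str, dict] = {}
+     # Process parents before children so pruning cascades (rule 5).
+     for comment in sorted(comments, key=lambda c: c["depth"]):
+         # Rule 2: prune comments scored below -4.
+         if comment["score"] < -4:
+             continue
+         # Rule 3: in busy threads, drop deeply nested comments.
+         if len(comments) > 50 and comment["depth"] >= 6:
+             continue
+         parent = kept.get(comment["parent_id"])
+         # Rule 4: once a chain's score drops below 0, prune the rest of it.
+         if parent is not None and parent["score"] < 0:
+             continue
+         # Rule 5: a reply whose parent was pruned (rules 2-4) is pruned too.
+         if comment["parent_id"].startswith("t1_") and parent is None:
+             continue
+         kept[f"t1_{comment['id']}"] = comment
+
+     return list(kept.values())
+ ```
+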
+ ### Source Data
+
+ This dataset is a filtered collection of posts and comments from the beginning of Reddit up to the end of 2023.
+
+ # Considerations for Using the Data
+
+ ### Social Impact of Dataset
+
+ With the release of this dataset, we aim to make this development resource available to the community at large.
+
+ ### Discussion of Biases
+
+ We have decided **not to censor out NSFW or toxic content.** This allows for better toxicity analysis and a more varied dataset.
+
+ # Additional Information
+
+ ## Recursal's Vision
+
+ > To make AI accessible to everyone, regardless of language or economic status.
+
+ This is the collective goal of the `RWKV Open Source foundation` and `Recursal AI`, the commercial entity that backs it.
+
+ We believe that AI should not be controlled by a select few organizations, and that it should be accessible to everyone, whether rich or poor, native English speaker or not.
+
+ ### About RWKV
+
+ RWKV is an open-source, non-profit group under the Linux Foundation, focused on developing the RWKV AI architecture in accordance with our vision.
+
+ The RWKV architecture scales efficiently and economically. As an RNN and Transformer hybrid, it provides performance similar to leading transformer models while having the compute and energy efficiency of an RNN-based architecture.
+
+ You can find out more about the project, and the latest models, at the following links:
+
+ - [https://blog.rwkv.com](https://blog.rwkv.com)
+ - [https://wiki.rwkv.com](https://wiki.rwkv.com)
+
+ ### About Recursal AI
+
+ Recursal AI is the commercial entity built to provide support for RWKV model development and users, while providing commercial services via its public cloud or private-cloud / on-premise offerings.
+
+ As part of our vision, our commitment is to ensure open-source development of, and access to, the best foundational AI models and datasets.
+
+ The datasets and models provided here are part of that commitment.
+
+ You can find out more about Recursal AI here:
+
+ - [https://recursal.ai](https://recursal.ai)
+ - [https://blog.recursal.ai](https://blog.recursal.ai)
+
+ ### Licensing Information
+
+ Since this dataset is derived from a public crawl of Reddit, the original content may be subject to copyright and other licensing terms set by the original site owner and/or the content creators.
+ Additionally, this dataset is for research and archival purposes only.
+
+ ### Citation Information
+
+ If you use this dataset in your research or project, please cite it as follows:
+ ```TeX
+ @dataset{OKReddit,
+   title     = {OKReddit},
+   year      = {2024},
+   publisher = {KaraKaraWitch},
+   url       = {https://huggingface.co/datasets/KaraKaraWitch/OKReddit}
+ }
+ ```
+
+ Additionally, please cite the following source BibTeX as well.
+ ```TeX
+ @article{,
+   title    = {Reddit comments/submissions 2005-06 to 2023-12},
+   journal  = {},
+   author   = {stuck_in_the_matrix, Watchful1, RaiderBDev},
+   year     = {},
+   url      = {},
+   abstract = {Reddit comments and submissions from 2005-06 to 2023-09 collected by pushshift and u/RaiderBDev.
+
+ These are zstandard compressed ndjson files. Example python scripts for parsing the data can be found here https://github.com/Watchful1/PushshiftDumps
+
+ The more recent dumps are collected by u/RaiderBDev and questions can be submitted here https://github.com/ArthurHeitmann/arctic_shift},
+   keywords   = {reddit},
+   terms      = {},
+   license    = {},
+   superseded = {}
+ }
+ ```
+
+ ## ...
+
+ ```
+ Qngnfrg Mra
+ - XnenXnenJvgpu @ erphefny.nv FRCG 24
+
+ - Nalguvat, naq rirelguvat pna or pbyyngrq vagb qngnfrg.
+ - Gb orpbzr bar jvgu gur qngn, bar zhfg or jvyyvat gb bcra gurve zvaqf.
+ - Ab znggre ubj phefrq vg znl frra, gurer'f nyjnlf zber jbefr guvatf bhg gurer.
+ - NCV Yvzvgf, Cnljnyyf, Fhofpevcgvbaf naq bgure yvzvgngvbaf ner n "fhttrfgvba".
+ - Vs nyy ryfr snvyf, cebkvrf naq nppbhagf.
+ - Bar funyy arire cehar pbagrag jvgubhg eulzr be ernfba.
+ - Hayrff vg'f pyrneyl NV-Fybc. Lbh'er serr gb tb unz.
+ - Qngnfrgf ner Rireterra, arire qrpvqhbhf.
+ - Ohvyq gb fpnyr, arire fvatyr-guernqrq.
+ ```
data/chunk_00/0xcert.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:62d27b05ca84cb70ac80111838cbf0bc8904833d5c5840e6e28d35ad2672dc06
+ size 590479
data/chunk_00/11mike10.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0b50b4948a5ecf0873e9adf15120a637b9c5c70c1f7d9369019e0b0b2024ce0d
+ size 448073
data/chunk_00/13ReasonsWhy.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05bde9cd7681acc29f0fba1e117c021babb370c4a8459faca06c94b1551a9078
+ size 253039958
data/chunk_00/196_butOnlyPorn.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f2af9a0e89740d85aa65f218fe8db20bda60320d82c5f0f1de562f76af34461
+ size 419593
data/chunk_00/1inch.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ae46fe3cfffe49b6fb6a2520e3a81beebf8a27c583929738295702eeb3f159b
+ size 5092450
data/chunk_00/1stPersonAnimations.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:893bac6ff1e0adf2db4be0c81310ac733e71a0d75a85d5a394202947cb9ed8de
+ size 1863872
data/chunk_00/209fuck.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b07ce664035f27f97db90d6d17d3b955c012a409ce26f82b62b06ba52d384414
+ size 5240092
data/chunk_00/20k.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d0aa73360b7e187366255d22ec4939d803091c4a9c937f231d0c48b59c4e43a0
+ size 1336988
data/chunk_00/20questions.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:33dc8803b5a4ab2e3c068f8148819d46aa3c0ba65d5e9d96bc4063c1c4d89b86
+ size 12171275
data/chunk_00/21savage.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:60d7e08ae58b786a85ef3aed2fd863e7004c94fecb5b851b2743cab3f7350dad
+ size 11651661
data/chunk_00/21stCenturyQuotes.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:186982548b8a6425f2a19fdc2ca98844ad6d3f99dca11d6520c404a7d4900aef
+ size 461148
data/chunk_00/2d20games.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:25c83f9cd2ac5abc6ca89fc12b21732bd818eedd375d3a52bdbca2958a1cbbbd
+ size 1351134
data/chunk_00/2healthbars.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c5c60f0162e68b795ced9df61e7864d8259a129fbb1f9629dd8d90f3a348c5b
+ size 105151342
data/chunk_00/360Waves.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:58c1aea15321087af2172eb99d2b7672c9cdd26de3e1e55bc57680202e3d27b2
+ size 49224739
data/chunk_00/3dprintingdms.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3dd5c2abf75b5ecddd90c9e753dc9edc43c70b156257db31b70981ca0b8e840a
+ size 5622669
data/chunk_00/3dshomebrew.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7e6603f8cd536be21310e62297763fac0649f5c9342cac96dac6fb9da18e2b0e
+ size 1924483
data/chunk_00/410freaks.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:de06671934da2cc7a3d4f568efe1577aa38db7c1f53364a68d9f9d3ebfad209c
+ size 742923
data/chunk_00/5050.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ab1b656ed09b43c962fdd52257f5be4b402ad2a09b2b3dcb8b7db34f5a0aa771
+ size 11118962
data/chunk_00/5thgen4runners.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:44567236c2763a11a94689e72119b262ea111c13b33ed35c9cad6f620ac72fbb
+ size 2027395
data/chunk_00/806MensRoom.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:58ab061125b1e2a38134b44be7b4c96664cb4240f281aca5e9def350758a096b
+ size 2221018
data/chunk_00/8rack.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:92765b32aa06ba5d6a8ab6e65913e4b566a7c52e1a31145d616720f315478e8e
+ size 7742858
data/chunk_00/90DayFianceUK.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:28e1cae6c5d2376445f5c3a29f395567abcfa073761ec2b8d415afd79abf9afd
+ size 29751473
data/chunk_00/911Calls.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2036db1ee926a7ad9d4472af757ff8056170bcbb5cc9b0354baf497cef3a9521
+ size 880439
data/chunk_00/AAPL.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:45a9095f3b94137a177a46d733027e036d58a6f3f76a79386f0d6a54b13c8bd9
+ size 5353460
data/chunk_00/ABBA.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fde937de2cf16fbcc32ca45fc819b1909a8b99bf7e208d9b3f216a4d93994798
+ size 14468777
data/chunk_00/ACNHGardening.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a758aab483d533ba70c3b304d41582556063983ecfa68fadf6a14a0da10c0ace
+ size 26511393
data/chunk_00/AIK.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fd85a14f116e5dede72f5538788318f7e6531d98e07776d8f8ee241e02dc4866
+ size 4866196
data/chunk_00/AI_Music.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8dcb4bf1affb4071d976720ccd9c2286e397ccada0af5efaca49b28f02b596a7
+ size 778109
data/chunk_00/ALevelChemistry.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1320695a97b8de67aea18ee5274f6b77b4cbab2ff1194449b7c1626fc5474319
+ size 1828571
data/chunk_00/AMCPlus.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:961412d9242fffd7c9fd0d43eae3395bdc7f0d6880f6631c5b7efe00b22d2cc2
+ size 1437274
data/chunk_00/AMERICANLEAN.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5b7a53265b13a5805cd62e69b4839dc6e91c4e04c215edf2a2ea967f16789f84
+ size 792823
data/chunk_00/AMLCompliance.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9b02ecd87a5b6dcadcef09a7e30d34aefbb64db0291a4598f40759a1deb6e54f
+ size 4313321
data/chunk_00/ANVILVaultBreaker.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f40c554cfd5b490614a5d84827402b03eb5fb77a3c7d844a347538e8fa15affc
+ size 6469087
data/chunk_00/AO3.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9754a2a2106e8011b0f24a3d12eb247f96e4465a80dfde16aa90423923841555
+ size 471412057
data/chunk_00/API3.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea51b9742c354d1864e5cf777a742507189d5f79f54e722ae5989482770fddb2
+ size 832835
data/chunk_00/AQuietPlace.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9b3756fe7f1acc650abf9215cc4abf5d1c4c5420aa6b4db246959d02ddae5f5b
+ size 1530628
data/chunk_00/ASRT_stock.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17207b7bca8ef7bd23a17e1ed7408aaa8b330b99f367460eb9ca9184b77a116b
+ size 5658675
data/chunk_00/AVINOC.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c6082d94be3567140d2c59d2099150f156743e8426c4962590445eaba75f6425
+ size 818892
data/chunk_00/AZguns.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:647d6504bab173a8b110e2a2db1a44a9bad7fe62cab526644361d2526f745324
+ size 30876581
data/chunk_00/A_Cups.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5d83e9d899e69521a4106365acde351e5ec44c7cb44f8e07bec52e38de6ef5e1
+ size 16670650
data/chunk_00/AbandonedPorn.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bc10619aae56b986c01fc09640c7b9391b304fdd5ad7fd2e37feefd372e49dcd
+ size 53176430
data/chunk_00/AbbyBerner_fr.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2c5608e4b1c8ae1cb289547c7543e324d4554c08c815566ce5ce2d1e02b96003
+ size 4602313
data/chunk_00/Acadiana.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cc96fcc93c6bd7b5073bf82807e76f23648e5520cd8256e265a98f07e4beb1c1
+ size 182639422
data/chunk_00/AcalaNetwork.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0a74c0314059df00d4b1f66622329cbdecf3837d49f17574b33cfce5628607a7
+ size 8960970
data/chunk_00/Actingclass.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bef4c92f5e54c48be75c72680fe26a61faaa1b258c5ed9557c5e10f995049ada
+ size 40849293
data/chunk_00/ActualWomen.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cd80730a137e7204b38402748b034d6711c48dc09e2b8daf150122bc83bd3077
+ size 10940416
data/chunk_00/AdamCarolla.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:849bc8a082a20fbcae2e34baf6fa7d03fd87546bf395d977208975f5dd1547d3
+ size 336712349
data/chunk_00/Adjuncts.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:86477bc04cc92097db85c4e6acc0bac3af7878c2251894e0a0be0ad925a335d4
+ size 10784470