Modalities: Text
Formats: parquet
Languages: English
Libraries: Datasets, Dask
Qingyun committed
Commit dd8be43
1 Parent(s): 2fad0a7

Upload dataset

CC-MAIN-2014-35/train-00000-of-00007.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af1c428b5531251b6b1ce7524f794208b34994b22546cf3aff500d42537a99de
+size 407616441
CC-MAIN-2014-35/train-00001-of-00007.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7afbc92784ca01de96356ac38ce9a4b27d530416858a61c25c1ec5fe375b764
+size 405774567
CC-MAIN-2014-35/train-00002-of-00007.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a4e1828e9dc4c28af3125f83fa9fec6f6a7c6697b11d342d1ea7f494346ed71e
+size 406190527
CC-MAIN-2014-35/train-00003-of-00007.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51369091fb065f4651c7ebd1b479a3634e59c11dfa846a93baff0fa10a87fad9
+size 405574249
CC-MAIN-2014-35/train-00004-of-00007.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d479e982c0ba6e86139347fbc81c1161f22488a04c5112d16ed15de54a07945
+size 407816238
CC-MAIN-2014-35/train-00005-of-00007.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f3290c15ec58410bde9c441beba732de4583fbbce4aa3ea5d4136aca26bee2d8
+size 408034066
CC-MAIN-2014-35/train-00006-of-00007.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:76d24200eaa7543c664f0cdc64f4b45bf5483667a4c564c1fda741bc573e8a17
+size 408578525
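
Each of the entries above is a Git LFS pointer rather than the parquet data itself: the pointer records only the SHA-256 digest (`oid`) and the byte size of the real shard. A minimal sketch for checking a downloaded shard against its pointer; the local path is an assumption about where the file was saved, and the oid/size are copied from the `train-00000-of-00007.parquet` pointer above:

```python
import hashlib
from pathlib import Path

def verify_lfs_pointer(local_path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a downloaded file against the oid/size recorded in its LFS pointer."""
    p = Path(local_path)
    if p.stat().st_size != expected_size:
        return False
    h = hashlib.sha256()
    with p.open("rb") as f:
        # Hash in 1 MiB chunks to keep memory use flat on ~400 MB shards.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == expected_oid

# Path is hypothetical; values taken from the first pointer in this commit.
ok = verify_lfs_pointer(
    "CC-MAIN-2014-35/train-00000-of-00007.parquet",
    "af1c428b5531251b6b1ce7524f794208b34994b22546cf3aff500d42537a99de",
    407616441,
)
print("checksum ok" if ok else "checksum mismatch")
```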
README.md CHANGED
@@ -268,6 +268,58 @@ dataset_info:
     num_examples: 1455331
   download_size: 3468852905
   dataset_size: 7997621043
+- config_name: CC-MAIN-2014-35
+  features:
+  - name: general_metadata
+    struct:
+    - name: domain
+      sequence: string
+    - name: fluency_prob
+      dtype: float64
+    - name: id
+      dtype: string
+    - name: non_advertisement_prob
+      dtype: float64
+    - name: politics_prob
+      dtype: float64
+    - name: porn_prob
+      dtype: float64
+    - name: toxic_prob
+      dtype: float64
+    - name: url
+      dtype: string
+  - name: images
+    sequence: string
+  - name: texts
+    sequence: string
+  - name: metadata
+    list:
+    - name: aesthetic_prob
+      dtype: float64
+    - name: bytes
+      dtype: int64
+    - name: d_hash
+      dtype: string
+    - name: d_hash_dup_count
+      dtype: int64
+    - name: height
+      dtype: int64
+    - name: img_url_sha
+      dtype: string
+    - name: p_hash
+      dtype: string
+    - name: p_hash_dup_count
+      dtype: int64
+    - name: unsafe_prob
+      dtype: float64
+    - name: width
+      dtype: int64
+  splits:
+  - name: train
+    num_bytes: 6228103779
+    num_examples: 1219200
+  download_size: 2849584613
+  dataset_size: 6228103779
 configs:
 - config_name: CC-MAIN-2013-20
   data_files:
@@ -289,6 +341,10 @@ configs:
   data_files:
   - split: train
     path: CC-MAIN-2014-23/train-*
+- config_name: CC-MAIN-2014-35
+  data_files:
+  - split: train
+    path: CC-MAIN-2014-35/train-*
 ---
 
 We are uploading the dataset files ~
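
With the card updated, the new `CC-MAIN-2014-35` config becomes addressable through the `datasets` library like the existing dumps. A minimal sketch under stated assumptions: the repository id below is a placeholder for this dataset's actual Hub path, and the accessed fields follow the schema declared in the YAML above:

```python
from datasets import load_dataset

# Placeholder repo id; substitute the dataset's real <namespace>/<name> on the Hub.
ds = load_dataset(
    "namespace/dataset-name",
    name="CC-MAIN-2014-35",
    split="train",
    streaming=True,  # avoids downloading all seven parquet shards up front
)

# Inspect one record; field names come from the features block in the card.
example = next(iter(ds))
print(example["general_metadata"]["url"])
print(len(example["texts"]), "text segments,", len(example["images"]), "image slots")
```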