lysandre (HF staff) committed
Commit ecadae4
1 Parent(s): f5c0357

Upload dataset
README.md ADDED
@@ -0,0 +1,105 @@
+ ---
+ dataset_info:
+   features:
+   - name: dates
+     dtype: string
+   - name: type
+     struct:
+     - name: authorAssociation
+       dtype: string
+     - name: comment
+       dtype: bool
+     - name: issue
+       dtype: bool
+   splits:
+   - name: transformers
+     num_bytes: 5088186
+     num_examples: 143279
+   - name: peft
+     num_bytes: 284914
+     num_examples: 8301
+   - name: evaluate
+     num_bytes: 67751
+     num_examples: 1935
+   - name: huggingface_hub
+     num_bytes: 326157
+     num_examples: 9336
+   - name: accelerate
+     num_bytes: 414534
+     num_examples: 11854
+   - name: datasets
+     num_bytes: 863386
+     num_examples: 24326
+   - name: optimum
+     num_bytes: 212650
+     num_examples: 6130
+   - name: pytorch_image_models
+     num_bytes: 150547
+     num_examples: 4363
+   - name: gradio
+     num_bytes: 1270190
+     num_examples: 34996
+   - name: tokenizers
+     num_bytes: 208631
+     num_examples: 6100
+   - name: diffusers
+     num_bytes: 1574656
+     num_examples: 43907
+   - name: safetensors
+     num_bytes: 51932
+     num_examples: 1506
+   - name: sentence_transformers
+     num_bytes: 325557
+     num_examples: 9524
+   - name: candle
+     num_bytes: 203917
+     num_examples: 5365
+   - name: text_generation_inference
+     num_bytes: 241371
+     num_examples: 7120
+   - name: chat_ui
+     num_bytes: 109488
+     num_examples: 3143
+   - name: hub_docs
+     num_bytes: 143368
+     num_examples: 4073
+   download_size: 3583931
+   dataset_size: 11537235
+ configs:
+ - config_name: default
+   data_files:
+   - split: transformers
+     path: data/transformers-*
+   - split: peft
+     path: data/peft-*
+   - split: evaluate
+     path: data/evaluate-*
+   - split: huggingface_hub
+     path: data/huggingface_hub-*
+   - split: accelerate
+     path: data/accelerate-*
+   - split: datasets
+     path: data/datasets-*
+   - split: optimum
+     path: data/optimum-*
+   - split: pytorch_image_models
+     path: data/pytorch_image_models-*
+   - split: gradio
+     path: data/gradio-*
+   - split: tokenizers
+     path: data/tokenizers-*
+   - split: diffusers
+     path: data/diffusers-*
+   - split: safetensors
+     path: data/safetensors-*
+   - split: sentence_transformers
+     path: data/sentence_transformers-*
+   - split: candle
+     path: data/candle-*
+   - split: text_generation_inference
+     path: data/text_generation_inference-*
+   - split: chat_ui
+     path: data/chat_ui-*
+   - split: hub_docs
+     path: data/hub_docs-*
+ ---
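The YAML header above declares the schema shared by every split: a `dates` string plus a `type` struct with `authorAssociation`, `comment`, and `issue` fields. As a rough illustration of how that schema maps onto records in the `datasets` library, here is a minimal, self-contained sketch; the two example rows are made up for illustration and are not taken from the dataset.

```python
from datasets import Dataset, Features, Value

# Schema mirroring the `dataset_info.features` block above: a date string plus a
# struct recording the author's association and whether the event is a comment
# and/or an issue.
features = Features(
    {
        "dates": Value("string"),
        "type": {
            "authorAssociation": Value("string"),
            "comment": Value("bool"),
            "issue": Value("bool"),
        },
    }
)

# Two illustrative records (values are invented, not copied from the dataset).
records = {
    "dates": ["2023-07-01T12:34:56", "2023-07-02T08:00:00"],
    "type": [
        {"authorAssociation": "MEMBER", "comment": True, "issue": False},
        {"authorAssociation": "NONE", "comment": False, "issue": True},
    ],
}

ds = Dataset.from_dict(records, features=features)
print(ds.features)
print(ds[0])
```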
data/accelerate-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e003bd33e218b3a31da9b61803d06934d225cf98a92f485b79fc8f1dd2818126
+ size 136262
data/candle-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ece1e228c0499c8e3183fa7cbdb7e2b8e2fdc65a2d4c14245195d3b80b7b033
+ size 64129
data/chat_ui-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ffbb20db29bfdbfbb03385258615ba502d39358379267745462d1c8efc6be90
+ size 38859
data/datasets-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8a64dbf3d7fdc283209b0b6ac6b01855284abbf61bd8f2b279e82cdf2f69a7ab
+ size 277860
data/diffusers-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3afd339043b248f0b8de6487201b550feab5489ab4641034abca8bd73fd7f33a
+ size 464629
data/evaluate-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d9d936eb4103478346a994565e581c9172b3fdb68721bd98ba9819601a82323a
+ size 24775
data/gradio-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1c45c8fbe8b5f0594a419d764f4f380852f52cc77b7e065f7c54894104955982
+ size 379721
data/hub_docs-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cce38d2a1c33131ff9cf33649f0687e13fee703d9f208a69286629cb8f503ce5
+ size 50075
data/huggingface_hub-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9cddb5af8f1efab7501e159a6a69689f93088627c4087872749d16d36264413b
+ size 109258
data/optimum-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8240301ad5b02e04e0a141547dfd7e684da7dd1f7022998a2292afa39347cb51
+ size 74125
data/peft-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aad31b56fc421e386b66427d23314cd3685324195809917beaa7fe6834d5a32a
+ size 94436
data/pytorch_image_models-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:02c35e308890186009479b079d88277031c354afc243c9ebafde7b24a8ebda72
+ size 56224
data/safetensors-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7b86675357f503cd37556f762f6566b7634e1d6390344672d23b21df652c7698
+ size 19942
data/sentence_transformers-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fad2b673eb054857a75af0a3e6f4a8360e2bb8e57cc3859c6a7546f1e323b4ff
+ size 115044
data/text_generation_inference-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb0580bdeb33bf65cdab8c6453cd46d8d643c449e544f51ca1654229ca4526f5
+ size 82335
data/tokenizers-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f66016ac114215c6ff12f83fff06a4b91afea829c29303a958e9b01384677a8b
+ size 75848
data/transformers-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca122cced42e13982b53c874e77cbb0283f5301675b0135646e25ef25aa7c75f
+ size 1520409
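Each parquet shard above is stored through Git LFS, so the repository itself only tracks a small pointer file recording the blob's sha256 (`oid`) and byte size. A minimal sketch of checking a locally downloaded shard against its pointer follows; the local path is hypothetical, and the oid/size values are copied from the `accelerate` pointer above.

```python
import hashlib
from pathlib import Path


def verify_lfs_pointer(local_file: Path, expected_sha256: str, expected_size: int) -> bool:
    """Compare a downloaded file against the oid/size recorded in its Git LFS pointer."""
    data = local_file.read_bytes()  # fine for shards this small; stream for large blobs
    size_ok = len(data) == expected_size
    hash_ok = hashlib.sha256(data).hexdigest() == expected_sha256
    return size_ok and hash_ok


# Hypothetical local path; oid and size taken from the accelerate pointer above.
print(
    verify_lfs_pointer(
        Path("data/accelerate-00000-of-00001.parquet"),
        "e003bd33e218b3a31da9b61803d06934d225cf98a92f485b79fc8f1dd2818126",
        136262,
    )
)
```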
dataset_infos.json CHANGED
@@ -28,14 +28,14 @@
   "splits": {
     "transformers": {
       "name": "transformers",
-      "num_bytes": 5086147,
-      "num_examples": 143220,
+      "num_bytes": 5088186,
+      "num_examples": 143279,
       "dataset_name": null
     },
     "peft": {
       "name": "peft",
-      "num_bytes": 284778,
-      "num_examples": 8297,
+      "num_bytes": 284914,
+      "num_examples": 8301,
       "dataset_name": null
     },
     "evaluate": {
@@ -46,20 +46,20 @@
     },
     "huggingface_hub": {
       "name": "huggingface_hub",
-      "num_bytes": 325913,
-      "num_examples": 9329,
+      "num_bytes": 326157,
+      "num_examples": 9336,
       "dataset_name": null
     },
     "accelerate": {
       "name": "accelerate",
-      "num_bytes": 414323,
-      "num_examples": 11849,
+      "num_bytes": 414534,
+      "num_examples": 11854,
       "dataset_name": null
     },
     "datasets": {
       "name": "datasets",
-      "num_bytes": 863318,
-      "num_examples": 24324,
+      "num_bytes": 863386,
+      "num_examples": 24326,
       "dataset_name": null
     },
     "optimum": {
@@ -76,20 +76,20 @@
     },
     "gradio": {
       "name": "gradio",
-      "num_bytes": 1269944,
-      "num_examples": 34989,
+      "num_bytes": 1270190,
+      "num_examples": 34996,
       "dataset_name": null
     },
     "tokenizers": {
       "name": "tokenizers",
-      "num_bytes": 208563,
-      "num_examples": 6098,
+      "num_bytes": 208631,
+      "num_examples": 6100,
       "dataset_name": null
     },
     "diffusers": {
       "name": "diffusers",
-      "num_bytes": 1574248,
-      "num_examples": 43895,
+      "num_bytes": 1574656,
+      "num_examples": 43907,
       "dataset_name": null
     },
     "safetensors": {
@@ -100,26 +100,26 @@
     },
     "sentence_transformers": {
       "name": "sentence_transformers",
-      "num_bytes": 325153,
-      "num_examples": 9513,
+      "num_bytes": 325557,
+      "num_examples": 9524,
       "dataset_name": null
     },
     "candle": {
       "name": "candle",
-      "num_bytes": 203878,
-      "num_examples": 5364,
+      "num_bytes": 203917,
+      "num_examples": 5365,
       "dataset_name": null
     },
     "text_generation_inference": {
       "name": "text_generation_inference",
-      "num_bytes": 241200,
-      "num_examples": 7115,
+      "num_bytes": 241371,
+      "num_examples": 7120,
       "dataset_name": null
     },
     "chat_ui": {
       "name": "chat_ui",
-      "num_bytes": 109454,
-      "num_examples": 3142,
+      "num_bytes": 109488,
+      "num_examples": 3143,
       "dataset_name": null
     },
     "hub_docs": {
@@ -129,8 +129,8 @@
       "dataset_name": null
     }
   },
-  "download_size": 3582852,
-  "dataset_size": 11533167,
-  "size_in_bytes": 15116019
+  "download_size": 3583931,
+  "dataset_size": 11537235,
+  "size_in_bytes": 15121166
 }
}
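The updated totals are internally consistent: `dataset_size` in both the README header and `dataset_infos.json` equals the sum of the per-split `num_bytes`. A quick check, using the per-split values from the YAML header above:

```python
# Per-split byte counts from the updated dataset card above.
split_bytes = {
    "transformers": 5088186, "peft": 284914, "evaluate": 67751,
    "huggingface_hub": 326157, "accelerate": 414534, "datasets": 863386,
    "optimum": 212650, "pytorch_image_models": 150547, "gradio": 1270190,
    "tokenizers": 208631, "diffusers": 1574656, "safetensors": 51932,
    "sentence_transformers": 325557, "candle": 203917,
    "text_generation_inference": 241371, "chat_ui": 109488, "hub_docs": 143368,
}

total = sum(split_bytes.values())
print(total)            # 11537235
assert total == 11537235  # matches the updated dataset_size
```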