sharpenb committed
Commit bcd6d7c
1 Parent(s): 44a9bb8

Upload folder using huggingface_hub (#6)


- 6034f36849f9adbc0f206e478ad13022e246443202df6955cc1d824a482c4883 (fe5002a478bd2db79e4a08bc0b495dd22c31a66d)

Files changed (4)
  1. README.md +5 -5
  2. config.json +26 -26
  3. model.safetensors +1 -1
  4. smash_config.json +1 -1
README.md CHANGED
@@ -1,6 +1,6 @@
  ---
  thumbnail: "https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"
- base_model: ORIGINAL_REPO_NAME
+ base_model: distributed/optimized-gpt2-1b
  metrics:
  - memory_disk
  - memory_inference
@@ -52,7 +52,7 @@ tags:

  You can run the smashed model with these steps:

- 0. Check requirements from the original repo ORIGINAL_REPO_NAME installed. In particular, check python, cuda, and transformers versions.
+ 0. Check requirements from the original repo distributed/optimized-gpt2-1b installed. In particular, check python, cuda, and transformers versions.
  1. Make sure that you have installed quantization related packages.
  ```bash
  pip install transformers accelerate bitsandbytes>0.37.0
@@ -63,7 +63,7 @@ You can run the smashed model with these steps:


  model = AutoModelForCausalLM.from_pretrained("PrunaAI/distributed-optimized-gpt2-1b-bnb-8bit-smashed", trust_remote_code=True, device_map='auto')
- tokenizer = AutoTokenizer.from_pretrained("ORIGINAL_REPO_NAME")
+ tokenizer = AutoTokenizer.from_pretrained("distributed/optimized-gpt2-1b")

  input_ids = tokenizer("What is the color of prunes?,", return_tensors='pt').to(model.device)["input_ids"]

@@ -77,9 +77,9 @@ The configuration info are in `smash_config.json`.

  ## Credits & License

- The license of the smashed model follows the license of the original model. Please check the license of the original model ORIGINAL_REPO_NAME before using this model which provided the base model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on Pypi.
+ The license of the smashed model follows the license of the original model. Please check the license of the original model distributed/optimized-gpt2-1b before using this model which provided the base model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on Pypi.

  ## Want to compress other models?

  - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
- - Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
+ - Do it by yourself [here](https://docs.pruna.ai/en/latest/setup/pip.html).
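For convenience, the usage steps shown in the README diff assemble into roughly the following runnable snippet. The model ID, tokenizer ID, and prompt are taken from the diff itself; the `generate()` call and its parameters are assumptions, since the diff window ends before the generation step.

```python
# Minimal sketch assembled from the README steps above.
# The generate() arguments are illustrative assumptions.
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained(
    "PrunaAI/distributed-optimized-gpt2-1b-bnb-8bit-smashed",
    trust_remote_code=True,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("distributed/optimized-gpt2-1b")

input_ids = tokenizer(
    "What is the color of prunes?,", return_tensors="pt"
).to(model.device)["input_ids"]

outputs = model.generate(input_ids, max_new_tokens=64)  # assumed settings
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```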
config.json CHANGED
@@ -1,5 +1,5 @@
  {
- "_name_or_path": "/covalent/.cache/models/tmpkxfo3h1v7mopnxe4",
+ "_name_or_path": "/covalent/.cache/models/tmpg_8jo7a30wplyaho",
  "activation_function": "gelu_new",
  "all_reduce_scores": {
  "0": "NON_PARTICIPATING",
@@ -26,7 +26,7 @@
  "117": "NON_PARTICIPATING",
  "118": "NON_PARTICIPATING",
  "119": "NON_PARTICIPATING",
- "12": "SUCCESS",
+ "12": "NON_PARTICIPATING",
  "120": "NON_PARTICIPATING",
  "121": "NON_PARTICIPATING",
  "122": "NON_PARTICIPATING",
@@ -36,7 +36,7 @@
  "126": "NON_PARTICIPATING",
  "127": "NON_PARTICIPATING",
  "128": "NON_PARTICIPATING",
- "129": "SUCCESS",
+ "129": "NON_PARTICIPATING",
  "13": "NON_PARTICIPATING",
  "130": "NON_PARTICIPATING",
  "131": "NON_PARTICIPATING",
@@ -49,9 +49,9 @@
  "138": "NON_PARTICIPATING",
  "139": "NON_PARTICIPATING",
  "14": "NON_PARTICIPATING",
- "140": "SUCCESS",
- "141": "NON_PARTICIPATING",
- "142": "SUCCESS",
+ "140": "NON_PARTICIPATING",
+ "141": "SUCCESS",
+ "142": "NON_PARTICIPATING",
  "143": "SUCCESS",
  "144": "NON_PARTICIPATING",
  "145": "NON_PARTICIPATING",
@@ -61,15 +61,15 @@
  "149": "NON_PARTICIPATING",
  "15": "NON_PARTICIPATING",
  "150": "NON_PARTICIPATING",
- "151": "SUCCESS",
+ "151": "NON_PARTICIPATING",
  "152": "NON_PARTICIPATING",
  "153": "NON_PARTICIPATING",
  "154": "NON_PARTICIPATING",
  "155": "NON_PARTICIPATING",
- "156": "NON_PARTICIPATING",
+ "156": "SUCCESS",
  "157": "NON_PARTICIPATING",
  "158": "NON_PARTICIPATING",
- "159": "SUCCESS",
+ "159": "NON_PARTICIPATING",
  "16": "NON_PARTICIPATING",
  "160": "NON_PARTICIPATING",
  "161": "NON_PARTICIPATING",
@@ -80,7 +80,7 @@
  "166": "NON_PARTICIPATING",
  "167": "NON_PARTICIPATING",
  "168": "NON_PARTICIPATING",
- "169": "NON_PARTICIPATING",
+ "169": "SUCCESS",
  "17": "NON_PARTICIPATING",
  "170": "NON_PARTICIPATING",
  "171": "NON_PARTICIPATING",
@@ -90,8 +90,8 @@
  "175": "NON_PARTICIPATING",
  "176": "NON_PARTICIPATING",
  "177": "NON_PARTICIPATING",
- "178": "SUCCESS",
- "179": "NON_PARTICIPATING",
+ "178": "NON_PARTICIPATING",
+ "179": "SUCCESS",
  "18": "NON_PARTICIPATING",
  "180": "NON_PARTICIPATING",
  "181": "NON_PARTICIPATING",
@@ -114,7 +114,7 @@
  "197": "NON_PARTICIPATING",
  "198": "NON_PARTICIPATING",
  "199": "NON_PARTICIPATING",
- "2": "NON_PARTICIPATING",
+ "2": "SUCCESS",
  "20": "NON_PARTICIPATING",
  "200": "NON_PARTICIPATING",
  "201": "NON_PARTICIPATING",
@@ -130,7 +130,7 @@
  "210": "NON_PARTICIPATING",
  "211": "NON_PARTICIPATING",
  "212": "NON_PARTICIPATING",
- "213": "NON_PARTICIPATING",
+ "213": "SUCCESS",
  "214": "NON_PARTICIPATING",
  "215": "NON_PARTICIPATING",
  "216": "SUCCESS",
@@ -179,7 +179,7 @@
  "255": "NON_PARTICIPATING",
  "26": "NON_PARTICIPATING",
  "27": "NON_PARTICIPATING",
- "28": "NON_PARTICIPATING",
+ "28": "SUCCESS",
  "29": "NON_PARTICIPATING",
  "3": "NON_PARTICIPATING",
  "30": "NON_PARTICIPATING",
@@ -206,7 +206,7 @@
  "5": "NON_PARTICIPATING",
  "50": "NON_PARTICIPATING",
  "51": "NON_PARTICIPATING",
- "52": "NON_PARTICIPATING",
+ "52": "SUCCESS",
  "53": "NON_PARTICIPATING",
  "54": "NON_PARTICIPATING",
  "55": "NON_PARTICIPATING",
@@ -222,41 +222,41 @@
  "64": "NON_PARTICIPATING",
  "65": "NON_PARTICIPATING",
  "66": "NON_PARTICIPATING",
- "67": "SUCCESS",
+ "67": "NON_PARTICIPATING",
  "68": "NON_PARTICIPATING",
  "69": "NON_PARTICIPATING",
  "7": "SUCCESS",
  "70": "NON_PARTICIPATING",
  "71": "NON_PARTICIPATING",
  "72": "NON_PARTICIPATING",
- "73": "SUCCESS",
- "74": "NON_PARTICIPATING",
- "75": "SUCCESS",
+ "73": "NON_PARTICIPATING",
+ "74": "SUCCESS",
+ "75": "NON_PARTICIPATING",
  "76": "NON_PARTICIPATING",
  "77": "NON_PARTICIPATING",
- "78": "SUCCESS",
+ "78": "NON_PARTICIPATING",
  "79": "NON_PARTICIPATING",
  "8": "NON_PARTICIPATING",
  "80": "NON_PARTICIPATING",
  "81": "NON_PARTICIPATING",
  "82": "NON_PARTICIPATING",
  "83": "NON_PARTICIPATING",
- "84": "SUCCESS",
+ "84": "NON_PARTICIPATING",
  "85": "NON_PARTICIPATING",
- "86": "SUCCESS",
+ "86": "NON_PARTICIPATING",
  "87": "NON_PARTICIPATING",
- "88": "NON_PARTICIPATING",
+ "88": "SUCCESS",
  "89": "NON_PARTICIPATING",
  "9": "NON_PARTICIPATING",
  "90": "NON_PARTICIPATING",
  "91": "NON_PARTICIPATING",
  "92": "NON_PARTICIPATING",
  "93": "NON_PARTICIPATING",
- "94": "SUCCESS",
+ "94": "NON_PARTICIPATING",
  "95": "SUCCESS",
  "96": "NON_PARTICIPATING",
  "97": "NON_PARTICIPATING",
- "98": "SUCCESS",
+ "98": "NON_PARTICIPATING",
  "99": "NON_PARTICIPATING"
  },
  "architectures": [
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0236a2cb311792dbd6afc14acb974ea097a423a771a763387ad90c1ca5569562
+ oid sha256:129bac3621dfd112f94500629cc35d3df6c1e4c645ac42707f058a2e8d7f789d
  size 1207575528
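`model.safetensors` is stored as a Git LFS pointer, so the diff touches only the `oid`: the re-uploaded weights have the same byte size but different contents. To confirm a downloaded file matches the new pointer, a plain SHA-256 check works; this sketch is not part of the commit.

```python
# Sketch: verify a downloaded model.safetensors against the sha256 oid
# recorded in the LFS pointer after this commit.
import hashlib

digest = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

expected = "129bac3621dfd112f94500629cc35d3df6c1e4c645ac42707f058a2e8d7f789d"
assert digest.hexdigest() == expected, "file does not match the LFS pointer"
```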
smash_config.json CHANGED
@@ -28,7 +28,7 @@
  "quant_llm-int8_weight_bits": 8,
  "max_batch_size": 1,
  "device": "cuda",
- "cache_dir": "/covalent/.cache/models/tmpkxfo3h1v",
+ "cache_dir": "/covalent/.cache/models/tmpg_8jo7a3",
  "task": "",
  "save_load_fn": "bitsandbytes",
  "save_load_fn_args": {}