sharpenb committed
Commit: 68220a7
1 Parent(s): bffd7a6

Upload folder using huggingface_hub (#2)


- 496ea46e041fc7d353b3ff37808a913445f5cb1bcc6a400ec7b5f44e173d1dc2 (6fcde3866a07ebbb15e62d04045aa93f0fcd035c)
- 17a3b7ac4467af3ace2cba5f78e634773978e56c358e00cadabc16e6364dcd73 (257abbb9a40316f1e95d74b5f4c770167a98650d)

Files changed (5)
  1. README.md +2 -2
  2. config.json +1 -1
  3. plots.png +0 -0
  4. results.json +24 -24
  5. smash_config.json +5 -5
README.md CHANGED
@@ -60,9 +60,9 @@ You can run the smashed model with these steps:
   2. Load & run the model.
   ```python
   from transformers import AutoModelForCausalLM, AutoTokenizer
-
+  from awq import AutoAWQForCausalLM
 
-  model = AutoModelForCausalLM.from_pretrained("PrunaAI/meta-llama-Meta-Llama-3-8B-AWQ-4bit-smashed", trust_remote_code=True, device_map='auto')
+  model = AutoAWQForCausalLM.from_quantized("PrunaAI/meta-llama-Meta-Llama-3-8B-AWQ-4bit-smashed", trust_remote_code=True, device_map='auto')
   tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B")
 
   input_ids = tokenizer("What is the color of prunes?,", return_tensors='pt').to(model.device)["input_ids"]
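For context, this README change swaps the plain transformers loader for the AutoAWQ loader, which is what actually reads the 4-bit AWQ weights. Below is a minimal end-to-end sketch of the new loading path, assuming the `autoawq` package is installed; the `max_new_tokens` value and the final decode step are illustrative additions, not part of the committed README.

```python
from awq import AutoAWQForCausalLM
from transformers import AutoTokenizer

# Load the 4-bit AWQ-quantized weights published in this repo.
model = AutoAWQForCausalLM.from_quantized(
    "PrunaAI/meta-llama-Meta-Llama-3-8B-AWQ-4bit-smashed",
    trust_remote_code=True,
    device_map="auto",
)

# The tokenizer still comes from the original base model.
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B")

# Tokenize a prompt and generate, mirroring the README example.
input_ids = tokenizer("What is the color of prunes?,", return_tensors="pt").to(model.device)["input_ids"]
outputs = model.generate(input_ids, max_new_tokens=64)  # token budget chosen for illustration
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```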
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/tmp/tmpdz8e_z82",
+  "_name_or_path": "/tmp/tmpbck3xy20",
   "architectures": [
     "LlamaForCausalLM"
   ],
plots.png CHANGED
results.json CHANGED
@@ -1,30 +1,30 @@
 {
   "base_current_gpu_type": "NVIDIA A100-PCIE-40GB",
   "base_current_gpu_total_memory": 40339.3125,
-  "base_token_generation_latency_sync": 85.63984069824218,
-  "base_token_generation_latency_async": 86.27743311226368,
-  "base_token_generation_throughput_sync": 0.011676808268753886,
-  "base_token_generation_throughput_async": 0.011590516360156497,
-  "base_token_generation_CO2_emissions": 4.384232136591548e-05,
-  "base_token_generation_energy_consumption": 0.015093384395993267,
-  "base_inference_latency_sync": 83.11347198486328,
-  "base_inference_latency_async": 81.85713291168213,
-  "base_inference_throughput_sync": 0.012031743784956079,
-  "base_inference_throughput_async": 0.012216406370828147,
-  "base_inference_CO2_emissions": 4.367591428358372e-05,
-  "base_inference_energy_consumption": 0.00012165686007698748,
+  "base_token_generation_latency_sync": 56.77331275939942,
+  "base_token_generation_latency_async": 57.322778180241585,
+  "base_token_generation_throughput_sync": 0.017613909624015017,
+  "base_token_generation_throughput_async": 0.01744507212919221,
+  "base_token_generation_CO2_emissions": null,
+  "base_token_generation_energy_consumption": null,
+  "base_inference_latency_sync": 54.84390411376953,
+  "base_inference_latency_async": 53.76389026641846,
+  "base_inference_throughput_sync": 0.018233566996353427,
+  "base_inference_throughput_async": 0.018599844524729483,
+  "base_inference_CO2_emissions": null,
+  "base_inference_energy_consumption": null,
   "smashed_current_gpu_type": "NVIDIA A100-PCIE-40GB",
   "smashed_current_gpu_total_memory": 40339.3125,
-  "smashed_token_generation_latency_sync": 53.57108993530274,
-  "smashed_token_generation_latency_async": 53.81308030337095,
-  "smashed_token_generation_throughput_sync": 0.018666784663289283,
-  "smashed_token_generation_throughput_async": 0.01858284257958298,
-  "smashed_token_generation_CO2_emissions": 3.582011233414709e-05,
-  "smashed_token_generation_energy_consumption": 0.009884854558870261,
-  "smashed_inference_latency_sync": 56.26316757202149,
-  "smashed_inference_latency_async": 55.486488342285156,
-  "smashed_inference_throughput_sync": 0.017773617148730877,
-  "smashed_inference_throughput_async": 0.01802240563200176,
-  "smashed_inference_CO2_emissions": 3.7329031799201206e-05,
-  "smashed_inference_energy_consumption": 9.846113068875813e-05
+  "smashed_token_generation_latency_sync": 43.27337074279785,
+  "smashed_token_generation_latency_async": 43.26997306197882,
+  "smashed_token_generation_throughput_sync": 0.02310890006566992,
+  "smashed_token_generation_throughput_async": 0.023110714641944086,
+  "smashed_token_generation_CO2_emissions": null,
+  "smashed_token_generation_energy_consumption": null,
+  "smashed_inference_latency_sync": 51.69879035949707,
+  "smashed_inference_latency_async": 43.543338775634766,
+  "smashed_inference_throughput_sync": 0.019342812337509556,
+  "smashed_inference_throughput_async": 0.02296562523955014,
+  "smashed_inference_CO2_emissions": null,
+  "smashed_inference_energy_consumption": null
 }
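As a quick sanity check on the refreshed benchmark values, the throughput fields appear to be simply the reciprocal of the corresponding latency fields, and the relative speedup of the smashed model can be read off as a ratio of latencies. A small sketch using the updated sync token-generation numbers (units as reported in results.json; the helper variables are illustrative):

```python
# Updated sync token-generation figures taken directly from results.json.
base_latency = 56.77331275939942
smashed_latency = 43.27337074279785

# Throughput in the file matches the reciprocal of latency.
print(1 / base_latency)     # ~0.01761, matches base_token_generation_throughput_sync
print(1 / smashed_latency)  # ~0.02311, matches smashed_token_generation_throughput_sync

# Relative speedup of the smashed model over the base model on this run.
print(base_latency / smashed_latency)  # ~1.31x
```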
smash_config.json CHANGED
@@ -2,19 +2,19 @@
2
  "api_key": null,
3
  "verify_url": "http://johnrachwan.pythonanywhere.com",
4
  "smash_config": {
5
- "pruners": "[]",
6
  "pruning_ratio": 0.0,
7
- "factorizers": "[]",
8
  "quantizers": "['awq']",
9
  "weight_quantization_bits": 4,
10
- "output_deviation": 0.01,
11
- "compilers": "[]",
12
  "static_batch": true,
13
  "static_shape": true,
14
  "controlnet": "None",
15
  "unet_dim": 4,
16
  "device": "cuda",
17
- "cache_dir": "/ceph/hdd/staff/charpent/.cache/models9k4p46gn",
18
  "batch_size": 1,
19
  "model_name": "meta-llama/Meta-Llama-3-8B",
20
  "task": "text_text_generation",
 
2
  "api_key": null,
3
  "verify_url": "http://johnrachwan.pythonanywhere.com",
4
  "smash_config": {
5
+ "pruners": "None",
6
  "pruning_ratio": 0.0,
7
+ "factorizers": "None",
8
  "quantizers": "['awq']",
9
  "weight_quantization_bits": 4,
10
+ "output_deviation": 0.005,
11
+ "compilers": "None",
12
  "static_batch": true,
13
  "static_shape": true,
14
  "controlnet": "None",
15
  "unet_dim": 4,
16
  "device": "cuda",
17
+ "cache_dir": "/ceph/hdd/staff/charpent/.cache/models1tymb0wo",
18
  "batch_size": 1,
19
  "model_name": "meta-llama/Meta-Llama-3-8B",
20
  "task": "text_text_generation",