sharpenb committed
Commit 6248c40
Parent: f8e5c38

Upload folder using huggingface_hub (#2)


- 37c33a99fa16dd308bfbd948e97f9c19b5028f80f16be2b394f6f6dbf3e56bef (70ffb556e1536f81c6c235fb5e639e207f678504)
- 3b463e7e410d2f1aea38959fa06c8c2f1747f624bd85935cdc17466ee17b5c1c (c430de2dabfe31c1ea67d52f5d336d278fea9928)

Files changed (5)
  1. README.md +2 -2
  2. config.json +2 -2
  3. model.safetensors +2 -2
  4. plots.png +0 -0
  5. smash_config.json +1 -1
README.md CHANGED
@@ -34,7 +34,7 @@ tags:
 
 ## Results
 
-Detailed efficiency metrics coming soon!
+![image info](./plots.png)
 
 **Frequently Asked Questions**
 - ***How does the compression work?*** The model is compressed with llm-int8.
@@ -61,7 +61,7 @@ You can run the smashed model with these steps:
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
 model = AutoModelForCausalLM.from_pretrained("PrunaAI/llm-agents-tora-code-7b-v1.0-bnb-4bit-smashed",
-trust_remote_code=True)
+trust_remote_code=True, device_map='auto')
 tokenizer = AutoTokenizer.from_pretrained("llm-agents/tora-code-7b-v1.0")
 
 input_ids = tokenizer("What is the color of prunes?,", return_tensors='pt').to(model.device)["input_ids"]
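
The patched snippet above is the README's usage example. As a minimal end-to-end sketch of the new version (the generate/decode step and the max_new_tokens value are our illustrative assumptions, not part of the commit):

```python
# Minimal end-to-end sketch of the patched README example.
# The generation step and max_new_tokens are illustrative assumptions;
# everything else mirrors the diff above.
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained(
    "PrunaAI/llm-agents-tora-code-7b-v1.0-bnb-4bit-smashed",
    trust_remote_code=True,
    device_map="auto",  # places the 4-bit weights on the available GPU(s)
)
tokenizer = AutoTokenizer.from_pretrained("llm-agents/tora-code-7b-v1.0")

input_ids = tokenizer("What is the color of prunes?,", return_tensors="pt").to(model.device)["input_ids"]
outputs = model.generate(input_ids, max_new_tokens=64)  # assumed decoding step
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```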
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-"_name_or_path": "/tmp/tmp3nxm24kx",
+"_name_or_path": "/tmp/tmpya0efvmp",
 "architectures": [
 "LlamaForCausalLM"
 ],
@@ -21,7 +21,7 @@
 "quantization_config": {
 "bnb_4bit_compute_dtype": "bfloat16",
 "bnb_4bit_quant_type": "fp4",
-"bnb_4bit_use_double_quant": true,
+"bnb_4bit_use_double_quant": false,
 "llm_int8_enable_fp32_cpu_offload": false,
 "llm_int8_has_fp16_weight": false,
 "llm_int8_skip_modules": [
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ae99f60a13492af7d37444967528f89083537e2b811574329ca29f963dd10fed
-size 3866058418
+oid sha256:ad61780305a987be113c8a0d55dd6af97a6763ef4b27a4a6069f6a46c8645a09
+size 4167738392
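
The size delta (4167738392 - 3866058418 = 301679974 bytes, roughly 0.30 GB) is consistent with bnb_4bit_use_double_quant being flipped to false above: double quantization reportedly saves about 0.37 bits per parameter, and for a ~7B-parameter model that is 7e9 * 0.37 / 8 ≈ 0.32 GB.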
plots.png ADDED
smash_config.json CHANGED
@@ -8,7 +8,7 @@
 "compilers": "None",
 "task": "text_text_generation",
 "device": "cuda",
-"cache_dir": "/ceph/hdd/staff/charpent/.cache/modelshyof0ywr",
+"cache_dir": "/ceph/hdd/staff/charpent/.cache/models_3mqw4kh",
 "batch_size": 1,
 "model_name": "llm-agents/tora-code-7b-v1.0",
 "pruning_ratio": 0.0,