sharpenb commited on
Commit
e8311ab
1 Parent(s): 78f6acd

Upload folder using huggingface_hub (#8)

Browse files

- 1d7a74cd28dda1d861978fbd9db7abeecfdc34fc2882f2179e009384681e440f (a74d48c11c5aee6dff20c15b576252413cdadfce)

Files changed (2) hide show
  1. README.md +6 -6
  2. smash_config.json +1 -1
README.md CHANGED
@@ -59,19 +59,19 @@ You can run the smashed model with these steps:
59
  2. Load & run the model.
60
  ```python
61
  from transformers import AutoModelForCausalLM, AutoTokenizer
62
- from hqq.engine.hf import HQQModelForCausalLM
63
  from hqq.models.hf.base import AutoHQQHFModel
64
 
65
- try:
66
  model = HQQModelForCausalLM.from_quantized("PrunaAI/facebook-opt-125m-HQQ-1bit-smashed", device_map='auto')
67
  except:
68
  model = AutoHQQHFModel.from_quantized("PrunaAI/facebook-opt-125m-HQQ-1bit-smashed")
69
- tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")
70
 
71
- input_ids = tokenizer("What is the color of prunes?,", return_tensors='pt').to(model.device)["input_ids"]
72
 
73
- outputs = model.generate(input_ids, max_new_tokens=216)
74
- tokenizer.decode(outputs[0])
75
  ```
76
 
77
  ## Configurations
 
59
  2. Load & run the model.
60
  ```python
61
  from transformers import AutoModelForCausalLM, AutoTokenizer
62
+ from hqq.engine.hf import HQQModelForCausalLM
63
  from hqq.models.hf.base import AutoHQQHFModel
64
 
65
+ try:
66
  model = HQQModelForCausalLM.from_quantized("PrunaAI/facebook-opt-125m-HQQ-1bit-smashed", device_map='auto')
67
  except:
68
  model = AutoHQQHFModel.from_quantized("PrunaAI/facebook-opt-125m-HQQ-1bit-smashed")
69
+ tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")
70
 
71
+ input_ids = tokenizer("What is the color of prunes?", return_tensors='pt').to(model.device)["input_ids"]
72
 
73
+ outputs = model.generate(input_ids, max_new_tokens=216)
74
+ tokenizer.decode(outputs[0])
75
  ```
76
 
77
  ## Configurations
smash_config.json CHANGED
@@ -14,7 +14,7 @@
14
  "controlnet": "None",
15
  "unet_dim": 4,
16
  "device": "cuda",
17
- "cache_dir": "/ceph/hdd/staff/charpent/.cache/modelslpb8rwvi",
18
  "batch_size": 1,
19
  "tokenizer": "GPT2TokenizerFast(name_or_path='facebook/opt-125m', vocab_size=50265, model_max_length=1000000000000000019884624838656, is_fast=True, padding_side='right', truncation_side='right', special_tokens={'bos_token': '</s>', 'eos_token': '</s>', 'unk_token': '</s>', 'pad_token': '<pad>'}, clean_up_tokenization_spaces=True), added_tokens_decoder={\n\t1: AddedToken(\"<pad>\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=True),\n\t2: AddedToken(\"</s>\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=True),\n}",
20
  "task": "text_text_generation",
 
14
  "controlnet": "None",
15
  "unet_dim": 4,
16
  "device": "cuda",
17
+ "cache_dir": "/ceph/hdd/staff/charpent/.cache/modelscb6mwip3",
18
  "batch_size": 1,
19
  "tokenizer": "GPT2TokenizerFast(name_or_path='facebook/opt-125m', vocab_size=50265, model_max_length=1000000000000000019884624838656, is_fast=True, padding_side='right', truncation_side='right', special_tokens={'bos_token': '</s>', 'eos_token': '</s>', 'unk_token': '</s>', 'pad_token': '<pad>'}, clean_up_tokenization_spaces=True), added_tokens_decoder={\n\t1: AddedToken(\"<pad>\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=True),\n\t2: AddedToken(\"</s>\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=True),\n}",
20
  "task": "text_text_generation",