sharpenb committed
Commit 78f6acd
Parent(s): 6e2e6a5

Upload folder using huggingface_hub (#7)


- f274e274104efd1a07b6d00e4cfdf9ad8a2c1086e3a9a600d91184ba898a734a (4ec86840614fd33562c1bf27035bf282ea143d1d)

Files changed (2):
  1. README.md +2 -0
  2. smash_config.json +1 -1
README.md CHANGED
@@ -59,6 +59,8 @@ You can run the smashed model with these steps:
  2. Load & run the model.
  ```python
  from transformers import AutoModelForCausalLM, AutoTokenizer
+ from hqq.engine.hf import HQQModelForCausalLM
+ from hqq.models.hf.base import AutoHQQHFModel
 
  try:
      model = HQQModelForCausalLM.from_quantized("PrunaAI/facebook-opt-125m-HQQ-1bit-smashed", device_map='auto')
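
For context, here is roughly how the README's loading snippet reads after this change. This is a minimal sketch, not the full model card: it assumes the surrounding card lines are unchanged, that the newly imported `AutoHQQHFModel` serves as the fallback loader in an `except` branch (the hunk ends before showing one), and that the smashed model pairs with the base `facebook/opt-125m` tokenizer named in smash_config.json.

```python
from transformers import AutoTokenizer
from hqq.engine.hf import HQQModelForCausalLM
from hqq.models.hf.base import AutoHQQHFModel

model_id = "PrunaAI/facebook-opt-125m-HQQ-1bit-smashed"

try:
    # Load the pre-quantized 1-bit checkpoint via the HQQ engine wrapper,
    # letting device_map='auto' place the weights.
    model = HQQModelForCausalLM.from_quantized(model_id, device_map='auto')
except Exception:
    # Assumption: the diff imports AutoHQQHFModel alongside the engine
    # class, suggesting it is the fallback loader; the hunk itself ends
    # before any except branch.
    model = AutoHQQHFModel.from_quantized(model_id, device_map='auto')

# Assumption: reuse the base OPT tokenizer, per the tokenizer repr
# recorded in smash_config.json.
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")
```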
smash_config.json CHANGED
@@ -14,7 +14,7 @@
  "controlnet": "None",
  "unet_dim": 4,
  "device": "cuda",
- "cache_dir": "/ceph/hdd/staff/charpent/.cache/modelsiz5iogt8",
+ "cache_dir": "/ceph/hdd/staff/charpent/.cache/modelslpb8rwvi",
  "batch_size": 1,
  "tokenizer": "GPT2TokenizerFast(name_or_path='facebook/opt-125m', vocab_size=50265, model_max_length=1000000000000000019884624838656, is_fast=True, padding_side='right', truncation_side='right', special_tokens={'bos_token': '</s>', 'eos_token': '</s>', 'unk_token': '</s>', 'pad_token': '<pad>'}, clean_up_tokenization_spaces=True), added_tokens_decoder={\n\t1: AddedToken(\"<pad>\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=True),\n\t2: AddedToken(\"</s>\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=True),\n}",
  "task": "text_text_generation",