sharpenb committed
Commit b68d34b
Parent: 880bd23

Upload folder using huggingface_hub (#10)


- 8430eda91885ae39e7b81bc8bf7d829e6475edc0f6deeffc380bd0448c5a3abf (5cd4cdc41384a3d4413c5643a57d3ca3d6501c5c)

Files changed (2):
  1. README.md +1 -1
  2. smash_config.json +1 -1
README.md CHANGED
@@ -59,7 +59,7 @@ You can run the smashed model with these steps:
   2. Load & run the model.
   ```python
   from transformers import AutoModelForCausalLM, AutoTokenizer
- from hqq.engine.hf import HQQModelForCausalLM
+ from hqq.engine.hf import HQQModelForCausalLM
  from hqq.models.hf.base import AutoHQQHFModel
 
   try:
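
For context, the imports touched by this hunk belong to the README's "Load & run the model" step, which the diff truncates at `try:`. Below is a minimal sketch of how that step typically continues, assuming a placeholder repo id `REPO_ID` (the actual smashed-model id is not shown in this hunk); the tokenizer name `facebook/opt-125m` is taken from `smash_config.json`:

```python
from transformers import AutoTokenizer
from hqq.engine.hf import HQQModelForCausalLM
from hqq.models.hf.base import AutoHQQHFModel

REPO_ID = "PrunaAI/<smashed-model>"  # placeholder: the real repo id is not in this diff

try:
    # hqq's HF engine wrapper loads a quantized checkpoint directly
    model = HQQModelForCausalLM.from_quantized(REPO_ID)
except Exception:
    # fall back to the lower-level loader from hqq.models.hf.base
    model = AutoHQQHFModel.from_quantized(REPO_ID)

# smash_config.json records the tokenizer as facebook/opt-125m
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")

inputs = tokenizer("What is the color of prunes?", return_tensors="pt").to(model.device)
outputs = model.generate(inputs["input_ids"], max_new_tokens=64)
print(tokenizer.decode(outputs[0]))
```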
smash_config.json CHANGED
@@ -14,7 +14,7 @@
   "controlnet": "None",
   "unet_dim": 4,
   "device": "cuda",
- "cache_dir": "/ceph/hdd/staff/charpent/.cache/modelst_zaujwg",
+ "cache_dir": "/ceph/hdd/staff/charpent/.cache/models4xop86mb",
   "batch_size": 1,
   "tokenizer": "GPT2TokenizerFast(name_or_path='facebook/opt-125m', vocab_size=50265, model_max_length=1000000000000000019884624838656, is_fast=True, padding_side='right', truncation_side='right', special_tokens={'bos_token': '</s>', 'eos_token': '</s>', 'unk_token': '</s>', 'pad_token': '<pad>'}, clean_up_tokenization_spaces=True), added_tokens_decoder={\n\t1: AddedToken(\"<pad>\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=True),\n\t2: AddedToken(\"</s>\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=True),\n}",
   "task": "text_text_generation",