Upload folder using huggingface_hub

#6
Files changed (2)
  1. README.md +2 -2
  2. smash_config.json +1 -1
README.md CHANGED
@@ -60,8 +60,8 @@ You can run the smashed model with these steps:
 ```python
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-try:
-model = HQQModelForCausalLM.from_quantized("PrunaAI/facebook-opt-125m-HQQ-1bit-smashed", device_map='auto')
+try:
+model = HQQModelForCausalLM.from_quantized("PrunaAI/facebook-opt-125m-HQQ-1bit-smashed", device_map='auto')
 except:
 model = AutoHQQHFModel.from_quantized("PrunaAI/facebook-opt-125m-HQQ-1bit-smashed")
 tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")
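For reference, the snippet touched by this hunk can be written as a complete, runnable block. This is a minimal sketch, not the exact README content: the `hqq` import paths and the broad `except Exception` fallback are assumptions, since the diff only shows the call sites.

```python
# Minimal sketch of the loading code touched above; the hqq import paths
# are assumptions, as the diff does not show them.
from transformers import AutoTokenizer

try:
    from hqq.engine.hf import HQQModelForCausalLM  # assumed import path
    model = HQQModelForCausalLM.from_quantized(
        "PrunaAI/facebook-opt-125m-HQQ-1bit-smashed", device_map="auto"
    )
except Exception:
    from hqq.models.hf.base import AutoHQQHFModel  # assumed fallback path
    model = AutoHQQHFModel.from_quantized("PrunaAI/facebook-opt-125m-HQQ-1bit-smashed")

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")
```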
smash_config.json CHANGED
@@ -14,7 +14,7 @@
 "controlnet": "None",
 "unet_dim": 4,
 "device": "cuda",
-"cache_dir": "/ceph/hdd/staff/charpent/.cache/modelsh3ybd3wx",
+"cache_dir": "/ceph/hdd/staff/charpent/.cache/modelsiz5iogt8",
 "batch_size": 1,
 "tokenizer": "GPT2TokenizerFast(name_or_path='facebook/opt-125m', vocab_size=50265, model_max_length=1000000000000000019884624838656, is_fast=True, padding_side='right', truncation_side='right', special_tokens={'bos_token': '</s>', 'eos_token': '</s>', 'unk_token': '</s>', 'pad_token': '<pad>'}, clean_up_tokenization_spaces=True), added_tokens_decoder={\n\t1: AddedToken(\"<pad>\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=True),\n\t2: AddedToken(\"</s>\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=True),\n}",
 "task": "text_text_generation",