TheBloke kik0220 commited on
Commit
a12f2f2
1 Parent(s): 82e8070

example doesn't work due to a typo (#5)

Browse files

- example doesn't work due to a typo (ae61727394b9999ad1dca89430f762d7ea245d6c)


Co-authored-by: kik0220 <kik0220@users.noreply.huggingface.co>

Files changed (1) hide show
  1. README.md +2 -2
README.md CHANGED
@@ -108,14 +108,14 @@ from transformers import AutoTokenizer, pipeline, logging
108
  from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
109
 
110
  model_name_or_path = "TheBloke/Wizard-Vicuna-7B-Uncensored-GPTQ"
111
- model_basename = "Wizard-Vicuna-7B-Uncensored-GPTQ-4bit-128g.no-act.order"
112
 
113
  use_triton = False
114
 
115
  tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
116
 
117
  model = AutoGPTQForCausalLM.from_quantized(model_name_or_path,
118
- model_basename=model_basename
119
  use_safetensors=True,
120
  trust_remote_code=True,
121
  device="cuda:0",
 
108
  from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
109
 
110
  model_name_or_path = "TheBloke/Wizard-Vicuna-7B-Uncensored-GPTQ"
111
+ model_basename = "model"
112
 
113
  use_triton = False
114
 
115
  tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
116
 
117
  model = AutoGPTQForCausalLM.from_quantized(model_name_or_path,
118
+ model_basename=model_basename,
119
  use_safetensors=True,
120
  trust_remote_code=True,
121
  device="cuda:0",