kik0220 commited on
Commit
ae61727
1 Parent(s): 82e8070

Fix typo that prevented the example from working

Browse files
Files changed (1) hide show
  1. README.md +2 -2
README.md CHANGED
@@ -108,14 +108,14 @@ from transformers import AutoTokenizer, pipeline, logging
108
  from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
109
 
110
  model_name_or_path = "TheBloke/Wizard-Vicuna-7B-Uncensored-GPTQ"
111
- model_basename = "Wizard-Vicuna-7B-Uncensored-GPTQ-4bit-128g.no-act.order"
112
 
113
  use_triton = False
114
 
115
  tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
116
 
117
  model = AutoGPTQForCausalLM.from_quantized(model_name_or_path,
118
- model_basename=model_basename
119
  use_safetensors=True,
120
  trust_remote_code=True,
121
  device="cuda:0",
 
108
  from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
109
 
110
  model_name_or_path = "TheBloke/Wizard-Vicuna-7B-Uncensored-GPTQ"
111
+ model_basename = "model"
112
 
113
  use_triton = False
114
 
115
  tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
116
 
117
  model = AutoGPTQForCausalLM.from_quantized(model_name_or_path,
118
+ model_basename=model_basename,
119
  use_safetensors=True,
120
  trust_remote_code=True,
121
  device="cuda:0",