AhmedBou committed
Commit d820d12
1 Parent(s): f56ac71

Update README.md

Files changed (1)
  1. README.md +2 -2
README.md CHANGED
@@ -26,7 +26,7 @@ Import this model using:
  <span style="color: #0000FF;">from</span> peft <span style="color: #0000FF;">import</span> PeftModel, PeftConfig
  <span style="color: #0000FF;">from</span> transformers <span style="color: #0000FF;">import</span> AutoModelForCausalLM, AutoTokenizer
 
- peft_model_id = "<span style="color: #A31515;">AhmedBou/databricks-dolly-v2-3b_on_NCSS"</span>
+ peft_model_id = "<span style="color: #A31515;">AhmedBou/databricks-dolly-v2-3b_on_NCSS"</span>
  config = PeftConfig.from_pretrained(peft_model_id)
  model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path, return_dict=<span style="color: #0000FF;">True</span>, load_in_8bit=<span style="color: #0000FF;">True</span>, device_map=<span style="color: #0000FF;">'auto'</span>)
  tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
@@ -41,7 +41,7 @@ Inference using:
 
  <pre>
  <code>
- <span style="color: #0000FF;">batch</span> = tokenizer("Multiple Regression for Appraisal --&gt;: ", return_tensors=<span style="color: #A31515;">'pt'</span>)
+ <span style="color: #0000FF;">batch</span> = tokenizer("Multiple Regression for Appraisal --&gt;: ", return_tensors=<span style="color: #A31515;">'pt'</span>)
 
  <span style="color: #0000FF;">with</span> torch.cuda.amp.autocast():
      output_tokens = model.generate(**batch, max_new_tokens=<span style="color: #098658;">50</span>)
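For reference, the two README snippets touched by this commit can be read together as one plain-Python example (HTML highlighting stripped). This is a minimal sketch assembled from the hunks above; the `PeftModel.from_pretrained` adapter-attach step and the final decode/print are assumptions not visible in the diffed lines.

```python
import torch
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer

peft_model_id = "AhmedBou/databricks-dolly-v2-3b_on_NCSS"
config = PeftConfig.from_pretrained(peft_model_id)

# Load the 8-bit base model and its tokenizer (as in the README snippet).
model = AutoModelForCausalLM.from_pretrained(
    config.base_model_name_or_path,
    return_dict=True,
    load_in_8bit=True,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)

# Attach the fine-tuned adapter (assumption: PeftModel is imported in the README
# but this call is not shown in the hunks above).
model = PeftModel.from_pretrained(model, peft_model_id)

# Inference on the prompt used in the README.
batch = tokenizer("Multiple Regression for Appraisal -->: ", return_tensors="pt")
with torch.cuda.amp.autocast():
    output_tokens = model.generate(**batch, max_new_tokens=50)

# Decoding step (assumption, not part of the diffed hunks).
print(tokenizer.decode(output_tokens[0], skip_special_tokens=True))
```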