AhmedBou committed
Commit f56ac71
Parent: 10ea185

Update README.md

Files changed (1): README.md (+13 −7)
README.md CHANGED
@@ -26,7 +26,7 @@ Import this model using:
 <span style="color: #0000FF;">from</span> peft <span style="color: #0000FF;">import</span> PeftModel, PeftConfig
 <span style="color: #0000FF;">from</span> transformers <span style="color: #0000FF;">import</span> AutoModelForCausalLM, AutoTokenizer
 
-peft_model_id = "<span style="color: #A31515;">"AhmedBou/databricks-dolly-v2-3b_on_NCSS"</span>
+peft_model_id = "<span style="color: #A31515;">AhmedBou/databricks-dolly-v2-3b_on_NCSS"</span>
 config = PeftConfig.from_pretrained(peft_model_id)
 model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path, return_dict=<span style="color: #0000FF;">True</span>, load_in_8bit=<span style="color: #0000FF;">True</span>, device_map=<span style="color: #0000FF;">'auto'</span>)
 tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
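
For reference, the loading step from the updated README gathered into one self-contained block. This is a minimal sketch, not part of the diff: it assumes the peft, transformers, and bitsandbytes packages are installed and a CUDA GPU is available for the 8-bit weights; the final PeftModel.from_pretrained call is the one shown as context in the next hunk.

<pre>
<code>
# Minimal sketch of the README's loading step (not part of the diff itself).
# Assumes peft, transformers, and bitsandbytes are installed and a CUDA GPU is present.
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer

peft_model_id = "AhmedBou/databricks-dolly-v2-3b_on_NCSS"

# The adapter config records which base model the adapter was trained on.
config = PeftConfig.from_pretrained(peft_model_id)

# Load the base model with 8-bit weights, placed automatically across available devices.
model = AutoModelForCausalLM.from_pretrained(
    config.base_model_name_or_path,
    return_dict=True,
    load_in_8bit=True,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)

# Attach the fine-tuned adapter weights on top of the base model.
model = PeftModel.from_pretrained(model, peft_model_id)
</code>
</pre>
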
@@ -40,16 +40,22 @@ model = PeftModel.from_pretrained(model, peft_model_id)
 Inference using:
 
 <pre>
-batch = tokenizer("“Multiple Regression for Appraisal” -->: ", return_tensors='pt')
+<code>
+<span style="color: #0000FF;">batch</span> = tokenizer("“Multiple Regression for Appraisal” --&gt;: ", return_tensors=<span style="color: #A31515;">'pt'</span>)
 
-with torch.cuda.amp.autocast():
-output_tokens = model.generate(**batch, max_new_tokens=50)
+<span style="color: #0000FF;">with</span> torch.cuda.amp.autocast():
+output_tokens = model.generate(**batch, max_new_tokens=<span style="color: #098658;">50</span>)
 
-print('\n\n', tokenizer.decode(output_tokens[0], skip_special_tokens=True))
+<span style="color: #0000FF;">print</span>('
+', tokenizer.decode(output_tokens[<span style="color: #098658;">0</span>], skip_special_tokens=<span style="color: #0000FF;">True</span>))
+</code>
 </pre>
 
+
 Output:
 
 <pre>
-“Multiple Regression for Appraisal” -->: Multiple Regression for Appraisal (MRA) -->: Multiple Regression for Appraisal (MRA) (with Covariates) -->: Multiple Regression for Appraisal (MRA) (with Covariates
-</pre>
+<code>
+“Multiple Regression for Appraisal” --&gt;: Multiple Regression for Appraisal (MRA) --&gt;: Multiple Regression for Appraisal (MRA) (with Covariates) --&gt;: Multiple Regression for Appraisal (MRA) (with Covariates)
+</code>
+</pre>
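
The inference step from the updated README, again as a short self-contained sketch. It assumes `model` and `tokenizer` were created as in the loading block above; moving the tokenized batch to `model.device` is an addition here (the README passes the batch directly) so the inputs land on the same device as the 8-bit model.

<pre>
<code>
# Minimal inference sketch; `model` and `tokenizer` come from the loading block above.
import torch

prompt = "“Multiple Regression for Appraisal” -->: "
batch = tokenizer(prompt, return_tensors="pt").to(model.device)  # device move added for safety

# Mixed-precision context as used in the README; generation needs no gradients.
with torch.cuda.amp.autocast():
    output_tokens = model.generate(**batch, max_new_tokens=50)

print("\n\n", tokenizer.decode(output_tokens[0], skip_special_tokens=True))
</code>
</pre>

With this adapter, the README's example prompt continues with repeated "Multiple Regression for Appraisal (MRA)" variants, as shown in the Output block above.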