0xk1h0 committed
Commit 00df160
1 Parent(s): 338171c

Update README.md
Files changed (1): README.md (+39 -0)

README.md CHANGED
---
library_name: peft
---

## Model Usage
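
The snippet below loads the QLoRA adapter `0xk1h0/codegen2.5-7b-py150k-r20-QLoRA` together with its CodeGen2.5 base model and samples a completion for a short prompt. It assumes a CUDA-capable GPU with enough memory for the 7B model in bfloat16.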

```python
import torch

from transformers import AutoTokenizer
from peft import AutoPeftModelForCausalLM

# Load the base model's tokenizer and reuse the EOS token for padding.
tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen25-7b-mono", trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token

# Load the base model with the QLoRA adapter applied, placed on GPU 0.
device_map = {"": 0}
model = AutoPeftModelForCausalLM.from_pretrained(
    "0xk1h0/codegen2.5-7b-py150k-r20-QLoRA",
    device_map=device_map,
    torch_dtype=torch.bfloat16,
)

# Prompt the model with a comment describing the code to generate.
text = """
# Generate AES MODE encrypt python function.
"""
inputs = tokenizer(text, return_tensors="pt").to("cuda")

outputs = model.generate(
    input_ids=inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    max_length=256,
    do_sample=True,
    temperature=0.4,
    top_p=0.95,
    pad_token_id=tokenizer.eos_token_id,
)

print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```
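
If you want to run the model without the `peft` runtime, the LoRA adapter can also be merged into the base weights first. The sketch below is a minimal example, not part of the original recipe; the output directory name is a hypothetical placeholder, and it assumes enough memory to materialize the merged 7B model in bfloat16.

```python
# Hedged sketch: merge the LoRA adapter into the base model and save a
# standalone checkpoint ("codegen2.5-7b-py150k-merged" is a placeholder path).
import torch
from transformers import AutoTokenizer
from peft import AutoPeftModelForCausalLM

model = AutoPeftModelForCausalLM.from_pretrained(
    "0xk1h0/codegen2.5-7b-py150k-r20-QLoRA",
    torch_dtype=torch.bfloat16,
)
merged_model = model.merge_and_unload()  # fold the LoRA deltas into the base Linear layers
merged_model.save_pretrained("codegen2.5-7b-py150k-merged")

tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen25-7b-mono", trust_remote_code=True)
tokenizer.save_pretrained("codegen2.5-7b-py150k-merged")
```

The merged checkpoint can then be loaded with `AutoModelForCausalLM.from_pretrained`, with no `peft` dependency at inference time.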

## Training procedure