sebaweis committed
Commit a83cebc · 1 Parent(s): 905d897

Update README.md

Files changed (1)
  1. README.md +62 -0
README.md CHANGED
@@ -46,3 +46,65 @@ python finetune.py \
   --eval-file code_eval.jsonl --wandb-project jerboa --wandb-log-model \
   --wandb-watch gradients --num-epochs 2
  ```
+
+ Inference:
+
+ ```python
+ import torch
+ from peft import PeftModel
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ TOKENIZER_SOURCE = 'tiiuae/falcon-40b'
+ BASE_MODEL = 'tiiuae/falcon-40b'
+ LORA_REPO = 'jinaai/falcon-40b-code-alpaca-lora'
+ DEVICE = "cuda"
+
+ PROMPT = """
+ Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
+
+ ### Instruction:
+ Write a for loop in python
+
+ ### Input:
+
+ ### Response:
+ """
+
+ # Load the base model in fp16; device_map='auto' places it across available GPUs.
+ model = AutoModelForCausalLM.from_pretrained(
+     pretrained_model_name_or_path=BASE_MODEL,
+     torch_dtype=torch.float16,
+     trust_remote_code=True,
+     device_map='auto',
+ )
+
+ # Apply the fine-tuned LoRA adapter on top of the base model.
+ model = PeftModel.from_pretrained(
+     model=model,
+     model_id=LORA_REPO,
+ )
+ model.eval()
+
+ tokenizer = AutoTokenizer.from_pretrained(
+     TOKENIZER_SOURCE,
+     trust_remote_code=True,
+     padding_side='left',
+ )
+ # Falcon's tokenizer defines no pad token; reuse the EOS token.
+ tokenizer.pad_token = tokenizer.eos_token
+
+ inputs = tokenizer(PROMPT, return_tensors="pt")
+ input_ids = inputs["input_ids"].to(DEVICE)
+ input_attention_mask = inputs["attention_mask"].to(DEVICE)
+
+ # Greedy decoding; return_dict_in_generate exposes the .sequences attribute.
+ with torch.no_grad():
+     generation_output = model.generate(
+         input_ids=input_ids,
+         attention_mask=input_attention_mask,
+         return_dict_in_generate=True,
+         max_new_tokens=32,
+         eos_token_id=tokenizer.eos_token_id,
+     )
+ generation_output = generation_output.sequences[0]
+ output = tokenizer.decode(generation_output, skip_special_tokens=True)
+
+ print(output)
+ ```
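
For repeated or production inference, the LoRA weights can be folded into the base model so generation no longer goes through the adapter wrappers. Below is a minimal sketch, not part of the commit above: it assumes the `model`, `tokenizer`, `PROMPT`, and `DEVICE` objects from the README snippet and uses peft's `merge_and_unload()` helper.

```python
# Minimal sketch (assumes `model`, `tokenizer`, `PROMPT`, and `DEVICE`
# from the README snippet above are already defined).
# merge_and_unload() folds the LoRA weights into the base weights and
# returns the plain transformers model.
merged_model = model.merge_and_unload()

# Generation works as before, without the adapter indirection.
inputs = tokenizer(PROMPT, return_tensors="pt").to(DEVICE)
with torch.no_grad():
    output_ids = merged_model.generate(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_new_tokens=32,
        eos_token_id=tokenizer.eos_token_id,
    )
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))

# Optionally save the merged weights as a standalone checkpoint
# (hypothetical output path, shown for illustration):
# merged_model.save_pretrained('falcon-40b-code-alpaca-merged')
```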