mogaio committed
Commit 7e1a2ea
1 Parent(s): a4db5f4

Upload 2 files

Files changed (2):
  1. handler.py +53 -0
  2. requirements.txt +3 -0
handler.py ADDED
@@ -0,0 +1,53 @@
+ from typing import Dict, List, Any
+ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
+ import torch
+ from peft import PeftModel
+
+ class EndpointHandler:
+     def __init__(self, path=""):
+         # Load the base model and tokenizer, then attach the LoRA adaptor.
+         base_model_name = "snorkelai/Snorkel-Mistral-PairRM-DPO"
+         lora_adaptor = "mogaio/Snorkel-Mistral-PairRM-DPO-Freakonomics_MTD-TCD-Lora"
+
+         self.tokenizer = AutoTokenizer.from_pretrained(base_model_name)
+         self.tokenizer.pad_token = self.tokenizer.eos_token
+
+         # 4-bit NF4 quantization with double quantization; compute in bfloat16.
+         self.bnb_config = BitsAndBytesConfig(
+             load_in_4bit=True,
+             bnb_4bit_use_double_quant=True,
+             bnb_4bit_quant_type="nf4",
+             bnb_4bit_compute_dtype=torch.bfloat16,
+         )
+
+         self.model = AutoModelForCausalLM.from_pretrained(
+             base_model_name,
+             quantization_config=self.bnb_config,
+             device_map="auto",  # Automatically place model layers on available devices.
+         )
+         self.model.config.use_cache = True  # Enable the KV cache for faster generation.
+
+         # Apply the LoRA adaptor on top of the quantized base model.
+         self.inference_model = PeftModel.from_pretrained(self.model, lora_adaptor)
+
+     def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
+         INTRO = "Below is a conversation between a user and you."
+         END = "Instruction: Write a response appropriate to the conversation."
+
+         # Process input: the user message and optional generation parameters.
+         inputs = data.pop("inputs", data)
+         parameters = data.pop("parameters", None)
+
+         prompt = "<user>:" + inputs
+
+         device = "cuda" if torch.cuda.is_available() else "cpu"
+
+         # Build the full prompt and tokenize it on the target device.
+         model_inputs = self.tokenizer(
+             INTRO + "\n " + prompt + "\n " + END + "\n <assistant>:",
+             return_tensors="pt",
+         ).to(device)
+
+         # Default sampling settings; user-supplied parameters override them.
+         gen_kwargs = dict(
+             pad_token_id=self.tokenizer.pad_token_id,
+             max_new_tokens=100,
+             do_sample=True,
+             temperature=0.1,
+             top_p=0.9,
+             repetition_penalty=1.5,
+         )
+         if parameters is not None:
+             gen_kwargs.update(parameters)
+
+         output = self.inference_model.generate(input_ids=model_inputs["input_ids"], **gen_kwargs)
+         reply = self.tokenizer.batch_decode(output, skip_special_tokens=True)
+
+         return [{"generated_reply": reply}]
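
For reference, a minimal local smoke test of this handler might look like the sketch below. It assumes the base model and LoRA adaptor are reachable on the Hub and that a CUDA GPU is available for 4-bit loading; the `payload` dict is a hypothetical example mirroring the `inputs`/`parameters` shape that `__call__` expects.

    # Hypothetical local test; not part of the commit.
    from handler import EndpointHandler

    handler = EndpointHandler()

    payload = {
        "inputs": " What is the conversation about?",
        "parameters": {"max_new_tokens": 150},  # optional generation overrides
    }

    print(handler(payload)[0]["generated_reply"])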
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ bitsandbytes
+ peft
+ transformers