Upload TextGenerationHandlerForString.py
TextGenerationHandlerForString.py
ADDED
@@ -0,0 +1,89 @@
import gc
import json
import logging

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

from ts.torch_handler.base_handler import BaseHandler

logger = logging.getLogger(__name__)


class TextGenerationHandlerForString(BaseHandler):
    def __init__(self):
        super(TextGenerationHandlerForString, self).__init__()
        self.model = None
        self.tokenizer = None
        self.device = None
        self.task_config = None
        self.initialized = False

    def load_model(self, model_dir):
        if self.device.type == "cuda":
            self.model = AutoModelForCausalLM.from_pretrained(
                model_dir, torch_dtype="auto", low_cpu_mem_usage=True
            )
            # Fall back to fp16 on GPU when the checkpoint is stored in fp32.
            if self.model.dtype == torch.float32:
                self.model = self.model.half()
        else:
            self.model = AutoModelForCausalLM.from_pretrained(
                model_dir, torch_dtype="auto"
            )
        self.tokenizer = AutoTokenizer.from_pretrained(model_dir)
        # Reuse the model's own "text-generation" defaults (e.g. do_sample,
        # max_length) when the config provides them; otherwise start empty.
        try:
            self.task_config = self.model.config.task_specific_params["text-generation"]
        except (KeyError, TypeError):
            self.task_config = {}
        # TODO: Need to compare performance
        self.model.to(self.device, non_blocking=True)

    def initialize(self, ctx):
        self.manifest = ctx.manifest
        properties = ctx.system_properties
        model_dir = properties.get("model_dir")
        self.device = torch.device(
            "cuda:" + str(properties.get("gpu_id"))
            if torch.cuda.is_available()
            else "cpu"
        )
        self.load_model(model_dir)
        self.model.eval()
        self.initialized = True

    def preprocess(self, requests):
        # Note: only a single request per batch is effectively supported;
        # each iteration overwrites the previous entries, so the last
        # request in the batch wins.
        input_batch = {}
        for data in requests:
            body = data.get("body")
            input_batch["input_text"] = body.get("text")
            input_batch["num_samples"] = body.get("num_samples")
            input_batch["length"] = body.get("length")
        del requests
        gc.collect()
        return input_batch

    def inference(self, input_batch):
        input_text = input_batch["input_text"]
        length = input_batch["length"]
        num_samples = input_batch["num_samples"]
        input_ids = self.tokenizer.encode(input_text, return_tensors="pt").to(
            self.device
        )
        self.task_config["max_length"] = length
        self.task_config["num_return_sequences"] = num_samples
        inference_output = self.model.generate(input_ids, **self.task_config)
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        del input_batch
        gc.collect()
        return inference_output

    def postprocess(self, inference_output):
        output = self.tokenizer.batch_decode(
            inference_output.tolist(), skip_special_tokens=True
        )
        del inference_output
        gc.collect()
        return [json.dumps(output, ensure_ascii=False)]

    def handle(self, data, context):
        self.context = context
        data = self.preprocess(data)
        data = self.inference(data)
        data = self.postprocess(data)
        return data
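For context, a request to this handler supplies "text", "num_samples", and "length" in a JSON body, and the response is a JSON-encoded list of generated strings. The sketch below is a hypothetical client, assuming the handler has been packaged with torch-model-archiver and registered with TorchServe under the model name "text-gen" on the default inference port 8080; the host, port, and model name are assumptions, not part of this file.

# Hypothetical client sketch for the handler above. Assumes a TorchServe
# deployment at localhost:8080 with the model registered as "text-gen";
# adjust host, port, and model name to match your deployment.
import requests

payload = {
    "text": "Once upon a time",  # read by preprocess() as body["text"]
    "num_samples": 2,            # becomes generate(num_return_sequences=2)
    "length": 50,                # becomes generate(max_length=50)
}
resp = requests.post(
    "http://127.0.0.1:8080/predictions/text-gen",
    json=payload,
    timeout=120,
)
resp.raise_for_status()
# postprocess() returns a JSON array of decoded strings.
print(resp.json())

Note that generate() with num_return_sequences greater than 1 requires sampling or beam search to be enabled; for GPT-2-style checkpoints the "text-generation" entry in the model config typically sets do_sample, which is exactly what load_model() picks up as task_config.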