marban committed on
Commit
61b0df2
1 Parent(s): e273eda

Create handler.py

Files changed (1)
  1. handler.py +38 -0
handler.py ADDED
@@ -0,0 +1,38 @@
+ from typing import Dict, List, Any
+ from transformers import AutoProcessor, MusicgenForConditionalGeneration
+ import torch
+
+ class EndpointHandler:
+     def __init__(self, path=""):
+         # load the processor and model from the repository path
+         self.processor = AutoProcessor.from_pretrained(path)
+         self.model = MusicgenForConditionalGeneration.from_pretrained(path, torch_dtype=torch.float16).to("cuda")
+
+     def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
+         """
+         Args:
+             data (Dict[str, Any]):
+                 The payload with the text prompt and generation parameters.
+         """
+         # split the payload into the text prompt and optional generation parameters
+         inputs = data.pop("inputs", data)
+         parameters = data.pop("parameters", None)
+
+         # preprocess: tokenize the prompt and move the tensors to the GPU
+         inputs = self.processor(
+             text=[inputs],
+             padding=True,
+             return_tensors="pt").to("cuda")
+
+         # generate audio, forwarding any extra generation kwargs from the payload
+         if parameters is not None:
+             with torch.autocast("cuda"):
+                 outputs = self.model.generate(**inputs, **parameters)
+         else:
+             with torch.autocast("cuda"):
+                 outputs = self.model.generate(**inputs)
+
+         # postprocess: return the generated waveform as a plain Python list
+         prediction = outputs[0].cpu().numpy().tolist()
+
+         return [{"generated_audio": prediction}]
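
For reference, a minimal local sketch of how this handler might be exercised outside the Inference Endpoints container. The checkpoint path, the prompt, the generation parameters, and the 32 kHz sample rate are illustrative assumptions, not part of this commit:

# sketch of a local smoke test for handler.py (requires a CUDA GPU)
import numpy as np
import scipy.io.wavfile

from handler import EndpointHandler

# assumed checkpoint path; any MusicGen repository layout should work here
handler = EndpointHandler(path="facebook/musicgen-small")

payload = {
    "inputs": "lo-fi hip hop beat with mellow piano",
    # optional kwargs forwarded to model.generate()
    "parameters": {"do_sample": True, "guidance_scale": 3, "max_new_tokens": 256},
}
result = handler(payload)

# the handler returns a nested list; MusicGen's audio encoder typically runs at 32 kHz,
# adjust the rate if your checkpoint differs
audio = np.array(result[0]["generated_audio"], dtype=np.float32)
scipy.io.wavfile.write("out.wav", rate=32000, data=audio.squeeze())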