Vaibhav Srivastav committed on
Commit ce15521
1 Parent(s): fa20553
Files changed (1)
  1. handler.py +36 -0
handler.py ADDED
@@ -0,0 +1,36 @@
+ from typing import Dict, List, Any
+ from transformers import AutoProcessor, MusicgenForConditionalGeneration
+ import torch
+
+
+ class EndpointHandler:
+     def __init__(self, path=""):
+         # load the processor and model weights from the repository path
+         self.processor = AutoProcessor.from_pretrained(path)
+         self.model = MusicgenForConditionalGeneration.from_pretrained(path).to("cuda")
+
+     def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
+         """
+         Args:
+             data (:dict:):
+                 The payload with the text prompt and generation parameters.
+         """
+         # split the payload into the text prompt and optional generation parameters
+         inputs = data.pop("inputs", data)
+         parameters = data.pop("parameters", None)
+
+         # preprocess: tokenize the prompt and move the tensors to the GPU
+         inputs = self.processor(
+             text=inputs, padding=True, return_tensors="pt"
+         ).to("cuda")
+
+         # generate audio, forwarding any extra generation kwargs from the payload
+         if parameters is not None:
+             outputs = self.model.generate(**inputs, max_new_tokens=256, **parameters)
+         else:
+             outputs = self.model.generate(**inputs, max_new_tokens=256)
+
+         # postprocess: move the prediction to the CPU and convert to NumPy
+         prediction = outputs[0].cpu().numpy()
+
+         return [{"generated_audio": prediction}]
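For reference, a minimal local smoke test of this handler (not part of the commit) might look like the sketch below; the model path "facebook/musicgen-small", the prompt, and the generation parameters are illustrative assumptions, and a CUDA device is required.

# Hypothetical local test for handler.py; assumes a CUDA device is available
# and that "facebook/musicgen-small" resolves to a MusicGen checkpoint.
from handler import EndpointHandler

handler = EndpointHandler(path="facebook/musicgen-small")
payload = {
    "inputs": "lo-fi hip hop beat with mellow piano",
    "parameters": {"do_sample": True, "guidance_scale": 3.0},
}
result = handler(payload)
audio = result[0]["generated_audio"]  # NumPy array of generated audio samples
print(type(audio), audio.shape)

The payload mirrors the convention the handler expects: "inputs" carries the text prompt, and the optional "parameters" dict is forwarded as keyword arguments to model.generate().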