File size: 1,360 Bytes
5593e7e
83b1cb8
5593e7e
081fde1
5593e7e
83b1cb8
5593e7e
 
 
83b1cb8
 
 
 
 
 
 
081fde1
83b1cb8
 
 
 
 
 
5593e7e
081fde1
 
5593e7e
 
cb288da
 
5593e7e
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
# import io
from typing import Dict, List, Any

import requests
import torch
# import librosa
from transformers import ClapModel, ClapProcessor
# import gc
# import base64

class EndpointHandler:
    """Inference-endpoint wrapper around the LAION CLAP model.

    Loads the model and processor once at startup and serves
    text-embedding requests through ``__call__``.
    """

    def __init__(self, path=""):
        # `path` is the model directory supplied by the hosting runtime;
        # this handler ignores it and always pulls the published checkpoint.
        model_name = "laion/larger_clap_general"
        self.model = ClapModel.from_pretrained(model_name)
        self.processor = ClapProcessor.from_pretrained(model_name)

    def __call__(self, data: Dict[str, Any]):
        """Embed a text query with CLAP.

        Args:
            data: request payload; expects ``data["inputs"]`` to hold the
                query string (or list of strings) to embed.

        Returns:
            A numpy array with the text embedding, or ``None`` when the
            payload carries no ``"inputs"`` key (preserves the original
            implicit-None behavior for unrecognized payloads).
        """
        if 'inputs' not in data:
            return None
        query = data['inputs']
        text_inputs = self.processor(text=query, return_tensors="pt")
        # Inference only: disable autograd so no graph/activations are
        # built and held per request.
        with torch.no_grad():
            text_embed = self.model.get_text_features(**text_inputs)[0]
        return text_embed.numpy()