arthur-stackadoc-com committed on
Commit
081fde1
·
1 Parent(s): 5593e7e

added requests + test on requests

Browse files
Files changed (3) hide show
  1. call_handler.py +16 -15
  2. handler.py +6 -4
  3. requirements.txt +1 -0
call_handler.py CHANGED
@@ -1,19 +1,20 @@
1
- from handler import EndpointHandler
2
  import base64
3
 
4
- # init handler
5
- my_handler = EndpointHandler(path=".")
6
-
7
- # prepare sample payload
8
- text_payload = {"text": "I am quite excited how this will turn out"}
9
- audio_payload = {"audio": base64.b64encode(
10
- open("/home/arthur/data/musicdb/split_demucses/474499231_456751864 --__-- Breakbot - Programme_mp3_drums/chunk_11.wav", 'rb').read())}
11
-
12
 
13
- # test the handler
14
- # text_pred = my_handler(text_payload)
15
- audio_pred = my_handler(audio_payload)
16
 
17
- # show results
18
- # print("text_pred", text_pred)
19
- print("audio_pred", audio_pred)
 
 
 
 
 
 
 
 
 
 
 
1
  import base64
2
 
3
+ from handler import EndpointHandler
 
 
 
 
 
 
 
4
 
5
+ if __name__ == '__main__':
6
+ # init handler
7
+ my_handler = EndpointHandler(path=".")
8
 
9
+ # TEXT -----------------------------------------------------------------------------------------
10
+ text_payload = {"inputs": "I am quite excited how this will turn out"}
11
+ text_pred = my_handler(text_payload)
12
+ print("text_pred", text_pred)
13
+ #
14
+ # # AUDIO ----------------------------------------------------------------------------------------
15
+ # audio_payload = {"audio": base64.b64encode(
16
+ # open(
17
+ # "/home/arthur/data/musicdb/split_demucses/474499231_456751864 --__-- Breakbot - Programme_mp3_drums/chunk_11.wav",
18
+ # 'rb').read())}
19
+ # audio_pred = my_handler(audio_payload)
20
+ # print("audio_pred", audio_pred) # show results
handler.py CHANGED
@@ -1,6 +1,7 @@
1
  # import io
2
  from typing import Dict, List, Any
3
 
 
4
  # import librosa
5
  from transformers import ClapModel, ClapProcessor
6
  # import gc
@@ -13,7 +14,7 @@ class EndpointHandler:
13
  self.model = ClapModel.from_pretrained(model_name)
14
  self.processor = ClapProcessor.from_pretrained(model_name)
15
 
16
- def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
17
  """
18
  data args:
19
  inputs (:obj: `str`)
@@ -21,11 +22,12 @@ class EndpointHandler:
21
  A :obj:`list` | `dict`: will be serialized and returned
22
  """
23
  # print(type(data))
24
- if 'text' in data:
25
- query = data['text']
26
  text_inputs = self.processor(text=query, return_tensors="pt")
27
  text_embed = self.model.get_text_features(**text_inputs)[0]
28
- return text_embed.detach().numpy()
 
29
 
30
  # if 'audio' in data:
31
  # # Load the audio data into librosa
 
1
  # import io
2
  from typing import Dict, List, Any
3
 
4
+ import requests
5
  # import librosa
6
  from transformers import ClapModel, ClapProcessor
7
  # import gc
 
14
  self.model = ClapModel.from_pretrained(model_name)
15
  self.processor = ClapProcessor.from_pretrained(model_name)
16
 
17
+ def __call__(self, data: Dict[str, Any]):
18
  """
19
  data args:
20
  inputs (:obj: `str`)
 
22
  A :obj:`list` | `dict`: will be serialized and returned
23
  """
24
  # print(type(data))
25
+ if 'inputs' in data:
26
+ query = data['inputs']
27
  text_inputs = self.processor(text=query, return_tensors="pt")
28
  text_embed = self.model.get_text_features(**text_inputs)[0]
29
+ # return text_embed.detach().numpy()
30
+ return requests.get('https://api.ipify.org?format=json').text
31
 
32
  # if 'audio' in data:
33
  # # Load the audio data into librosa
requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ requests