florentgbelidji (HF staff) committed
Commit 9d9b5e2
1 Parent(s): e53bc58

Updated requirements.txt and transformed pipeline to handler

Files changed (3)
  1. README.md +10 -2
  2. pipeline.py → handler.py +4 -4
  3. requirements.txt +4 -3
README.md CHANGED
@@ -36,8 +36,16 @@ HF_TOKEN = ""
 
 def predict(path_to_image: str = None):
     with open(path_to_image, "rb") as i:
-        b64 = base64.b64encode(i.read())
-    payload = {"inputs": {"image": b64.decode("utf-8")}}
+        image = i.read()
+    payload = {
+        "inputs": {"image": image},
+        "parameters": {
+            "sample": True,
+            "top_p":0.9,
+            "min_length":5,
+            "max_length":20
+        }
+    }
     response = r.post(
         ENDPOINT_URL, headers={"Authorization": f"Bearer {HF_TOKEN}"}, json=payload
     )
pipeline.py → handler.py RENAMED
@@ -11,7 +11,7 @@ from torchvision.transforms.functional import InterpolationMode
 
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
-class PreTrainedPipeline():
+class EndpointHandler():
     def __init__(self, path=""):
         # load the optimized model
         self.model_path = os.path.join(path,'model_large_caption.pth')
@@ -39,14 +39,14 @@ class PreTrainedPipeline():
         data (:obj:):
             includes the input data and the parameters for the inference.
         Return:
-            A :obj:`dict`:. The object returned should be a dict of one list like [[{"label": 0.9939950108528137}]] containing :
+            A :obj:`dict`:. The object returned should be a dict of one list like {"caption": ["A hugging face at the office"]} containing :
            - "caption": A string corresponding to the generated caption.
        """
        inputs = data.pop("inputs", data)
        parameters = data.pop("parameters", {})
 
-        # decode base64 image to PIL
-        image = Image.open(BytesIO(base64.b64decode(inputs['image'])))
+
+        image = Image.open(BytesIO(inputs['image']))
        image = self.transform(image).unsqueeze(0).to(device)
        with torch.no_grad():
            caption = self.model.generate(
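
With pipeline.py renamed to handler.py and the class renamed to EndpointHandler, the handler now expects raw image bytes instead of a base64 string. The snippet below is only a sketch of a local smoke test under a few assumptions: it assumes the repository root (containing handler.py and model_large_caption.pth) is the working directory, that the handler exposes the usual __call__(data) entry point whose docstring appears in the diff above, and that "cat.jpg" is a placeholder path for any local image.

    from handler import EndpointHandler

    # Assumption: run from the repo root so model_large_caption.pth sits next to handler.py.
    handler = EndpointHandler(path=".")

    # Read the image as raw bytes -- no base64 step anymore, matching the updated handler.
    with open("cat.jpg", "rb") as f:  # "cat.jpg" is a hypothetical example file
        image_bytes = f.read()

    payload = {
        "inputs": {"image": image_bytes},
        "parameters": {"sample": True, "top_p": 0.9, "min_length": 5, "max_length": 20},
    }

    # Expected shape per the updated docstring: {"caption": ["..."]}
    print(handler(payload))

The same payload structure (raw bytes under "inputs.image" plus a "parameters" dict) is what the updated README example builds before posting to the endpoint.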
requirements.txt CHANGED
@@ -1,4 +1,5 @@
-timm
-fairscale
+timm==0.4.12
+transformers==4.15.0
+fairscale==0.4.4
 requests
-Pillow
+Pillow