Sophia Zell committed on
Commit
f393c3e
1 Parent(s): c68f652

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -5
app.py CHANGED
@@ -8,7 +8,7 @@ translator = pipeline("translation", model="Helsinki-NLP/opus-mt-en-de")
8
  def blipofasinki(input_img):
9
  b64_string = gr.processing_utils.encode_url_or_file_to_base64(input_img)
10
  #blip-Nucleus
11
- """responsen = requests.post(url='https://hf.space/embed/Salesforce/BLIP/+/api/predict/', json={"data": [ b64_string,"Image Captioning","None",str('Nucleus sampling')]})
12
  jresn = responsen.json()
13
  capn = jresn["data"][0]
14
  offset = len(str("caption:"))
@@ -21,18 +21,17 @@ def blipofasinki(input_img):
21
  capb = jresb["data"][0]
22
  capb = capb[offset:]
23
  trans_capb = translator(capb)
24
- tcb = trans_capb[0]['translation_text']"""
25
  #ofa
26
  responseo = requests.post(url='https://hf.space/embed/OFA-Sys/OFA-Image_Caption/+/api/predict/', json={"data": [b64_string]})
27
  jreso = responseo.json()
28
  capo = jreso["data"][0]
29
  trans_capo = translator(capo)
30
  tco = trans_capo[0]['translation_text']
31
- #return [tcn, tcb, tco]
32
- return [str("Hallo"), capo, tco]
33
 
34
 
35
- description = "BACK IN A SECOND, doing ofa only shit. A direct comparison in image captioning between BLIP and OFA (in German translated with Helsinki)."
36
 
37
  input_ = [gr.inputs.Image(type='filepath', label="Input Image")]
38
 
8
  def blipofasinki(input_img):
9
  b64_string = gr.processing_utils.encode_url_or_file_to_base64(input_img)
10
  #blip-Nucleus
11
+ responsen = requests.post(url='https://hf.space/embed/Salesforce/BLIP/+/api/predict/', json={"data": [ b64_string,"Image Captioning","None",str('Nucleus sampling')]})
12
  jresn = responsen.json()
13
  capn = jresn["data"][0]
14
  offset = len(str("caption:"))
21
  capb = jresb["data"][0]
22
  capb = capb[offset:]
23
  trans_capb = translator(capb)
24
+ tcb = trans_capb[0]['translation_text']
25
  #ofa
26
  responseo = requests.post(url='https://hf.space/embed/OFA-Sys/OFA-Image_Caption/+/api/predict/', json={"data": [b64_string]})
27
  jreso = responseo.json()
28
  capo = jreso["data"][0]
29
  trans_capo = translator(capo)
30
  tco = trans_capo[0]['translation_text']
31
+ return [tcn, tcb, tco]
 
32
 
33
 
34
+ description = "A direct comparison in image captioning between BLIP and OFA (in German translated with Helsinki)."
35
 
36
  input_ = [gr.inputs.Image(type='filepath', label="Input Image")]
37