nabendu1 committed
Commit 1a03bae
1 Parent(s): a16765f

Update app.py

Files changed (1)
  1. app.py +24 -5
app.py CHANGED
@@ -2,10 +2,13 @@
 import gradio as gr
 import requests
 import os
+import io
 
 API_URL1 = "https://api-inference.huggingface.co/models/cardiffnlp/twitter-roberta-base-sentiment"
 API_URL2 = "https://api-inference.huggingface.co/models/facebook/convnext-xlarge-384-22k-1k"
 API_URL3 = "https://api-inference.huggingface.co/models/microsoft/trocr-base-handwritten"
+API_URL4 = "https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5"
+
 
 bt = os.environ['HACKAITHONBEARERTOKEN']
 headers = {"Authorization": bt }
@@ -21,16 +24,32 @@ def query(mood, select_model, filepath):
     elif (select_model=="WhatIsThat"):
         data = open(filepath, 'rb' ).read()
         response = requests.post(API_URL2, headers=headers, data=data)
-    else:
+    elif (select_model=="HandWriting"):
         data = open(filepath, 'rb' ).read()
         response = requests.post(API_URL3, headers=headers, data=data)
-    return str(response.json())
+    else:
+        response = requests.post(API_URL4, headers=headers, json=mood)
+
+    if (select_model=="Sentiment"):
+        return str(response.json())
+    elif (select_model=="WhatIsThat"):
+        return str(response.json())
+    elif (select_model=="HandWriting"):
+        return str(response.json())
+    else:
+        return response.content
 
+
 def greet(mood,select_model,image):
     output = query({"inputs":mood}, select_model, image)
-    print (str(output))
-    return str(output)
+
+    if (select_model=="Text2Image"):
+        from PIL import Image
+        image = Image.open(io.BytesIO(output))
+    else:
+        print (str(output))
+        return str(output)
 
 iface = gr.Interface(
-    fn=greet, inputs=["text", gr.Radio(choices=["Sentiment", "WhatIsThat", "HandWriting"],value="Sentiment"),gr.Image(type="filepath")], outputs="text")
+    fn=greet, inputs=["text", gr.Radio(choices=["Sentiment", "WhatIsThat", "HandWriting","Text2Image"],value="Sentiment"),gr.Image(type="filepath")], outputs="text","image")
 iface.launch()
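
For reference, the new else branch in query() posts the prompt JSON straight to the Stable Diffusion endpoint and hands back the raw bytes from response.content, which greet() then decodes with PIL. Below is a minimal standalone sketch of that Text2Image path; the prompt string is illustrative, and it assumes the HACKAITHONBEARERTOKEN secret already holds the complete "Bearer ..." value, since app.py passes it into the Authorization header unchanged.

import io
import os

import requests
from PIL import Image

# Endpoint and header shape taken from app.py; the token is read from the
# HACKAITHONBEARERTOKEN environment variable (assumed to include "Bearer ").
API_URL4 = "https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5"
headers = {"Authorization": os.environ["HACKAITHONBEARERTOKEN"]}

# The Inference API takes a JSON body with an "inputs" field; for a
# text-to-image model the response body is the raw image bytes.
payload = {"inputs": "a watercolor lighthouse at dusk"}  # illustrative prompt
response = requests.post(API_URL4, headers=headers, json=payload)
response.raise_for_status()

# Decode the bytes the same way greet() does and save the result.
image = Image.open(io.BytesIO(response.content))
image.save("text2image_output.png")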
 
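As committed, the Text2Image branch of greet() builds the PIL image but never returns it, and gr.Interface(..., outputs="text","image") raises a SyntaxError because a positional argument follows a keyword argument. The sketch below shows one way the wiring could pass both outputs through a proper outputs list; it reuses query() from the diff above and is a suggestion, not the committed code.

import io

import gradio as gr
from PIL import Image


def greet(mood, select_model, image):
    # query() is the function defined earlier in app.py.
    output = query({"inputs": mood}, select_model, image)

    if select_model == "Text2Image":
        # query() returns raw image bytes here; decode them for the image
        # output and leave the text output empty.
        return "", Image.open(io.BytesIO(output))

    # The other three models return JSON; show it as text, no image.
    return str(output), None


iface = gr.Interface(
    fn=greet,
    inputs=[
        "text",
        gr.Radio(choices=["Sentiment", "WhatIsThat", "HandWriting", "Text2Image"], value="Sentiment"),
        gr.Image(type="filepath"),
    ],
    outputs=["text", "image"],
)
iface.launch()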