"""Gradio demo that sends a prompt (and optionally an image) to one of four
Hugging Face Inference API models and shows the result."""

import gradio as gr
import requests
import os
import io

from PIL import Image

# Hugging Face Inference API endpoints, one per selectable model.
API_URL1 = "https://api-inference.huggingface.co/models/cardiffnlp/twitter-roberta-base-sentiment"  # text sentiment
API_URL2 = "https://api-inference.huggingface.co/models/facebook/convnext-xlarge-384-22k-1k"        # image classification
API_URL3 = "https://api-inference.huggingface.co/models/microsoft/trocr-base-handwritten"           # handwriting OCR
API_URL4 = "https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5"             # text-to-image

# NOTE(review): the env var appears to hold the full header value
# (e.g. "Bearer hf_...") since it is used verbatim — TODO confirm.
bt = os.environ['HACKAITHONBEARERTOKEN']
headers = {"Authorization": bt}


def query(mood, select_model, filepath):
    """Send one inference request to the endpoint chosen by *select_model*.

    Args:
        mood: JSON payload (``{"inputs": <text>}``) used by the two
            text-driven models (Sentiment, Text2Image).
        select_model: one of ``"Sentiment"``, ``"WhatIsThat"``,
            ``"HandWriting"``, ``"Text2Image"``.
        filepath: path to the uploaded image; only read for the two
            image-input models (WhatIsThat, HandWriting).

    Returns:
        ``str`` of the parsed JSON response for the three JSON-returning
        models, or raw image bytes for Text2Image.
    """
    print(select_model)
    print(filepath)
    if select_model in ("WhatIsThat", "HandWriting"):
        url = API_URL2 if select_model == "WhatIsThat" else API_URL3
        # Use a context manager so the file handle is closed
        # (the original leaked it via open(...).read()).
        with open(filepath, 'rb') as f:
            data = f.read()
        response = requests.post(url, headers=headers, data=data)
        return str(response.json())
    if select_model == "Sentiment":
        response = requests.post(API_URL1, headers=headers, json=mood)
        return str(response.json())
    # Any other value (the UI only offers "Text2Image") returns raw image bytes.
    response = requests.post(API_URL4, headers=headers, json=mood)
    return response.content


def greet(mood, select_model, image):
    """Gradio callback: run the selected model and return its output.

    For Text2Image the API's raw bytes are decoded into a PIL image and
    returned so the ``outputs="image"`` component can display it.
    """
    output = query({"inputs": mood}, select_model, image)
    if select_model == "Text2Image":
        # BUG FIX: return the decoded image. The original built the PIL
        # Image but then fell through and returned str(<bytes>) instead,
        # so the generated picture was never displayed.
        return Image.open(io.BytesIO(output))
    print(str(output))
    return str(output)


iface = gr.Interface(
    fn=greet,
    inputs=[
        "text",
        gr.Radio(
            choices=["Sentiment", "WhatIsThat", "HandWriting", "Text2Image"],
            value="Sentiment",
        ),
        gr.Image(type="filepath"),
    ],
    outputs="image",
)
iface.launch()