# ChatGPT-Vision / app.py
import os

import requests
import streamlit as st
from freeGPT import Client
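# Streamlit UI: title, usage hint, a scrollable chat-history container,
# an optional image uploader, and the chat input box.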
st.title("ChatGPT Vision")
st.write("Upload an image to ask a question about it. To generate an image instead, include 'Generate', 'Draw', or 'Imagine' in your query followed by a prompt.")
msgs = st.container(height=500)
st.write("Upload an image to use vision (optional)")
file = st.file_uploader("Upload image")
inpt = st.chat_input(placeholder="Enter question...")
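# Hugging Face Inference API endpoints: BLIP for image captioning and
# Realistic Vision (a Stable Diffusion checkpoint) for text-to-image.
# The API token is read from the "api_token" environment variable / Space secret.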
API_URL_BLIP = "https://api-inference.huggingface.co/models/Salesforce/blip-image-captioning-large"
API_URL_IMG = "https://api-inference.huggingface.co/models/stablediffusionapi/realistic-vision-v51"
api_token = os.environ.get("api_token")
headers = {"Authorization": f"Bearer {api_token}"}
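# Caption an uploaded image by POSTing its raw bytes to the BLIP endpoint.
# The Inference API returns a JSON list such as [{"generated_text": "..."}].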
def blip_query(image_bytes):
    response = requests.post(API_URL_BLIP, headers=headers, data=image_bytes)
    return response.json()
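# Ask freeGPT's GPT-3 model for a completion; create_completion(model, prompt)
# returns the reply text.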
def generate_answer(prompt):
    resp = Client.create_completion("gpt3", prompt)
    return resp
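# Generate an image via the Stable Diffusion endpoint; the Inference API
# expects a JSON payload like {"inputs": "<prompt>"} and returns raw image bytes.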
def generate_image(payload):
    response = requests.post(API_URL_IMG, headers=headers, json=payload)
    return response.content
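# Route the query: check for image-generation keywords first (otherwise the
# text-only branch below would swallow those prompts), then vision Q&A when an
# image is attached, and finally plain chat.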
if inpt and ("Generate" in inpt or "Draw" in inpt or "Imagine" in inpt):
    payload = {"inputs": inpt}
    output = generate_image(payload)
    if output:
        aimsg = msgs.chat_message("Assistant")
        aimsg.write("Image generated successfully!")
        aimsg.image(output, caption="Generated Image")
elif inpt and file is not None:
    caption = blip_query(file.read())
    pp = ("Generate an answer to this question: " + inpt +
          ". Use this image description to give the answer: " +
          caption[0]["generated_text"])
    output = generate_answer(pp)
    aimsg = msgs.chat_message("Assistant")
    aimsg.write(output)
elif inpt:
    output = generate_answer(inpt)
    aimsg = msgs.chat_message("Assistant")
    aimsg.write(output)