# NOTE(review): the lines that originally opened this file were Hugging Face
# Space file-viewer chrome ("Spaces:", "Running", file size, commit hashes,
# and a line-number gutter) scraped along with the source. They were not
# Python and prevented the module from parsing; replaced by this comment.
from openai import OpenAI
import streamlit as st
from utils import im_2_b64
import pickle
from upload import upload_file, get_file
# App bootstrap: page title, OpenAI client, and per-session state.
st.title("ChatGPT with Vision")
# API key comes from Streamlit secrets, never hard-coded in source.
client = OpenAI(api_key=st.secrets["OPENAI_KEY"])
# Chat transcript: a list of {"role", "content"} dicts in OpenAI message format.
if "messages" not in st.session_state:
    st.session_state.messages = []
# Rotating widget key for the file uploader; bumping it forces Streamlit to
# rebuild the uploader, which is the only way to programmatically clear it.
if "uploader_key" not in st.session_state:
    st.session_state["uploader_key"] = 0
# Restore a shared conversation when the page is opened as ?id=<file-id> and
# no transcript exists yet in this session.
if len(st.session_state.messages) == 0 and "id" in st.query_params:
    with st.spinner("Loading chat..."):
        id = st.query_params["id"]
        data = get_file(id, 'chatgpt-vision-007')
        # SECURITY: pickle.loads on bytes fetched from remote storage keyed by
        # a user-supplied id — unpickling untrusted data can execute arbitrary
        # code. Consider a safe format (e.g. JSON) instead.
        st.session_state.messages = pickle.loads(data)
def clear_uploader():
    """Reset the image uploader, then restart the script run.

    Streamlit has no direct "clear" API for st.file_uploader; rotating the
    widget key makes the next run build a fresh, empty uploader instead.
    """
    st.session_state["uploader_key"] = st.session_state["uploader_key"] + 1
    st.rerun()
def undo():
    """Remove the most recent user/assistant exchange and refresh the UI.

    Pops up to two messages (the user turn and the assistant reply). Each pop
    is individually guarded: the original checked the length once and popped
    twice, which raised IndexError whenever the transcript held an odd number
    of messages (e.g. a lone user turn left behind by a failed API call).
    """
    for _ in range(2):
        if st.session_state.messages:
            st.session_state.messages.pop()
    st.rerun()
def share():
    """Persist the current transcript and display a shareable permalink.

    The message list is pickled, uploaded to the 'chatgpt-vision-007' store,
    and the returned file id is embedded in the app URL as ?id=<id> so the
    conversation can be restored on page load.
    """
    payload = pickle.dumps(st.session_state.messages)
    file_id = upload_file(payload, 'chatgpt-vision-007')
    share_url = f"https://umbc-nlp-chatgpt-vision.hf.space/?id={file_id}"
    st.success(f"Share URL: {share_url}")
# Sidebar: sharing, transcript controls, and generation settings.
with st.sidebar:
    if st.button("Share"):
        share()
    undo_col, clear_col = st.columns(2)
    with undo_col:
        if st.button("Undo"):
            undo()
    with clear_col:
        if st.button("Clear chat"):
            st.session_state.messages = []
            clear_uploader()
    # Sampling parameters read later by the completion call; the sliders use a
    # collapsed placeholder label with a subheader above each one instead.
    with st.expander("Advanced Configuration"):
        st.subheader("Temperature")
        temperature = st.slider(label="x", min_value=0.1, max_value=1.0, value=0.2, step=0.1, label_visibility='collapsed')
        st.subheader("Max Tokens")
        max_tokens = st.slider(label="x", min_value=32, max_value=1024, value=256, step=32, label_visibility='collapsed')
        st.subheader("Random Seed")
        random_seed = st.number_input("Seed", min_value=0, max_value=1000000, value=42, step=1, label_visibility='collapsed')
# Image attachments for the next user message. The key is the session's
# rotating uploader_key so clear_uploader() can reset the widget.
with st.expander("Image Input", expanded=True):
    images = st.file_uploader(
        label="Image Upload",
        type=["png", "jpg", "jpeg"],
        accept_multiple_files=True,
        label_visibility="collapsed",
        key=st.session_state["uploader_key"],
    )
# Replay the stored transcript: text parts render as markdown; any images in
# a message are laid out side by side in equal-width columns.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        parts = message["content"]
        for part in parts:
            if part["type"] == "text":
                st.markdown(part["text"])
        image_urls = [p["image_url"]["url"] for p in parts if p["type"] == "image_url"]
        if image_urls:
            for column, url in zip(st.columns(len(image_urls)), image_urls):
                with column:
                    st.image(url)
def push_message(role, content, images=None):
    """Build a multi-part chat message, append it to the transcript, return it.

    The message content follows the OpenAI vision format: one text part, then
    one image_url part per attached image, each image inlined as a base64
    JPEG data URL produced by im_2_b64.
    """
    parts = [{"type": "text", "text": content}]
    for image in images or []:
        encoded = im_2_b64(image)
        parts.append({
            "type": "image_url",
            "image_url": {
                "url": f"data:image/jpeg;base64,{encoded.decode('utf-8')}",
            },
        })
    message = {"role": role, "content": parts}
    st.session_state.messages.append(message)
    return message
# Main chat turn. NOTE(review): the original toggled a chat_input_disabled
# local around the API call, but the st.chat_input widget was created before
# the flag was ever set True, and each Streamlit rerun re-executed the reset
# to False — the toggling never disabled anything and has been removed.
if prompt := st.chat_input("Type a message", key="chat_input"):
    # Record the user's turn (text plus any uploaded images) and render it.
    push_message("user", prompt, images)
    with st.chat_message("user"):
        st.markdown(prompt)
        if images:
            cols = st.columns(len(images))
            for i, image in enumerate(images):
                with cols[i]:
                    st.image(image)
    with st.chat_message("assistant"):
        # Send the full history, including the just-pushed user message.
        messages = [
            {"role": m["role"], "content": m["content"]}
            for m in st.session_state.messages
        ]
        stream = client.chat.completions.create(
            model="gpt-4-vision-preview",
            messages=messages,
            stream=True,
            seed=random_seed,
            temperature=temperature,
            max_tokens=max_tokens,
        )
        # Stream tokens into the UI and capture the final assembled text.
        response = st.write_stream(stream)
        push_message("assistant", response)
    # Reset the uploader so the images aren't re-sent with the next turn;
    # clear_uploader() also triggers st.rerun().
    clear_uploader()
# (trailing file-viewer gutter artifact removed)