Spaces: LanHarmony (Runtime error)
LanHarmony committed
Commit ae5d7c3 · 1 Parent(s): 9a49cde
Commit message: api key

app.py CHANGED
@@ -70,7 +70,6 @@ def cut_dialogue_history(history_memory, keep_last_n_words=500):
 class ConversationBot:
     def __init__(self):
         print("Initializing VisualChatGPT")
-        self.llm = OpenAI(temperature=0, openai_api_key="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")
         self.edit = ImageEditing(device="cuda:0")
         self.i2t = ImageCaptioning(device="cuda:0")
         self.t2i = T2I(device="cuda:0")
@@ -161,6 +160,9 @@ class ConversationBot:
             # description="useful for when you want to generate a new real image from both the user desciption and a human pose image. like: generate a real image of a human from this human pose image, or generate a new real image of a human from this pose. "
             #             "The input to this tool should be a comma seperated string of two, representing the image_path and the user description")
         ]
+
+    def init_agent(self, openai_api_key):
+        self.llm = OpenAI(temperature=0, openai_api_key=openai_api_key)
         self.agent = initialize_agent(
             self.tools,
             self.llm,
@@ -170,6 +172,8 @@ class ConversationBot:
             return_intermediate_steps=True,
             agent_kwargs={'prefix': VISUAL_CHATGPT_PREFIX, 'format_instructions': VISUAL_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': VISUAL_CHATGPT_SUFFIX}, )
 
+        return gr.update(visible = True)
+
     def run_text(self, text, state):
         print("===============Running run_text =============")
         print("Inputs:", text, state)
@@ -221,7 +225,7 @@ with gr.Blocks(css="#chatbot .overflow-y-auto{height:500px}") as demo:
     chatbot = gr.Chatbot(elem_id="chatbot", label="Visual ChatGPT")
     state = gr.State([])
 
-    with gr.Row():
+    with gr.Row(visible=False) as input_raws:
         with gr.Column(scale=0.7):
            txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter, or upload an image").style(container=False)
         with gr.Column(scale=0.15, min_width=0):
@@ -229,6 +233,7 @@ with gr.Blocks(css="#chatbot .overflow-y-auto{height:500px}") as demo:
         with gr.Column(scale=0.15, min_width=0):
             btn = gr.UploadButton("Upload", file_types=["image"])
 
+    openai_api_key_textbox.submit(bot.init_agent, [openai_api_key_textbox], [input_raws])
     txt.submit(bot.run_text, [txt, state], [chatbot, state])
     txt.submit(lambda: "", None, txt)
 
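The change wires agent construction to a runtime-supplied key: the new init_agent(openai_api_key) builds the LLM and the LangChain agent, and the chat-input row starts hidden until the key textbox's submit handler returns gr.update(visible=True). Below is a minimal, self-contained sketch of this reveal-on-key-submit pattern, assuming the Gradio 3.x-era API this Space uses (gr.Row(visible=False), gr.update, Textbox.submit); the build_agent and echo helpers are illustrative stand-ins, not functions from app.py.

import gradio as gr

def build_agent(openai_api_key):
    # Stand-in for ConversationBot.init_agent: app.py constructs
    # OpenAI(temperature=0, openai_api_key=openai_api_key) and the LangChain
    # agent here, then reveals the input row by returning gr.update(visible=True).
    print("Agent initialized with a user-supplied key of length", len(openai_api_key))
    return gr.update(visible=True)

def echo(text, history):
    # Stand-in for ConversationBot.run_text: append one (user, bot) turn.
    history = history + [(text, "echo: " + text)]
    return history, history

with gr.Blocks() as demo:
    openai_api_key_textbox = gr.Textbox(
        show_label=False, type="password",
        placeholder="Paste your OpenAI API key (sk-...) and press Enter")
    chatbot = gr.Chatbot(label="Visual ChatGPT")
    state = gr.State([])

    with gr.Row(visible=False) as input_raws:  # hidden until the key is submitted
        txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter")

    # Submitting the key builds the agent and un-hides the input row.
    openai_api_key_textbox.submit(build_agent, [openai_api_key_textbox], [input_raws])
    txt.submit(echo, [txt, state], [chatbot, state])
    txt.submit(lambda: "", None, txt)

demo.launch()

Collecting the key per session keeps it out of the checked-in source, which is the point of this commit.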