Ashhar
committed on
Commit
·
0b392f8
1
Parent(s):
5d5d235
better logging
Browse files
app.py
CHANGED
@@ -158,7 +158,7 @@ def __getImagePromptDetails(prompt: str, response: str):
|
|
158 |
(enhancePrompt, imagePrompt, loaderText) = __getRawImagePromptDetails(prompt, response)
|
159 |
|
160 |
if imagePrompt or enhancePrompt:
|
161 |
-
U.pprint(f"[Raw] {enhancePrompt=} | {imagePrompt=}")
|
162 |
|
163 |
promptEnhanceModelType: ModelType = "LLAMA"
|
164 |
U.pprint(f"{promptEnhanceModelType=}")
|
@@ -170,18 +170,20 @@ def __getImagePromptDetails(prompt: str, response: str):
|
|
170 |
|
171 |
systemPrompt = "You help in creating prompts for image generation"
|
172 |
promptPrefix = f"{enhancePrompt}\nAnd then use the above to" if enhancePrompt else "Use the text below to"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
173 |
|
174 |
llmArgs = {
|
175 |
"model": model,
|
176 |
"messages": [{
|
177 |
"role": "user",
|
178 |
-
"content":
|
179 |
-
{promptPrefix} create a prompt for image generation (limit to less than 500 words)
|
180 |
-
|
181 |
-
{imagePrompt}
|
182 |
-
|
183 |
-
Return only the final Image Generation Prompt, and nothing else
|
184 |
-
"""
|
185 |
}],
|
186 |
"temperature": 1,
|
187 |
"max_tokens": 2000
|
|
|
158 |
(enhancePrompt, imagePrompt, loaderText) = __getRawImagePromptDetails(prompt, response)
|
159 |
|
160 |
if imagePrompt or enhancePrompt:
|
161 |
+
# U.pprint(f"[Raw] {enhancePrompt=} | {imagePrompt=}")
|
162 |
|
163 |
promptEnhanceModelType: ModelType = "LLAMA"
|
164 |
U.pprint(f"{promptEnhanceModelType=}")
|
|
|
170 |
|
171 |
systemPrompt = "You help in creating prompts for image generation"
|
172 |
promptPrefix = f"{enhancePrompt}\nAnd then use the above to" if enhancePrompt else "Use the text below to"
|
173 |
+
enhancePrompt = f"""
|
174 |
+
{promptPrefix} create a prompt for image generation (limit to less than 500 words)
|
175 |
+
|
176 |
+
{imagePrompt}
|
177 |
+
|
178 |
+
Return only the final Image Generation Prompt, and nothing else
|
179 |
+
"""
|
180 |
+
U.pprint(f"[Raw] {enhancePrompt=}")
|
181 |
|
182 |
llmArgs = {
|
183 |
"model": model,
|
184 |
"messages": [{
|
185 |
"role": "user",
|
186 |
+
"content":
|
|
|
|
|
|
|
|
|
|
|
|
|
187 |
}],
|
188 |
"temperature": 1,
|
189 |
"max_tokens": 2000
|