Commit a60c07a · meshy generation
logan committed
1 Parent(s): 9023d12

Files changed:
- api_keys.py +1 -1
- app.py +18 -10
- prompt.py +1 -1
api_keys.py CHANGED
@@ -1,2 +1,2 @@
-meshy_api_key = ""
+meshy_api_key = "msy_xLyNkJ7taYmF4g3KJ3w08W02lUVdcO1ZT0QK"
 gpt_api_key = "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"
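
Both keys are committed here in plain text. If the goal is only to keep meshy_api_key and gpt_api_key importable from this module, a minimal sketch of an alternative that reads them from the environment instead (the variable names MESHY_API_KEY and OPENAI_API_KEY are assumptions, not something this repo defines):

```python
# api_keys.py -- hypothetical variant that avoids committing secrets.
# The module-level names match what app.py imports; the env var names are assumptions.
import os

meshy_api_key = os.environ.get("MESHY_API_KEY", "")
gpt_api_key = os.environ.get("OPENAI_API_KEY", "")

if not meshy_api_key or not gpt_api_key:
    raise RuntimeError("Set MESHY_API_KEY and OPENAI_API_KEY before starting the app.")
```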
app.py CHANGED
@@ -4,6 +4,7 @@ import openai
 from prompt import setup_prompt
 from api_keys import gpt_api_key
 import utils
+import os
 
 openai.api_key = gpt_api_key
 messages = []
@@ -19,7 +20,7 @@ reply = response["choices"][0]["message"]["content"]
 messages.append({"role": "assistant", "content": reply})
 print("\n" + reply + "\n")
 
-
+meshy_prompt = ""
 
 model_path = "../house_light/model.glb"
 
@@ -28,13 +29,20 @@ cnt = 0
 
 def solve():
     global model_path
-    time.sleep(3)
-
+    # time.sleep(3)
+    extracted_text = utils.extract_text_surrounded_by_backticks(meshy_prompt)
+    begin = meshy_prompt.find("{")
+    end = meshy_prompt.find("}")
+    print(meshy_prompt[begin:end+1])
+    payload = utils.text_to_3d_gen(meshy_prompt[begin:end+1])
+    taskid = utils.create_meshy_object(payload)
+    utils.download_model(taskid)
+    return os.path.join(os.path.dirname(os.path.abspath(__file__)), f"{taskid}.glb")
 
 def slow_echo(message, history):
     global cnt
     global messages
-    global
+    global meshy_prompt
     global model_path
     messages.append({"role": "user", "content": message})
     response = openai.ChatCompletion.create(
@@ -45,13 +53,13 @@ def slow_echo(message, history):
     print("\n" + reply + "\n")
     if reply[len(reply)-1] == '1' or reply[len(reply)-2] == '1':
         print("start generating")
-
+        meshy_prompt = reply
         reply = "Generating..."
-        cnt += 1
-        if cnt == 1 :
-            model_path = "../house_light/model.glb"
-        elif cnt == 2 :
-            model_path = "../house_dark/model.glb"
+        # cnt += 1
+        # if cnt == 1 :
+        #     model_path = "../house_light/model.glb"
+        # elif cnt == 2 :
+        #     model_path = "../house_dark/model.glb"
 
     for i in range(len(reply)):
         time.sleep(0.02)
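
The new solve() depends on three utils helpers that are not part of this diff. A rough sketch of what they could look like, assuming a Meshy-style create-task / poll-status / download-GLB flow; the endpoint path, payload keys, status values, and response fields below are assumptions, not taken from this repo or verified against Meshy's current API:

```python
# utils.py -- hypothetical sketch of the helpers solve() calls.
# Endpoint URL, payload keys, status values, and response fields are assumptions.
import json
import time

import requests

from api_keys import meshy_api_key

MESHY_URL = "https://api.meshy.ai/v2/text-to-3d"   # assumed endpoint
HEADERS = {"Authorization": f"Bearer {meshy_api_key}"}


def text_to_3d_gen(prompt_json: str) -> dict:
    """Turn the JSON block extracted from the GPT reply into a Meshy request payload."""
    spec = json.loads(prompt_json)
    return {
        "mode": "preview",                           # assumed field
        "prompt": spec.get("object_prompt", ""),
        "art_style": spec.get("art_style", "realistic"),
        "negative_prompt": spec.get("negative_prompt", ""),
    }


def create_meshy_object(payload: dict) -> str:
    """Submit the generation task and return its id."""
    resp = requests.post(MESHY_URL, headers=HEADERS, json=payload)
    resp.raise_for_status()
    return resp.json()["result"]                     # assumed response key


def download_model(task_id: str) -> None:
    """Poll until the task finishes, then save the GLB as <task_id>.glb."""
    while True:
        task = requests.get(f"{MESHY_URL}/{task_id}", headers=HEADERS).json()
        status = task.get("status")
        if status == "SUCCEEDED":                    # assumed status value
            break
        if status in ("FAILED", "EXPIRED"):          # assumed status values
            raise RuntimeError(f"Meshy task {task_id} ended with status {status}")
        time.sleep(5)
    glb = requests.get(task["model_urls"]["glb"])    # assumed field
    with open(f"{task_id}.glb", "wb") as f:
        f.write(glb.content)
```

The sketch blocks until the model is ready, which mirrors the synchronous call solve() appears to expect; whether polling belongs here or progress should be streamed back into the chat is an open design choice.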
prompt.py CHANGED
@@ -31,7 +31,7 @@ Second, you should guide users step by step in the following procedure to help u
 6 - Confirmation: Show user the prompt you generated
 - Style: <style> \n - Negative Prompt: <negative prompt> \n - Ary Style: <art style> \n - Texture Resolution: <resolution> \n <0>}. Note: everything in <> should be keywords, not a complete sentence or verbs. After showing, ask user if he/she wants to add more things
 7 - Output: Show the final prompt in JSON with the following keys:
-object_prompt, style_prompt, negative_prompt, art_style. Then, you
+object_prompt, style_prompt, negative_prompt, art_style. Then, you must add number ```1``` to the end of your output response(not in json), indicating the prompt generation is done.
 - ```art_style``` key words to code conversion:
 - Realistic style -> realistic
 - 2.5D Cartoon style -> fake-3d-cartoon
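
The added instruction is what makes slow_echo()'s check on the last two characters of the reply fire, and the conversion list is meant to turn user-facing style names into Meshy art_style codes. A small illustrative sketch of parsing such a reply (parse_reply and ART_STYLE_CODES are hypothetical names, not from the repo):

```python
import json
from typing import Optional

# Keyword-to-code table from the prompt (only the styles visible in this diff).
ART_STYLE_CODES = {
    "Realistic style": "realistic",
    "2.5D Cartoon style": "fake-3d-cartoon",
}


def parse_reply(reply: str) -> Optional[dict]:
    """Return the prompt spec if the reply carries the trailing '1' sentinel, else None."""
    if "1" not in reply[-2:]:            # same check app.py does on the last two characters
        return None
    begin, end = reply.find("{"), reply.rfind("}")
    if begin == -1 or end == -1:
        return None
    spec = json.loads(reply[begin:end + 1])
    style = spec.get("art_style", "")
    spec["art_style"] = ART_STYLE_CODES.get(style, style)
    return spec
```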