vasilisklv committed
Commit f249649
Parent(s): a37d179
Update app.py
app.py CHANGED
@@ -16,10 +16,13 @@ api_key = os.environ["genai_stories"]
 # define LLM model for story creation, with OpenAI's format
 client = OpenAI(
     base_url="https://api-inference.huggingface.co/v1/",
-    api_key=api_key
+    # api_key=api_key
+    api_key="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"
 )
+
 # the model to utilize
-model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
+# model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
+model = "gpt-4o-mini"
 
 # define model for image creation
 # API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
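The hunk above hardcodes an OpenAI project key in app.py while leaving base_url pointed at the Hugging Face inference endpoint, which looks inconsistent if the intent is to call OpenAI with "gpt-4o-mini". A minimal sketch of an alternative, not part of this commit: keep the new model but load the key from the Space secret already referenced in the hunk's context line (api_key = os.environ["genai_stories"]); everything else here is an assumption.

import os
from openai import OpenAI

# Assumed alternative: read the key from the environment/Space secret already used
# at the top of app.py instead of embedding it in the source file.
api_key = os.environ["genai_stories"]

# base_url is omitted so the client targets OpenAI's own endpoint for "gpt-4o-mini".
client = OpenAI(api_key=api_key)
model = "gpt-4o-mini"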
@@ -105,10 +108,10 @@ def create_character_prompt(story_type):
 
 
 # Initialize the story based on the story_type selected by the player
-def generate_story(story_type):
+def generate_story(story_type, language):
     global model, messages
     messages = []
-    messages.append({"role": "system", "content": f"You are a structured storytelling assistant. Your purpose is to generate stories, questions, and answers in a specific format. Always adhere strictly to the rules provided by the user. This is the topic that you must create a story about: story_type = {story_type}"})
+    messages.append({"role": "system", "content": f"You are a structured storytelling assistant. Your purpose is to generate stories, questions, and answers in a specific format. Always adhere strictly to the rules provided by the user. This is the topic that you must create a story about: story_type = {story_type}. You must reply in {language}."})
     messages.append({"role": "user", "content": f"You must create and return one story part, one question and four answers. To do that, you must explicitly follow these steps: Step 1) Create the initial part of the story, according to the story_type from your system content, within 50 and 60 words, without switching to new line. Then you must switch line by adding this: '\n\n'. Step 2) Create a question, within 10 and 20 words, on how to proceed the story from step 1. Then you must switch line by adding this: '\n\n'. Step 3) Create the 4 potential answers for the question in step 2. The answers of the question must be given in the format: '1:... | 2:... | 3:... | 4:...'. Do not change this format and do not add any new lines between the answers. Every answer must be maximum 20 words. All answers must be separated from each other with '|'. Now some general guidelines for your response: 1) Don't explicitly specify 'Story', 'Question', or 'Answer'. 2) You must ALWAYS reply in this format: '[story from step 1]\n\n[question from step 2]\n\n[answers from step 3]'. 3) Do not return any other stuff in your response. 4) Always change lines with '\n\n' between ther story part and question and between the question and the answers."})
     messages = [messages[0]] + [messages[-1]]
     response = chat_with_llm(model, messages)
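The hunks above and below call chat_with_llm(model, messages) and create_image(image_prompt), neither of which appears in this diff. A hypothetical sketch of what the chat helper might look like with the OpenAI-format client defined earlier; the real implementation in app.py may differ.

# Hypothetical sketch only: chat_with_llm is called in the diff but not shown.
# It is assumed to wrap the OpenAI-format chat completions call and return plain text.
def chat_with_llm(model, messages):
    completion = client.chat.completions.create(
        model=model,        # "gpt-4o-mini" after this commit
        messages=messages,  # the system/user messages assembled by the caller
    )
    return completion.choices[0].message.content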
@@ -117,15 +120,14 @@ def generate_story(story_type):
     question = lines[1] # The second to last line is the question
     answers = [i.strip() for i in lines[2].split('|')] # The last line contains answers separated by '|'
     messages.append({"role": "assistant", "content": "I am waiting for next command."})
-    # main_character = story_type.split(' ')[2]
     character_prompt = create_character_prompt(story_type)
     image_prompt = "Cartoon image, with bright colors and simple shapes, that describes this story: " + story + character_prompt
     image = create_image(image_prompt)
-    return story, question, gr.Radio(choices=answers, interactive=True), gr.Radio(choices=[story_type], interactive=False), gr.Button(interactive=False), image
+    return story, question, gr.Radio(choices=answers, interactive=True), gr.Radio(choices=[story_type], interactive=False), gr.Radio(choices=[language], interactive=False), gr.Button(interactive=False), image
 
 
 # Continue the story based on what happened so far and the player's latest answer
-def continue_story(previous_story, selected_option, story_type):
+def continue_story(previous_story, selected_option, story_type, language):
     global model, messages
     messages.append({"role": "user", "content": f"You must create and return one story part, one question and four answers. To do that, you must explicitly follow these steps: Step 1) Based on this story so far: '{previous_story} {selected_option}', continue the story and create the next part of the story within 50 and 60 words, without changing lines. You must provide ONLY the new part that you created. Then you must switch line by adding this: '\n\n'. Step 2) Create a question, within 10 and 20 words, on how to proceed the story from step 1. Then you must switch line by adding this: '\n\n'. Step 3) Create the 4 potential answers for the question in step 2. The answers of the question must be given in the format: '1:... | 2:... | 3:... | 4:...'. Do not change this format and do not add any new lines between the answers. Every answer must be maximum 20 words. All answers must be separated from each other with '|'. Now some general guidelines for your response: 1) Don't explicitly specify 'Story', 'Question', or 'Answer'. 2) You must ALWAYS reply in this format: '[story from step 1]\n\n[question from step 2]\n\n[answers from step 3]'. 3) Do not return any other stuff in your response. 4) Always change lines with '\n\n' between story part and question that you generate. 5) Always change lines with '\n\n' between question and the answers that you generate"})
     messages = [messages[0]] + [messages[-1]]
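The comments in the hunk above refer to lines[1] and lines[2], i.e. a response that has already been split into blocks. That splitting step sits in unchanged context not shown in this diff; a sketch of the assumed parsing, based on the '[story]\n\n[question]\n\n[answers]' format the prompts demand.

# Assumed parsing step (unchanged context not shown in this diff): the prompt requires
# '[story]\n\n[question]\n\n[answers]', so the reply is split on blank lines first.
lines = response.split('\n\n')
story = lines[0]                                    # first block: the story part
question = lines[1]                                 # second block: the question
answers = [i.strip() for i in lines[2].split('|')]  # third block: '1:... | 2:... | 3:... | 4:...'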
@@ -136,19 +138,24 @@ def continue_story(previous_story, selected_option, story_type):
     answers = [i.strip() for i in lines[2].split('|')] # The last line contains answers separated by '|'
     messages.append({"role": "assistant", "content": "I am waiting for next command."})
     story = previous_story + '\n\n' + next_story
-    #
+    # -----------
+    if language != "English":
+        messages.append({"role": "user", "content": f"translate this story to english: {next_story}"})
+        next_story = chat_with_llm(model, messages)
+        messages.append({"role": "assistant", "content": "I am waiting for next command."})
+    # -----------
     main_character = story_type.split(' ')[2].lower()
     character_prompt = create_character_prompt(story_type)
     if main_character in next_story.lower():
-        image_prompt = "Cartoon image, with bright colors and simple shapes, that describes this story: " +
+        image_prompt = "Cartoon image, with bright colors and simple shapes, that describes this story: " + next_story + character_prompt
     else:
-        image_prompt = "Cartoon image, with bright colors and simple shapes, that describes this story: " +
+        image_prompt = "Cartoon image, with bright colors and simple shapes, that describes this story: " + next_story
     image = create_image(image_prompt)
     return story, question, gr.Radio(choices=answers, interactive=True), image
 
 
 # End the story based on what happened so far and the player's latest answer
-def end_story(previous_story, selected_option, story_type):
+def end_story(previous_story, selected_option, story_type, language):
     global model, messages
     messages.append({"role": "user", "content": f"You must create an ending for this story: '{previous_story}' and also considering the latest answer: '{selected_option}'. You must provide only the ending of the story in an exciting way. Do not return any other stuff in your response."})
     end_story = chat_with_llm(model, messages)
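The same translate-to-English block is added twice by this commit (here in continue_story and again in end_story below), so that the image prompt is built from English text even when the story is generated in Greek. A hypothetical refactor, not part of the commit, that factors the repetition into one helper in the same global-messages style the file already uses.

# Hypothetical helper (not in the commit): one place for the repeated translation step.
def translate_to_english(text, language):
    global model, messages
    if language == "English":
        return text
    messages.append({"role": "user", "content": f"translate this story to english: {text}"})
    translated = chat_with_llm(model, messages)
    messages.append({"role": "assistant", "content": "I am waiting for next command."})
    return translated

# Usage inside continue_story / end_story, after the story text has been assembled:
# next_story = translate_to_english(next_story, language)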
@@ -156,13 +163,18 @@ def end_story(previous_story, selected_option, story_type):
     # end_story = lines[0] # Everything before the last two lines is the story
     messages.append({"role": "assistant", "content": "I ended the story successfully. Now I am not waiting for any more responses from the player."})
     story = previous_story + '\n\n' + end_story
-    #
+    # -----------
+    if language != "English":
+        messages.append({"role": "user", "content": f"translate this story to english: {end_story}"})
+        end_story = chat_with_llm(model, messages)
+        messages.append({"role": "assistant", "content": "I am waiting for next command."})
+    # -----------
     main_character = story_type.split(' ')[2].lower()
     character_prompt = create_character_prompt(story_type)
     if main_character in end_story.lower():
-        image_prompt = "Cartoon image, with bright colors and simple shapes, that describes this story: " +
+        image_prompt = "Cartoon image, with bright colors and simple shapes, that describes this story: " + end_story + character_prompt
     else:
-        image_prompt = "Cartoon image, with bright colors and simple shapes, that describes this story: " +
+        image_prompt = "Cartoon image, with bright colors and simple shapes, that describes this story: " + end_story
     image = create_image(image_prompt)
     return story, image
 
@@ -192,6 +204,8 @@ with gr.Blocks() as game_ui:
 
     # Right column for the question, answers, and buttons
     with gr.Column(scale=1):
+        language = gr.Radio(label="Select language", choices=['Greek', 'English'])
+
        story_type = gr.Radio(label="What story to create?", choices=["A fearless king who leads an army attacking a castle.",
                                                                      "A kid alien who lands on earth and explores around.",
                                                                      "A joyful rabbit who participates in song contest.",
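The new language radio has no default, so language is None until the player picks an option; generate_story would then ask the model to "reply in None", and the translation branch (language != "English") would also fire. A small sketch, assuming gr.Radio's standard value argument, of giving the control a default; not part of the commit.

# Sketch (not in the commit): a default value avoids language being None
# when the player starts a story without selecting a language first.
language = gr.Radio(label="Select language", choices=['Greek', 'English'], value='English')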
@@ -209,12 +223,15 @@ with gr.Blocks() as game_ui:
     # reset_button = gr.Button("Reset and play again")
 
     # what the buttons do
-    start_button.click(fn=generate_story, inputs=[story_type], outputs=[story, question, answers, story_type, start_button, story_image])
-    submit_button.click(fn=continue_story, inputs=[story, answers, story_type], outputs=[story, question, answers, story_image])
-    end_button.click(fn=end_story, inputs=[story, answers, story_type], outputs=[story, story_image])
+    start_button.click(fn=generate_story, inputs=[story_type, language], outputs=[story, question, answers, story_type, language, start_button, story_image])
+    submit_button.click(fn=continue_story, inputs=[story, answers, story_type, language], outputs=[story, question, answers, story_image])
+    end_button.click(fn=end_story, inputs=[story, answers, story_type, language], outputs=[story, story_image])
     # reset_button.click(fn=reset_app, inputs=[], outputs=[story, question, answers, story_type, start_button])
 
 
 # Launch the Gradio interface
 game_ui.launch()
-# game_ui.launch(share=True)
+# game_ui.launch(share=True)
+
+
+