papayaga committed
Commit 18f7b17 · 1 Parent(s): 49a7070

multiple fixes

Files changed (7)
  1. .gitignore +1 -0
  2. README.md +1 -0
  3. adaptors/llm.py +3 -1
  4. adaptors/voice.py +1 -1
  5. data/stories.db +0 -0
  6. homeros.py +113 -9
  7. main.py +57 -105
.gitignore CHANGED
@@ -1,5 +1,6 @@
  # app specific
  outputs/*
+ data/stories.db
  
  # Byte-compiled / optimized / DLL files
  __pycache__/
README.md CHANGED
@@ -74,6 +74,7 @@ It puts the user in charge of a how the story is going to develop.
  - [x] GPT-4 story generation in a gradio interface
  - [x] Do the evaluator (if it's time to end)
  - [x] Inerchange text output for play.ht voice generation
+ - [x] Expose switch to the user on what's the max lenght of story and whether ask about details or not
  - [ ] Interchange text input for whisper
  - [ ] Clear input on submit
  - [ ] Dockerfile and deploy (including magic word for access control)
adaptors/llm.py CHANGED
@@ -10,6 +10,7 @@ openai.api_key = os.getenv('OPENAI_KEY')
  
  MODEL = 'gpt-4'
  #MODEL = 'gpt-3.5-turbo'
+ TEMPERATURE = 0.8
  
  @retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(3))
  def answer(system_message, user_and_assistant_messages):
@@ -23,7 +24,8 @@ def answer(system_message, user_and_assistant_messages):
  try:
  chat_completion = openai.ChatCompletion.create(
  model=MODEL,
- messages=messages
+ messages=messages,
+ temperature = TEMPERATURE
  )
  output = chat_completion.choices[0].message.content
  return output
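For context, this hunk adds a module-level TEMPERATURE constant and passes it into the OpenAI chat completion call. A minimal sketch of what the patched answer() presumably boils down to, assuming the messages list is the system prompt plus the running chat history (the retry decorator and constants come from the file above; error handling is simplified here):

```python
import os
import openai
from tenacity import retry, stop_after_attempt, wait_random_exponential

openai.api_key = os.getenv('OPENAI_KEY')

MODEL = 'gpt-4'
TEMPERATURE = 0.8  # added by this commit: fixed sampling temperature for every call

@retry(wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(3))
def answer(system_message, user_and_assistant_messages):
    # assumed: prepend the system prompt to the accumulated user/assistant turns
    messages = [{"role": "system", "content": system_message}] + user_and_assistant_messages
    # the real file wraps this call in try/except on top of the tenacity retry
    chat_completion = openai.ChatCompletion.create(
        model=MODEL,
        messages=messages,
        temperature=TEMPERATURE,  # new keyword argument introduced by this hunk
    )
    return chat_completion.choices[0].message.content
```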
adaptors/voice.py CHANGED
@@ -46,7 +46,7 @@ def say_new(text, voice="dylan"):
  client = sseclient.SSEClient(resp)
  for event in client.events():
  if event.data:
- #pprint(event.data)
+ print(event.data)
  if helpers.is_valid_json(event.data): # play.ht api is unrealiable
  e = json.loads(event.data)
  if e["stage"] == "complete":
data/stories.db DELETED
Binary file (90.1 kB)
 
homeros.py CHANGED
@@ -3,19 +3,124 @@ from models.story import Story
  from loguru import logger
  import json
  from pprint import pprint
- from helpers import gen_unique_id
+ import helpers
  import prompts
+ import random
  from adaptors.llm import answer
  from adaptors.voice import say_new
  
- MAX_STORY_LEN = 3 #after how many chunks we force the story to end
+ DEFAULT_WORLD = "J.R.R. Tolkien's Middle Earth"
+ DEFAULT_HERO = "I don't know, please choose something unusual"
+ DEFAULT_PLOT = "I don't know, please come up with something unexpected"
+ DEFAULT_ENDING = (lambda: random.choice(["happy", "tragic", "funny", "unexpected"]))()
+ DEFAULT_STYLE = (lambda: random.choice(["Epic", "Funny", "Poetic"]))()
+
+ '''
+ Here we manage the flow and state of the story
+ '''
+ def do_homeros(user_input, story_data, settings):
+
+ #TODO refactor. naming in the function is old
+ story = story_data
+
+ # story hasn't started
+ if story["status"] == "not_started":
+ logger.debug("status: initiating a new story")
+ next_message = helpers.get_fixed_msg("welcome")
+ story["status"] = "checking_magic_word"
+
+ # we are checking the magic word or it is wrong and we need to ask for it again
+ elif story["status"] == "checking_magic_word" or story["status"] == "wrong_magic_word":
+ logger.debug("status: checking magic word")
+ magic_word_correct = helpers.check_magic_word(user_input)
+ if magic_word_correct:
+
+ story = init_story(story)
+ # if default settings is true - skip the asking, including magic word and just start the story
+ if settings["default_settings"] and len(story["chunks"]) == 0:
+ story = define_metadata(DEFAULT_WORLD, "world", story)
+ story = define_metadata(DEFAULT_HERO, "hero", story)
+ story = define_metadata(DEFAULT_PLOT, "plot", story)
+ story = define_metadata(DEFAULT_ENDING, "ending", story)
+ story = define_metadata(DEFAULT_STYLE, "style", story)
+ story["status"] = "ongoing"
+ story = start_story(story)
+ next_message = story["chunks"][-1]["audio_url"]
+ return next_message, story, settings
+
+ else:
+ story["status"] = "defining_metadata_world"
+ next_message = helpers.get_fixed_msg("ask_world")
+
+ else:
+ story["status"] = "wrong_magic_word"
+ next_message = helpers.get_fixed_msg("wrong_magic_word")
+
+ # defining the world
+ elif story["status"] == "defining_metadata_world":
+ logger.debug("status: magic word is wrong")
+ story = define_metadata(user_input, "world", story)
+ story["status"] = "defining_metadata_hero"
+ next_message = helpers.get_fixed_msg("ask_hero")
+
+ # defining the hero
+ elif story["status"] == "defining_metadata_hero":
+ logger.debug("status: defining the hero")
+ story = define_metadata(user_input, "hero", story)
+ story["status"] = "defining_metadata_plot"
+ next_message = helpers.get_fixed_msg("ask_plot")
+
+ # defining the plot
+ elif story["status"] == "defining_metadata_plot":
+ logger.debug("status: defining the plot")
+ story = define_metadata(user_input, "plot", story)
+ story["status"] = "defining_metadata_ending"
+ next_message = helpers.get_fixed_msg("ask_ending")
+
+ # defining the ending
+ elif story["status"] == "defining_metadata_ending":
+ logger.debug("status: defining the ending")
+ story = define_metadata(user_input, "ending", story)
+ story["status"] = "defining_metadata_style"
+ next_message = helpers.get_fixed_msg("ask_style")
+
+ # defining the style and starting the story with the first chunk
+ elif story["status"] == "defining_metadata_style":
+ logger.debug("status: defining the style")
+ story = define_metadata(user_input, "style", story)
+ story["status"] = "ongoing"
+ story = start_story(story)
+ next_message = story["chunks"][-1]["audio_url"]
+
+ # we are in the middle of the story - evaluate if time to end, or continue
+ elif story["status"] == "ongoing":
+ if evaluate_story(story, settings)["is_time_to_end"]:
+ logger.debug("status: activating story finish")
+ story = finish_story(user_input, story)
+ story["status"] = "finished"
+ else:
+ story = continue_story(user_input, story)
+ story["status"] = "ongoing"
+
+ next_message = story["chunks"][-1]["audio_url"]
+
+ # story has ended, but the user still inputting. tell them it's over
+ elif story["status"] == "finished":
+ next_message = helpers.get_fixed_msg("no_more_story")
+ story["status"] = "finished"
+
+ else:
+ raise Exception("strange story status")
+ logger.error(f"we have a story status {story['status']} we didn't catch...")
+ logger.debug(story)
+
+ return next_message, story, settings
  
  '''
  initiates a new story and saves in DB
  '''
  def init_story(story_data):
- story_data["uuid"] = gen_unique_id()
- pprint(story_data)
+ story_data["uuid"] = helpers.gen_unique_id()
  story = Story(
  uuid=story_data['uuid'],
  status=story_data['status']
@@ -40,6 +145,7 @@ creates the first chunk of the story
  def start_story(story_data):
  return continue_story("Please begin", story_data)
  
+
  '''
  main function that manages adding the next chunk to a story (first text, then audio)
  '''
@@ -54,9 +160,6 @@ def continue_story(user_input, story_data):
  })
  next_chunk_text = create_next_chunk_text(user_input, story)
  
- if len(chunks) == 0:
- next_chunk_text = "May our story begin!\n\n"+next_chunk_text
-
  next_chunk_audio = create_next_chunk_audio(next_chunk_text)
  messages.append({
  "role":"assistant",
@@ -74,6 +177,7 @@ def continue_story(user_input, story_data):
  save_story(story)
  return story.to_dict()
  
+
  '''
  generates the last part of the story and changes status to "finished"
  '''
@@ -138,11 +242,11 @@ def create_next_chunk_text(user_input, story):
  '''
  evaluates the story up until now and returns a dict with the result of the evaluation
  '''
- def evaluate_story(story):
+ def evaluate_story(story, settings):
  evaluation = {}
  story_len = len(story["chunks"])
  logger.debug(story_len)
- evaluation["is_time_to_end"] = story_len >= MAX_STORY_LEN
+ evaluation["is_time_to_end"] = story_len >= settings["max_len"]
  
  return evaluation
  
main.py CHANGED
@@ -6,105 +6,23 @@ from dotenv import load_dotenv
  load_dotenv()
  import helpers
  
- from homeros import init_story, start_story, continue_story, finish_story, define_metadata, evaluate_story
+ from homeros import do_homeros
  
- DEFAULT_PARAMS = True
+ DEFAULT_STORY_LEN = 3 #default : after how many chunks we force the story to end
+ USE_DEFAULT_SETTINGS = "Go with the defaults" #default : are we using default story config or asking the user
  
  '''
- Here we manage the flow and state of the story
+ update settings
  '''
- def do_homeros(user_input, story):
-
- # if default params is true - skip the asking, including magic word and just start the story
- if DEFAULT_PARAMS and len(story["chunks"]) == 0:
- story = init_story(story)
- story = define_metadata("J.R.R. Tolkien's Middle Earth", "world", story)
- story = define_metadata("I don't know. Please choose something unusual.", "hero", story)
- story = define_metadata("I don't know. Please choose something unusual.", "plot", story)
- story = define_metadata("Happy", "ending", story)
- story = define_metadata("epic", "style", story)
- story["status"] = "ongoing"
- story = start_story(story)
- next_message = story["chunks"][-1]["audio_url"]
- return next_message, story
-
- # story hasn't started
- if story["status"] == "not_started":
- logger.debug("status: initiating a new story")
- next_message = helpers.get_fixed_msg("welcome")
- story["status"] = "checking_magic_word"
-
- # we are checking the magic word or it is wrong and we need to ask for it again
- elif story["status"] == "checking_magic_word" or story["status"] == "wrong_magic_word":
- logger.debug("status: checking magic word")
- magic_word_correct = helpers.check_magic_word(user_input)
- if magic_word_correct:
- story["status"] = "defining_metadata_world"
- story = init_story(story)
- next_message = helpers.get_fixed_msg("ask_world")
- else:
- story["status"] = "wrong_magic_word"
- next_message = helpers.get_fixed_msg("wrong_magic_word")
-
- # defining the world
- elif story["status"] == "defining_metadata_world":
- logger.debug("status: magic word is wrong")
- story = define_metadata(user_input, "world", story)
- story["status"] = "defining_metadata_hero"
- next_message = helpers.get_fixed_msg("ask_hero")
-
- # defining the hero
- elif story["status"] == "defining_metadata_hero":
- logger.debug("status: defining the hero")
- story = define_metadata(user_input, "hero", story)
- story["status"] = "defining_metadata_plot"
- next_message = helpers.get_fixed_msg("ask_plot")
-
- # defining the plot
- elif story["status"] == "defining_metadata_plot":
- logger.debug("status: defining the plot")
- story = define_metadata(user_input, "plot", story)
- story["status"] = "defining_metadata_ending"
- next_message = helpers.get_fixed_msg("ask_ending")
-
- # defining the ending
- elif story["status"] == "defining_metadata_ending":
- logger.debug("status: defining the ending")
- story = define_metadata(user_input, "ending", story)
- story["status"] = "defining_metadata_style"
- next_message = helpers.get_fixed_msg("ask_style")
-
- # defining the style and starting the story with the first chunk
- elif story["status"] == "defining_metadata_style":
- logger.debug("status: defining the style")
- story = define_metadata(user_input, "style", story)
- story["status"] = "ongoing"
- story = start_story(story)
- next_message = story["chunks"][-1]["audio_url"]
-
- # we are in the middle of the story - evaluate if time to end, or continue
- elif story["status"] == "ongoing":
- if evaluate_story(story)["is_time_to_end"]:
- logger.debug("status: activating story finish")
- story = finish_story(user_input, story)
- story["status"] = "finished"
- else:
- story = continue_story(user_input, story)
- story["status"] = "ongoing"
-
- next_message = story["chunks"][-1]["audio_url"]
-
- # story has ended, but the user still inputting. tell them it's over
- elif story["status"] == "finished":
- next_message = helpers.get_fixed_msg("no_more_story")
- story["status"] = "finished"
-
- else:
- raise Exception("strange story status")
- logger.error(f"we have a story status {story['status']} we didn't catch...")
- logger.debug(story)
-
- return next_message, story
+ def save_settings(how_many_chunks, go_with_the_defaults, save_params_btn, go_btn, text_input, story_chunk, settings, story):
+
+ #save settings
+ settings["default_settings"] = go_with_the_defaults == "Go with the defaults"
+ settings["max_len"] = how_many_chunks + 1
+
+ #update ui
+ return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True), settings, story
+
  
  demo = gr.Blocks()
  
@@ -126,6 +44,12 @@ with demo:
  "full_story_text": ""
  })
  
+ settings = gr.State(value={
+ "max_len" : DEFAULT_STORY_LEN,
+ "default_settings": USE_DEFAULT_SETTINGS
+ })
+
+
  with gr.Row():
  gr.Markdown('''
  # HOMEROS
@@ -133,36 +57,64 @@ with demo:
  This demo is exploring the future of interactive storytelling.
  It puts the user in charge and blurs the boundary between the reader and the author.
  
- Hit "Tell me!" to get started.
-
- When Homeros asks you something - hit record, answer with your voice and then hit "Tell me!" again.
-
  ''')
  
  with gr.Row():
+ how_many_chunks = gr.Slider(
+ minimum = 1,
+ value = DEFAULT_STORY_LEN,
+ step = 1,
+ maximum = 10,
+ label = "How long would you like your story to be?",
+ interactive = True
+ )
+
+ with gr.Row():
+ go_with_defaults = gr.Radio(
+ label = "Would you like to go with the defaults or should the storyteller ask you about the details?",
+ value = USE_DEFAULT_SETTINGS,
+ choices = [
+ "Go with the defaults",
+ "I want full control"
+ ],
+ interactive = True
+ )
  
+
+ with gr.Row():
  text_input = gr.Textbox(
- label="you say"
+ label="you say",
+ visible=False
  )
  
  with gr.Row():
  story_chunk = gr.Audio(
  label="storyteller says",
- autoplay=True
+ interactive=False,
+ autoplay=True,
+ visible=False
  )
  
  with gr.Row():
-
  go_btn = gr.Button(
- "Tell me!",
+ value="Tell the story!",
+ visible=False
  )
  
  go_btn.click(
  do_homeros,
- inputs=[text_input, story],
- outputs=[story_chunk, story]
+ inputs=[text_input, story, settings],
+ outputs=[story_chunk, story, settings]
  )
  
+ with gr.Row():
+ save_params_btn = gr.Button("Save Settings")
+ save_params_btn.click(
+ save_settings,
+ inputs=[how_many_chunks, go_with_defaults, save_params_btn, go_btn, story_chunk, text_input, settings, story],
+ outputs=[how_many_chunks, go_with_defaults, save_params_btn, go_btn, story_chunk, text_input, settings, story]
+ )
+
  demo.queue(
  concurrency_count=5
  )
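Taken together, main.py now collects the user's choices (chunk budget plus defaults-vs-full-control) into a settings gr.State, save_settings writes the slider and radio values into that dict, and every click of the "Tell the story!" button threads it through do_homeros, whose evaluate_story compares the chunk count against settings["max_len"]. A minimal, self-contained sketch of that flow, using only the key names from this commit (the example story dict and values are illustrative):

```python
# Sketch of how the new settings dict travels through the app after this commit.

settings = {
    "max_len": 3,              # DEFAULT_STORY_LEN in main.py; save_settings sets it from the slider
    "default_settings": True,  # True when the user picks "Go with the defaults"
}

def evaluate_story(story, settings):
    # mirrors homeros.py: the story ends once it reaches the user-chosen number of chunks
    return {"is_time_to_end": len(story["chunks"]) >= settings["max_len"]}

# illustrative story state with three generated chunks
story = {"status": "ongoing", "chunks": [{}, {}, {}]}
print(evaluate_story(story, settings))  # {'is_time_to_end': True}
```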