umang-immersfy commited on
Commit
6e0fda9
·
1 Parent(s): 5c88e58

regeneration added, WIP

Browse files
__pycache__/core.cpython-310.pyc CHANGED
Binary files a/__pycache__/core.cpython-310.pyc and b/__pycache__/core.cpython-310.pyc differ
 
__pycache__/inout.cpython-310.pyc ADDED
Binary file (1.56 kB). View file
 
__pycache__/openai_wrapper.cpython-310.pyc CHANGED
Binary files a/__pycache__/openai_wrapper.cpython-310.pyc and b/__pycache__/openai_wrapper.cpython-310.pyc differ
 
__pycache__/parameters.cpython-310.pyc CHANGED
Binary files a/__pycache__/parameters.cpython-310.pyc and b/__pycache__/parameters.cpython-310.pyc differ
 
app.py CHANGED
@@ -5,11 +5,13 @@ import parameters
5
 
6
  ############################################ LAYOUT ############################################
7
  with gr.Blocks() as demo:
 
8
  selected_image = gr.State(None)
9
  current_episode = gr.State(-1)
10
  current_scene = gr.State(-1)
11
  current_frame = gr.State(-1)
12
  episodes_data = gr.State({})
 
13
  current_frame_data = gr.State(None)
14
 
15
  with gr.Row():
@@ -57,23 +59,19 @@ with gr.Blocks() as demo:
57
  with gr.Column():
58
  gr.Markdown("## Composition #1")
59
  prompt_1 = gr.TextArea(label="Image Prompt")
60
- # shot_1 = gr.Textbox(label="Shot Type")
61
  seed_1 = gr.Textbox(label="Generation Seed")
62
  with gr.Column():
63
  gr.Markdown("## Composition #2")
64
  prompt_2 = gr.TextArea(label="Image Prompt")
65
- # shot_2 = gr.Textbox(label="Shot Type")
66
  seed_2 = gr.Textbox(label="Generation Seed")
67
  with gr.Row():
68
  with gr.Column():
69
  gr.Markdown("## Composition #3")
70
  prompt_3 = gr.TextArea(label="Image Prompt")
71
- # shot_3 = gr.Textbox(label="Shot Type")
72
  seed_3 = gr.Textbox(label="Generation Seed")
73
  with gr.Column():
74
  gr.Markdown("## Composition #4")
75
  prompt_4 = gr.TextArea(label="Image Prompt")
76
- # shot_4 = gr.Textbox(label="Shot Type")
77
  seed_4 = gr.Textbox(label="Generation Seed")
78
  regenerate_comps_btn = gr.Button(value="Regenerate Compositions")
79
 
@@ -99,7 +97,8 @@ with gr.Blocks() as demo:
99
  episode_dropdown,
100
  frame_dropdown,
101
  episodes_data,
102
- developer,
 
103
  ],
104
  )
105
 
@@ -239,12 +238,31 @@ with gr.Blocks() as demo:
239
  current_scene,
240
  current_frame,
241
  episodes_data
242
- ],
243
- outputs=[]
 
 
 
 
 
 
 
 
 
244
  )
245
  regenerate_btn.click(
246
  core.regenerate_data,
247
- inputs=[],
 
 
 
 
 
 
 
 
 
 
248
  outputs=[]
249
  )
250
 
 
5
 
6
  ############################################ LAYOUT ############################################
7
  with gr.Blocks() as demo:
8
+ # ... [previous state variables remain the same] ...
9
  selected_image = gr.State(None)
10
  current_episode = gr.State(-1)
11
  current_scene = gr.State(-1)
12
  current_frame = gr.State(-1)
13
  episodes_data = gr.State({})
14
+ character_data = gr.State({})
15
  current_frame_data = gr.State(None)
16
 
17
  with gr.Row():
 
59
  with gr.Column():
60
  gr.Markdown("## Composition #1")
61
  prompt_1 = gr.TextArea(label="Image Prompt")
 
62
  seed_1 = gr.Textbox(label="Generation Seed")
63
  with gr.Column():
64
  gr.Markdown("## Composition #2")
65
  prompt_2 = gr.TextArea(label="Image Prompt")
 
66
  seed_2 = gr.Textbox(label="Generation Seed")
67
  with gr.Row():
68
  with gr.Column():
69
  gr.Markdown("## Composition #3")
70
  prompt_3 = gr.TextArea(label="Image Prompt")
 
71
  seed_3 = gr.Textbox(label="Generation Seed")
72
  with gr.Column():
73
  gr.Markdown("## Composition #4")
74
  prompt_4 = gr.TextArea(label="Image Prompt")
 
75
  seed_4 = gr.Textbox(label="Generation Seed")
76
  regenerate_comps_btn = gr.Button(value="Regenerate Compositions")
77
 
 
97
  episode_dropdown,
98
  frame_dropdown,
99
  episodes_data,
100
+ character_data,
101
+ developer
102
  ],
103
  )
104
 
 
238
  current_scene,
239
  current_frame,
240
  episodes_data
241
+ ],
242
+ outputs=[
243
+ prompt_1,
244
+ seed_1,
245
+ prompt_2,
246
+ seed_2,
247
+ prompt_3,
248
+ seed_3,
249
+ prompt_4,
250
+ seed_4
251
+ ]
252
  )
253
  regenerate_btn.click(
254
  core.regenerate_data,
255
+ inputs=[
256
+ comic_id,
257
+ current_episode,
258
+ current_scene,
259
+ current_frame,
260
+ episodes_data,
261
+ character_data,
262
+ visual_style,
263
+ height,
264
+ width
265
+ ],
266
  outputs=[]
267
  )
268
 
core.py CHANGED
@@ -10,7 +10,7 @@ import base64
10
  import aws_utils
11
  import parameters
12
  import script_gen
13
- import io as iowrapper
14
  import openai_wrapper
15
 
16
  AWS_BUCKET = parameters.AWS_BUCKET
@@ -57,11 +57,11 @@ def load_data_inner(
57
  ):
58
  images = []
59
  curr_frame = episodes_data[current_episode][current_frame]
 
60
  # Loading the 0th frame of 0th scene in 0th episode.
61
  for comp in curr_frame.compositions:
62
  data = aws_utils.fetch_from_s3(comp.image)
63
  images.append(Image.open(io.BytesIO(data)))
64
-
65
  return (
66
  images,
67
  episodes_data,
@@ -91,6 +91,12 @@ def load_metadata_fn(comic_id: str):
91
  print(f"Getting episodes for comic id: {comic_id}")
92
  episodes_data = {}
93
  episode_idx = []
 
 
 
 
 
 
94
  for folder in list_current_dir(AWS_BUCKET, f"{comic_id}/episodes/"):
95
  if "episode" in folder:
96
  json_path = f"s3://{AWS_BUCKET}/{folder}episode.json"
@@ -124,6 +130,7 @@ def load_metadata_fn(comic_id: str):
124
  choices=range(len(episodes_data[current_episode])), value=current_frame
125
  ),
126
  episodes_data,
 
127
  gr.Checkbox(visible=True),
128
  )
129
 
@@ -223,11 +230,10 @@ def regenerate_composition_data(
223
  print(
224
  f"Generating compositions for episode: {current_episode} and scene: {current_scene} and frame: {current_frame}."
225
  )
226
-
227
  # Retrieve the current frame data
228
  frame = episodes_data[current_episode][current_frame]
229
- print(frame)
230
-
231
  # Generate the prompt for compositions
232
  prompt_dict = {
233
  "system": script_gen.generate_image_compositions_instruction,
@@ -235,55 +241,89 @@ def regenerate_composition_data(
235
  source=script_gen.generate_image_compositions_user_prompt
236
  ).render(
237
  {
238
- "FRAME": frame,
 
 
 
 
 
 
 
 
239
  }
240
  ),
241
  }
242
-
243
- # Generate compositions using LLM
244
  compositions = llm.generate_valid_json_response(prompt_dict)
245
- print(compositions)
246
-
247
  # Update frame with new compositions
248
  frame.compositions = [
249
  Composition(
250
  **composition,
251
- seed="", # Set default empty seed
252
- image="" # Set default empty image
253
- ) # Create a Composition dataclass from the response
254
- for composition in compositions["compositions"]
 
 
255
  ]
256
-
257
  # Update the episodes_data dictionary with the modified frame
258
  episodes_data[current_episode][current_frame] = frame
259
- print(f"Updated frame {current_frame} in episode {current_episode} with new compositions.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
260
 
261
 
262
  def regenerate_data(
263
- frame_data: ComicFrame,
 
 
 
 
 
 
 
 
264
  ):
265
- pass
266
- # # for
267
- # payload = {
268
- # "prompt": composition.prompt,
269
- # "characters": related_chars,
270
- # "parameters": {
271
- # "height": parameters.IMG_HEIGHT,
272
- # "width": parameters.IMG_WIDTH,
273
- # "visual_style": visual_style,
274
- # "seed": seed_val,
275
- # },
276
- # }
277
-
278
- # data = iowrapper.get_valid_post_response(
279
- # url=parameters.MODEL_SERVER_URL + "/generate_image",
280
- # payload=payload,
281
- # )
282
- # image_data = io.BytesIO(base64.b64decode(data["image"]))
283
- # path = aws_utils.save_to_s3(
284
- # parameters.AWS_BUCKET,
285
- # f"{self.id}/episodes/episode-{episode_num}/compositions/scene-{scene_num}/frame-{frame_num}",
286
- # image_data,
287
- # f"{num}.jpg",
288
- # )
 
 
 
 
 
289
  # pass
 
10
  import aws_utils
11
  import parameters
12
  import script_gen
13
+ import inout as iowrapper
14
  import openai_wrapper
15
 
16
  AWS_BUCKET = parameters.AWS_BUCKET
 
57
  ):
58
  images = []
59
  curr_frame = episodes_data[current_episode][current_frame]
60
+ print(episodes_data[current_episode][current_frame])
61
  # Loading the 0th frame of 0th scene in 0th episode.
62
  for comp in curr_frame.compositions:
63
  data = aws_utils.fetch_from_s3(comp.image)
64
  images.append(Image.open(io.BytesIO(data)))
 
65
  return (
66
  images,
67
  episodes_data,
 
91
  print(f"Getting episodes for comic id: {comic_id}")
92
  episodes_data = {}
93
  episode_idx = []
94
+ character_data = {}
95
+ character_path = f"s3://blix-demo-v0/{comic_id}/characters/characters.json"
96
+ char_data = eval(aws_utils.fetch_from_s3(source=character_path).decode("utf-8"))
97
+ for name, char in char_data.items():
98
+ character_data[name] = char["profile_image"]
99
+ print(character_data)
100
  for folder in list_current_dir(AWS_BUCKET, f"{comic_id}/episodes/"):
101
  if "episode" in folder:
102
  json_path = f"s3://{AWS_BUCKET}/{folder}episode.json"
 
130
  choices=range(len(episodes_data[current_episode])), value=current_frame
131
  ),
132
  episodes_data,
133
+ character_data,
134
  gr.Checkbox(visible=True),
135
  )
136
 
 
230
  print(
231
  f"Generating compositions for episode: {current_episode} and scene: {current_scene} and frame: {current_frame}."
232
  )
233
+
234
  # Retrieve the current frame data
235
  frame = episodes_data[current_episode][current_frame]
236
+
 
237
  # Generate the prompt for compositions
238
  prompt_dict = {
239
  "system": script_gen.generate_image_compositions_instruction,
 
241
  source=script_gen.generate_image_compositions_user_prompt
242
  ).render(
243
  {
244
+ "FRAME": {
245
+ "description": frame.description,
246
+ "narration": frame.narration,
247
+ "character_dilouge": frame.character_dilouge,
248
+ "character": frame.character,
249
+ "location": frame.location,
250
+ "setting": frame.setting,
251
+ "all_characters": frame.all_characters,
252
+ },
253
  }
254
  ),
255
  }
256
+ # Generate compositions using LLM
 
257
  compositions = llm.generate_valid_json_response(prompt_dict)
 
 
258
  # Update frame with new compositions
259
  frame.compositions = [
260
  Composition(
261
  **composition,
262
+ seed=frame.compositions[idx].seed if idx < len(frame.compositions) else "",
263
+ image=(
264
+ frame.compositions[idx].image if idx < len(frame.compositions) else ""
265
+ ),
266
+ )
267
+ for idx, composition in enumerate(compositions["compositions"])
268
  ]
269
+
270
  # Update the episodes_data dictionary with the modified frame
271
  episodes_data[current_episode][current_frame] = frame
272
+ print(
273
+ f"Updated frame {current_frame} in episode {current_episode} with new compositions."
274
+ )
275
+
276
+ # Return the updated composition values for the UI
277
+ return [
278
+ frame.compositions[0].prompt,
279
+ frame.compositions[0].seed,
280
+ frame.compositions[1].prompt,
281
+ frame.compositions[1].seed,
282
+ frame.compositions[2].prompt,
283
+ frame.compositions[2].seed,
284
+ frame.compositions[3].prompt,
285
+ frame.compositions[3].seed,
286
+ ]
287
 
288
 
289
  def regenerate_data(
290
+ comic_id,
291
+ current_episode,
292
+ current_scene,
293
+ current_frame,
294
+ episodes_data,
295
+ character_data,
296
+ visual_style,
297
+ height,
298
+ width,
299
  ):
300
+
301
+ frame = episodes_data[current_episode][current_frame]
302
+ related_chars = [character_data[ch] for ch in frame.all_characters]
303
+ for i, composition in enumerate(frame.compositions):
304
+ payload = {
305
+ "prompt": composition.prompt,
306
+ "characters": related_chars,
307
+ "parameters": {
308
+ "height": height,
309
+ "width": width,
310
+ "visual_style": visual_style,
311
+ "seed": composition.seed,
312
+ },
313
+ }
314
+
315
+ data = iowrapper.get_valid_post_response(
316
+ url="http://10.100.111.13:4389/generate_image",
317
+ payload=payload,
318
+ )
319
+ image_data = io.BytesIO(base64.b64decode(data["image"]))
320
+ path = aws_utils.save_to_s3(
321
+ parameters.AWS_BUCKET,
322
+ f"{comic_id}/episodes/episode-{current_episode}/compositions/scene-{0}/frame-{current_frame}",
323
+ image_data,
324
+ f"{i}.jpg",
325
+ )
326
+ load_data_inner(episodes_data, current_episode, current_frame, is_developer=True)
327
+
328
+
329
  # pass
io.py → inout.py RENAMED
@@ -33,7 +33,7 @@ def get_valid_post_response(url: str, payload: dict) -> Dict[str, Any]:
33
  Returns:
34
  Dict[str, Any]: The JSON response from the request.
35
  """
36
- for _ in range(parameters.MAX_TRIES):
37
  try:
38
  response, status_code = send_post_request(url, payload)
39
  if status_code != 200:
 
33
  Returns:
34
  Dict[str, Any]: The JSON response from the request.
35
  """
36
+ for _ in range(int(parameters.MAX_TRIES)):
37
  try:
38
  response, status_code = send_post_request(url, payload)
39
  if status_code != 200:
openai_wrapper.py CHANGED
@@ -67,7 +67,7 @@ class OpenAIModel(abc.ABC):
67
  self,
68
  prompt_dict: Mapping[str, str],
69
  max_output_tokens: int = None,
70
- temperature: int = 0.6,
71
  ) -> str:
72
  """Generate a response with retries, returning a valid JSON."""
73
  for _ in range(int(parameters.MAX_TRIES)):
 
67
  self,
68
  prompt_dict: Mapping[str, str],
69
  max_output_tokens: int = None,
70
+ temperature: int = 0.7,
71
  ) -> str:
72
  """Generate a response with retries, returning a valid JSON."""
73
  for _ in range(int(parameters.MAX_TRIES)):
parameters.py CHANGED
@@ -1,7 +1,7 @@
1
  import os
2
- from dotenv import load_dotenv
3
 
4
- load_dotenv()
5
 
6
 
7
  AWS_BUCKET = os.getenv("AWS_BUCKET")
 
1
  import os
2
+ # from dotenv import load_dotenv
3
 
4
+ # load_dotenv()
5
 
6
 
7
  AWS_BUCKET = os.getenv("AWS_BUCKET")