Jofthomas (HF staff) committed
Commit dc130e9 • 1 Parent(s): ebd0b97

Update TextGen/router.py

Files changed (1)
  1. TextGen/router.py +8 -52
TextGen/router.py CHANGED

@@ -9,6 +9,7 @@ from fastapi.middleware.cors import CORSMiddleware
 from langchain.chains import LLMChain
 from langchain.prompts import PromptTemplate
 from TextGen.suno import custom_generate_audio, get_audio_information,generate_lyrics
+from TextGen.gemini import generate_story,place_objects,generate_map_markdown
 #from TextGen.diffusion import generate_image
 #from coqui import predict
 from langchain_google_genai import (
@@ -181,10 +182,12 @@ def story(prompt: Invoke):
 def placement(input: Rooms):
     print(input)
     markdown_map=generate_map_markdown(input)
-    print(markdown_map)
-    answer={
-        "key":"value"
-    }
+    story=generate_story(input.possible_entities)
+    print(story)
+    placements=place_objects(input.possible_entities,story,markdown_map)
+    print(placements)
+    answer={"objective":story,
+            "placements":placements}
     return answer
 
 #Dummy function for now
@@ -318,51 +321,4 @@ async def generate_song():
 #     img_byte_arr = img_byte_arr.getvalue()
 #
 #     Return the image as a PNG response
-#     return Response(content=img_byte_arr, media_type="image/png")
-
-def generate_map_markdown(data):
-    import numpy as np
-
-    # Define the room structure with walls and markers
-    def create_room(room_char):
-        return [
-            f"╔═══╗",
-            f"║ {room_char} ║",
-            f"╚═══╝"
-        ]
-
-    # Extract rooms and rooms of interest
-    rooms = [eval(room) for room in data["rooms"]]
-    rooms_of_interest = [eval(room) for room in data["room_of_interest"]]
-
-    # Determine grid size
-    min_x = min(room[0] for room in rooms)
-    max_x = max(room[0] for room in rooms)
-    min_y = min(room[1] for room in rooms)
-    max_y = max(room[1] for room in rooms)
-
-    # Create grid with empty spaces represented by a room-like structure
-    map_height = (max_y - min_y + 1) * 3
-    map_width = (max_x - min_x + 1) * 5
-    grid = np.full((map_height, map_width), " ")
-
-    # Populate grid with rooms and their characteristics
-    for i, room in enumerate(rooms):
-        x, y = room
-        x_offset = (x - min_x) * 5
-        y_offset = (max_y - y) * 3
-        if room == (0, 0):
-            room_char = "X"
-        elif room in rooms_of_interest:
-            room_char = "P" if i == data["index_exit"] else "?"
-        else:
-            room_char = " "
-        room_structure = create_room(room_char)
-        for j, row in enumerate(room_structure):
-            grid[y_offset + j, x_offset:x_offset + 5] = list(row)
-
-    # Convert grid to a string format suitable for display
-    markdown_map = "\n".join("".join(row) for row in grid)
-
-    # Return the map wrapped in triple backticks for proper display in markdown
-    return f"```\n{markdown_map}\n```"
+#     return Response(content=img_byte_arr, media_type="image/png")
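
For reference, a minimal sketch of the payload shape the removed generate_map_markdown expects (inferred from its eval() calls on data["rooms"] and data["room_of_interest"]) and of the response the updated placement() now builds. The concrete coordinates below, and the exact return types of generate_story and place_objects, are assumptions rather than part of this commit.

# Hypothetical payload: coordinates arrive as stringified "(x, y)" tuples,
# which generate_map_markdown eval()s back into coordinate tuples.
sample_rooms = {
    "rooms": ["(0, 0)", "(1, 0)", "(1, 1)"],
    "room_of_interest": ["(1, 1)"],
    "index_exit": 2,  # index into "rooms"; that room is drawn as "P", (0, 0) as "X"
}
# After this commit, placement() returns a dict of the form
# {"objective": <story built from input.possible_entities>,
#  "placements": <output of place_objects(entities, story, markdown_map)>}
# instead of the previous {"key": "value"} placeholder.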