ka1kuk committed on
Commit
efbaaff
1 Parent(s): 2eb1363

Update main.py

Files changed (1)
  1. main.py +10 -36
main.py CHANGED
@@ -1,7 +1,5 @@
 from fastapi import FastAPI
 from fastapi.middleware.cors import CORSMiddleware
-import asyncio
-from Linlada import Chatbot, ConversationStyle
 from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler
 import torch
 
@@ -15,42 +13,18 @@ app.add_middleware(
     allow_credentials=True,
 )
 
-async def generate(prompt):
-    bot = await Chatbot.create()
-    result = await bot.ask(prompt=prompt, conversation_style=ConversationStyle.precise)
-    return result
+model_id = "runwayml/stable-diffusion-v1-5"
+pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
+pipe = pipe.to("cuda")
 
 def dummy(images, **kwargs):
     return images, False
 
-async def generate_image(prompt):
-    model_id = "runwayml/stable-diffusion-v1-5"
-    pipe = await StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
-    pipe = pipe.to("cuda")
-    pipe.safety_checker = dummy
-    image = await pipe(prompt).images[0]
-    return image
+pipe.safety_checker = dummy
 
-@app.get("/")
-def read_root():
-    return "Hello, I'm Linlada"
-
-@app.get("/test/{hello}")
-def hi(hello: str):
-    return {"text": hello}
-
-@app.get("/image/{image}")
-def img(image: str):
-    loop = asyncio.new_event_loop()
-    asyncio.set_event_loop(loop)
-    result = loop.run_until_complete(generate_image(image))
-    loop.close()
-    return result
-
-@app.get('/linlada/{prompt}')
-def generate_image_route(prompt: str):
-    loop = asyncio.new_event_loop()
-    asyncio.set_event_loop(loop)
-    result = loop.run_until_complete(generate(prompt))
-    loop.close()
-    return result
+@app.get('/')
+def generate_image():
+    prompt = request.args.get('prompt')
+    image = pipe(prompt).images[0]
+    # do something with the generated image
+    return image
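
Note that the route added by this commit mixes Flask and FastAPI idioms: request.args.get('prompt') is Flask's API (and request is never imported), and returning a raw PIL image will fail FastAPI's response serialization. Below is a minimal sketch, not part of the commit, of how the same endpoint could be made runnable in plain FastAPI, assuming the prompt arrives as a ?prompt= query parameter and the image is returned as PNG bytes; the pipeline loading and safety-checker override are reproduced from the commit.

import io

from fastapi import FastAPI, Response
from diffusers import StableDiffusionPipeline
import torch

app = FastAPI()

# Load the pipeline once at startup, as the commit does (requires a CUDA GPU).
model_id = "runwayml/stable-diffusion-v1-5"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")

def dummy(images, **kwargs):
    # Disable the safety checker, as in the commit.
    return images, False

pipe.safety_checker = dummy

@app.get("/")
def generate_image(prompt: str):
    # FastAPI fills `prompt` from the query string (?prompt=...),
    # unlike Flask's request.args.get used in the committed code.
    image = pipe(prompt).images[0]  # a PIL image
    buf = io.BytesIO()
    image.save(buf, format="PNG")
    # Return raw PNG bytes; a bare PIL image is not JSON-serializable.
    return Response(content=buf.getvalue(), media_type="image/png")

With this sketch, a request such as GET /?prompt=a%20red%20car would return the generated image directly as image/png.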