Jamiiwej2903 committed on
Commit
f566057
1 Parent(s): e593310

Update main.py

Files changed (1)
  1. main.py +26 -28
main.py CHANGED
@@ -1,44 +1,42 @@
- from fastapi import FastAPI
  from pydantic import BaseModel
  from huggingface_hub import InferenceClient
  import uvicorn
  from fastapi.responses import StreamingResponse
  import io

  app = FastAPI()

- client = InferenceClient("stabilityai/stable-diffusion-2-1")

  class Item(BaseModel):
-     prompt: str
-     negative_prompt: str = ""
-     num_inference_steps: int = 50
-     guidance_scale: float = 7.5
-     width: int = 512
-     height: int = 512

- def generate_image(item: Item):
-     image = client.text_to_image(
-         prompt=item.prompt,
-         negative_prompt=item.negative_prompt,
-         num_inference_steps=item.num_inference_steps,
-         guidance_scale=item.guidance_scale,
-         width=item.width,
-         height=item.height,
-     )
-     return image
-
- @app.post("/generate_image/")
- async def generate_image_api(item: Item):
-     image = generate_image(item)
-
-     # Convert the image to a byte stream
-     img_byte_arr = io.BytesIO()
-     image.save(img_byte_arr, format='PNG')
-     img_byte_arr.seek(0)
-
-     # Return the image as a streaming response
-     return StreamingResponse(img_byte_arr, media_type="image/png")

  if __name__ == "__main__":
      uvicorn.run(app, host="0.0.0.0", port=7860)
+ from fastapi import FastAPI, File, UploadFile
  from pydantic import BaseModel
  from huggingface_hub import InferenceClient
  import uvicorn
  from fastapi.responses import StreamingResponse
  import io
+ import base64

  app = FastAPI()

+ client = InferenceClient("stabilityai/stable-video-diffusion-img2vid-xt-1-1-tensorrt")

  class Item(BaseModel):
+     fps: int = 7
+     num_frames: int = 14
+     motion_bucket_id: int = 127
+     cond_aug: float = 0.02
+     seed: int = 0

+ @app.post("/generate_video/")
+ async def generate_video_api(item: Item, file: UploadFile = File(...)):
+     # Read the uploaded image file
+     image_content = await file.read()

+     # Encode the image to base64
+     image_base64 = base64.b64encode(image_content).decode('utf-8')
+
+     # Generate the video
+     video = client.image_to_video(
+         image=image_base64,
+         fps=item.fps,
+         num_frames=item.num_frames,
+         motion_bucket_id=item.motion_bucket_id,
+         cond_aug=item.cond_aug,
+         seed=item.seed
+     )

+     # Return the video as a streaming response
+     return StreamingResponse(io.BytesIO(video), media_type="video/mp4")

  if __name__ == "__main__":
      uvicorn.run(app, host="0.0.0.0", port=7860)
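
One caveat about the committed endpoint: `generate_video_api` declares both a JSON-body Pydantic model (`item: Item`) and a multipart upload (`file: UploadFile`), and FastAPI does not read a JSON body and a multipart form from the same request. A minimal sketch of one common workaround, accepting the scalar parameters as `Form` fields, is shown below. It reuses the commit's field names, defaults, and `client.image_to_video(...)` call, but this variant is illustrative and is not part of the commit.

```python
# Illustrative variant only, not part of this commit: the scalar parameters
# are declared as Form fields so they can travel in the same multipart body
# as the uploaded image.
import base64
import io

from fastapi import FastAPI, File, Form, UploadFile
from fastapi.responses import StreamingResponse
from huggingface_hub import InferenceClient

app = FastAPI()
client = InferenceClient("stabilityai/stable-video-diffusion-img2vid-xt-1-1-tensorrt")


@app.post("/generate_video/")
async def generate_video_api(
    file: UploadFile = File(...),
    fps: int = Form(7),
    num_frames: int = Form(14),
    motion_bucket_id: int = Form(127),
    cond_aug: float = Form(0.02),
    seed: int = Form(0),
):
    # Read and base64-encode the uploaded image, as in the committed code
    image_content = await file.read()
    image_base64 = base64.b64encode(image_content).decode("utf-8")

    # Same image_to_video request the commit makes
    video = client.image_to_video(
        image=image_base64,
        fps=fps,
        num_frames=num_frames,
        motion_bucket_id=motion_bucket_id,
        cond_aug=cond_aug,
        seed=seed,
    )

    # Stream the resulting bytes back as MP4
    return StreamingResponse(io.BytesIO(video), media_type="video/mp4")
```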
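
Assuming an endpoint that reads the upload from a multipart field named `file` (such as the form-based sketch above), a client could call the Space roughly like this. The file name and localhost URL are placeholders; the port matches the `uvicorn.run` call in the commit.

```python
# Hypothetical client call; "input.png" and the URL are placeholders.
import requests

with open("input.png", "rb") as f:
    response = requests.post(
        "http://localhost:7860/generate_video/",
        files={"file": ("input.png", f, "image/png")},
        data={
            "fps": 7,
            "num_frames": 14,
            "motion_bucket_id": 127,
            "cond_aug": 0.02,
            "seed": 0,
        },
        timeout=600,
    )

response.raise_for_status()
with open("output.mp4", "wb") as out:
    out.write(response.content)
```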