"""FastAPI service wrapping Stable Video Diffusion image-to-video inference.

POST an image (plus optional generation parameters) to ``/generate_video/``
and receive the generated video back as a streaming ``video/mp4`` response.
"""

import base64  # NOTE(review): no longer used by the fixed handler; kept so any external code importing this module's namespace is unaffected
import io

import uvicorn
from fastapi import FastAPI, File, Form, UploadFile
from fastapi.responses import StreamingResponse
from huggingface_hub import InferenceClient

app = FastAPI()

# Client bound to the hosted SVD img2vid model on the HF Inference API.
client = InferenceClient("stabilityai/stable-video-diffusion-img2vid-xt-1-1-tensorrt")


@app.post("/generate_video/")
async def generate_video_api(
    file: UploadFile = File(...),
    fps: int = Form(7),
    num_frames: int = Form(14),
    motion_bucket_id: int = Form(127),
    cond_aug: float = Form(0.02),
    seed: int = Form(0),
):
    """Generate a short video from an uploaded still image.

    Args:
        file: The source image uploaded as multipart form data.
        fps: Frames per second of the generated video.
        num_frames: Number of frames to generate.
        motion_bucket_id: Motion intensity control (SVD-specific).
        cond_aug: Conditioning augmentation strength (SVD-specific).
        seed: RNG seed for reproducible generation.

    Returns:
        StreamingResponse carrying the generated video as ``video/mp4``.
    """
    # Read the uploaded image into memory.
    image_content = await file.read()

    # BUG FIX: pass the raw image bytes directly. ``InferenceClient`` accepts
    # bytes and performs any transport encoding itself; the previous code
    # base64-encoded the image into a *str*, which the client would interpret
    # as a file path or URL and fail on.
    #
    # NOTE(review): fps/motion_bucket_id/cond_aug are forwarded exactly as the
    # original did — confirm the installed huggingface_hub version accepts
    # these kwargs on image_to_video.
    video = client.image_to_video(
        image=image_content,
        fps=fps,
        num_frames=num_frames,
        motion_bucket_id=motion_bucket_id,
        cond_aug=cond_aug,
        seed=seed,
    )

    # Stream the raw video bytes back to the caller.
    return StreamingResponse(io.BytesIO(video), media_type="video/mp4")


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)