3v324v23 committed
Commit c44af3b • 1 Parent(s): 3b6cf03
.gitignore CHANGED
@@ -1,2 +1,4 @@
 flagged/
-.ipynb_checkpoints/
+.ipynb_checkpoints
+.ipynb_checkpoints
+*/.ipynb_checkpoints/*
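(A note on the new rules: a `.gitignore` pattern without a slash, such as `.ipynb_checkpoints`, already matches a directory of that name at any depth, so the duplicated second line and the `*/.ipynb_checkpoints/*` glob, which only reaches one directory level down, are redundant with the first.)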
.ipynb_checkpoints/.gitignore-checkpoint DELETED
@@ -1,2 +0,0 @@
-flagged/
-.ipynb_checkpoints/
.ipynb_checkpoints/README-checkpoint.md DELETED
@@ -1,12 +0,0 @@
----
-title: Stablepod
-emoji: 🌖
-colorFrom: pink
-colorTo: red
-sdk: gradio
-sdk_version: 3.4
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
.ipynb_checkpoints/app-checkpoint.py DELETED
@@ -1,47 +0,0 @@
-import gradio as gr
-import torch
-from torch import autocast
-from diffusers import StableDiffusionPipeline
-import argparse
-from moviepy.editor import AudioFileClip, ImageClip
-
-parser = argparse.ArgumentParser()
-setshare = parser.add_argument('--setshare', default=True, action=argparse.BooleanOptionalAction)
-
-def process_inputs(prompt, audio):
-    image = get_stable_diffusion_image(prompt)
-    video = add_static_image_to_audio(image, audio)
-    return video
-
-
-
-def add_static_image_to_audio(image, audio):
-    """Create and save a video file to `output_path` after
-    combining a static image that is located in `image_path`
-    with an audio file in `audio_path`"""
-    # create the audio clip object
-    audio_clip = AudioFileClip(audio)
-    # create the image clip object
-    image_clip = ImageClip(image)
-    # use set_audio method from image clip to combine the audio with the image
-    video_clip = image_clip.set_audio(audio)
-    # specify the duration of the new clip to be the duration of the audio clip
-    video_clip.duration = audio.duration
-    # set the FPS to 1
-    video_clip.fps = 1
-    # write the resuling video clip
-    return video_clip
-
-def get_stable_diffusion_image(prompt):
-    model_id = "CompVis/stable-diffusion-v1-4"
-    device = "cuda"
-    pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token=True)
-    pipe = pipe.to(device)
-    with autocast("cuda"):
-        image = pipe(prompt, guidance_scale=7.5)["sample"][0]
-    print(image)
-    return image
-
-
-iface = gr.Interface(fn=process_inputs, inputs=["text", "audio"], outputs="video")
-iface.launch(share=setshare)
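
A note on the deleted script (the same code survives in the live app.py diffed below): `add_argument` returns an `argparse.Action`, not the parsed flag, so `launch(share=setshare)` never receives a boolean; `set_audio` is handed the raw audio path instead of the `AudioFileClip`; and `.duration` is read off that path string. A minimal corrected sketch of those spots, assuming moviepy 1.x, Python 3.9+ (for `BooleanOptionalAction`), and a PIL image coming back from the pipeline:

# Minimal corrected sketch (not the committed code); assumes moviepy 1.x,
# Python 3.9+ (BooleanOptionalAction), and a PIL image from the pipeline.
import argparse

import numpy as np
from moviepy.editor import AudioFileClip, ImageClip

parser = argparse.ArgumentParser()
parser.add_argument("--setshare", default=True, action=argparse.BooleanOptionalAction)
args = parser.parse_args()  # the parsed value lives on args, not on add_argument's return

def add_static_image_to_audio(image, audio_path):
    """Combine a static image with the audio file at `audio_path` into a video clip."""
    audio_clip = AudioFileClip(audio_path)
    return (
        ImageClip(np.array(image))          # ImageClip takes a path or array, not a PIL image
        .set_audio(audio_clip)              # attach the audio clip, not the raw path
        .set_duration(audio_clip.duration)  # duration comes from the audio clip
        .set_fps(1)                         # one frame per second suffices for a still image
    )

# ...and launch with the parsed flag: iface.launch(share=args.setshare)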
README.md CHANGED
@@ -1,12 +1,14 @@
 ---
 title: Stablepod
-emoji: 🌖
-colorFrom: pink
-colorTo: red
+emoji: 🎼
+colorFrom: green
+colorTo: blue
 sdk: gradio
 sdk_version: 3.4
+python_version: 3.9.7
 app_file: app.py
 pinned: false
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+s
app.py CHANGED
@@ -14,7 +14,6 @@ def process_inputs(prompt, audio):
     return video
 
 
-
 def add_static_image_to_audio(image, audio):
     """Create and save a video file to `output_path` after
     combining a static image that is located in `image_path`