Epoching committed on
Commit e4ae5c8 • 1 Parent(s): 88460b6
Files changed (3)
  1. app.py +120 -0
  2. packages.txt +1 -0
  3. requirements.txt +16 -0
app.py ADDED
@@ -0,0 +1,120 @@
+ import gradio as gr
+ from transformers import pipeline
+ import io, base64
+ from PIL import Image
+
+ import numpy as np
+ import tensorflow as tf
+ import mediapy
+
+ import os
+ os.system("git clone https://github.com/google-research/frame-interpolation")  # fetch FILM code so eval/ is importable
+ import sys
+ sys.path.append("frame-interpolation")
+ from eval import interpolator, util
+
+ from huggingface_hub import snapshot_download
+
+ ffmpeg_path = util.get_ffmpeg_path()
+ mediapy.set_ffmpeg(ffmpeg_path)
+
+ story_gen = pipeline("text-generation", "pranavpsv/gpt2-genre-story-generator")
+ image_gen = gr.Interface.load("spaces/multimodalart/latentdiffusion")
+
+ # spaces/akhaliq/frame-interpolation/tree/main
+ model = snapshot_download(repo_id="akhaliq/frame-interpolation-film-style")
+ interpolator = interpolator.Interpolator(model, None)  # rebinds the imported module name to a model instance
+
+ def generate_story(choice, input_text):
+     print(choice)
+     print(input_text)
+
+     query = "<BOS> <{0}> {1}".format(choice, input_text)
+
+     print(query)
+     generated_text = story_gen(query)
+     generated_text = generated_text[0]['generated_text']
+     generated_text = generated_text.split('> ')[2]  # drop the '<BOS> <genre>' prefix tokens
+
+     return generated_text
+
+ def generate_images(generated_text):
+     steps = 45
+     width = 256
+     height = 256
+     num_images = 4
+     diversity = 6
+     image_bytes = image_gen(generated_text, steps, width, height, num_images, diversity)
+
+     # Algo from spaces/Gradio-Blocks/latent_gpt2_story/blob/main/app.py
+     print(len(image_bytes))
+     generated_images = []
+     for image in image_bytes[1]:  # each entry wraps a base64 data URI
+         image_str = image[0]
+         image_str = image_str.replace("data:image/png;base64,", "")
+         decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
+         img = Image.open(io.BytesIO(decoded_bytes))
+         generated_images.append(img)
+
+     return generated_images
+
+ def generate_interpolation(gallery):
+     times_to_interpolate = 4
+
+     generated_images = []
+     for image_str in gallery:  # gallery values arrive as base64 data URIs
+         image_str = image_str.replace("data:image/png;base64,", "")
+         decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
+         img = Image.open(io.BytesIO(decoded_bytes))
+         generated_images.append(img)
+
+     generated_images[0].save('frame_0.png')
+     generated_images[1].save('frame_1.png')
+     generated_images[2].save('frame_2.png')
+     generated_images[3].save('frame_3.png')
+
+     input_frames = ["frame_0.png", "frame_1.png", "frame_2.png", "frame_3.png"]
+
+     frames = list(util.interpolate_recursively_from_files(input_frames, times_to_interpolate, interpolator))
+
+     mediapy.write_video("out.mp4", frames, fps=15)
+
+     return "out.mp4"
+
+
+
+ demo = gr.Blocks()
+
+ with demo:
+     with gr.Row():
+
+         # Left column (inputs)
+         with gr.Column():
+             input_story_type = gr.Radio(choices=['superhero', 'action', 'drama', 'horror', 'thriller', 'sci_fi'], label="Genre")
+             input_start_text = gr.Textbox(placeholder='A teddy bear in outer space', label="Starting Text")
+
+             gr.Markdown("Be sure to run each of the buttons one at a time; they depend on each other's outputs!")
+
+             # Rows of instructions & buttons
+             with gr.Row():
+                 gr.Markdown("1. Select a type of story, then write some starting text! Then hit the 'Generate Story' button to generate a story!")
+                 button_gen_story = gr.Button("Generate Story")
+             with gr.Row():
+                 gr.Markdown("2. After generating a story, hit the 'Generate Images' button to create some visuals for your story! (Can re-run multiple times!)")
+                 button_gen_images = gr.Button("Generate Images")
+             with gr.Row():
+                 gr.Markdown("3. After generating some images, hit the 'Generate Video' button to create a short video by interpolating the previously generated visuals!")
+                 button_gen_video = gr.Button("Generate Video")
+
+         # Right column (outputs)
+         with gr.Column():
+             output_generated_story = gr.Textbox(label="Generated Story")
+             output_gallery = gr.Gallery(label="Generated Story Images")
+             output_interpolation = gr.Video(label="Generated Video")
+
+     # Bind functions to buttons
+     button_gen_story.click(fn=generate_story, inputs=[input_story_type, input_start_text], outputs=output_generated_story)
+     button_gen_images.click(fn=generate_images, inputs=output_generated_story, outputs=output_gallery)
+     button_gen_video.click(fn=generate_interpolation, inputs=output_gallery, outputs=output_interpolation)
+
+ demo.launch(debug=True)
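
For reference, the three stages can also be chained without the Gradio UI. The sketch below is not part of this commit; it assumes the module-level setup from app.py above (generate_story, generate_images, util, interpolator, mediapy) is already in scope, and it bypasses generate_interpolation, which expects base64 gallery strings rather than PIL images:

    # Sketch only: run story -> images -> video headlessly, reusing app.py's setup.
    story = generate_story("sci_fi", "A teddy bear in outer space")
    images = generate_images(story)        # four PIL images from the diffusion Space
    for i, img in enumerate(images):
        img.save(f"frame_{i}.png")         # the interpolator reads frames from disk
    paths = [f"frame_{i}.png" for i in range(len(images))]
    frames = list(util.interpolate_recursively_from_files(paths, 4, interpolator))
    mediapy.write_video("out.mp4", frames, fps=15)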
packages.txt ADDED
@@ -0,0 +1 @@
+ ffmpeg
requirements.txt ADDED
@@ -0,0 +1,16 @@
+ transformers
+ torch
+
+ # spaces/akhaliq/frame-interpolation/blob/main/requirements.txt
+ tensorflow==2.6.2 # The latest should include tensorflow-gpu
+ tensorflow-datasets==4.4.0
+ tensorflow-addons==0.15.0
+ absl-py==0.12.0
+ gin-config==0.5.0
+ parameterized==0.8.1
+ mediapy==1.0.3
+ scikit-image==0.19.1
+ apache-beam==2.34.0
+ google-cloud-bigquery-storage==1.1.0 # Suppresses a harmless error from beam
+ natsort==8.1.0
+ image-tools
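
Because the pins above are strict, a quick startup check can surface environment drift early. A minimal sketch, not part of this commit, using only the standard library and TensorFlow:

    # Optional sanity check: confirm the pinned TensorFlow build and the
    # ffmpeg binary installed via packages.txt are both available.
    import shutil
    import tensorflow as tf

    assert tf.__version__.startswith("2.6"), f"expected TF 2.6.x, got {tf.__version__}"
    assert shutil.which("ffmpeg"), "ffmpeg not on PATH; see packages.txt"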