Jankidepala committed on
Commit
bb08efb
1 Parent(s): 7578128

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +127 -1
app.py CHANGED
@@ -1,3 +1,129 @@
1
  import gradio as gr
 
 
 
 
 
 
 
 
 
2
 
3
- gr.load("models/pentagoniac/SEMIKONG-8b-GPTQ").launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
+ from transformers import pipeline
3
+ import io, base64
4
+ from PIL import Image
5
+ import numpy as np
6
+ import tensorflow as tf
7
+ import mediapy
8
+ import os
9
+ import sys
10
+ from huggingface_hub import snapshot_download
11
 
12
# --- Model / backend setup (runs once at import time) ---

# 1. GPT-J: Story Generation Pipeline
# NOTE(review): despite the "GPT-J" label, the checkpoint below is a GPT-2
# genre story generator — the model id is authoritative.
story_gen = pipeline("text-generation", "pranavpsv/gpt2-genre-story-generator")

# 2. LatentDiffusion: Latent Diffusion Interface
# Loads the hosted Space as a callable interface (text -> images).
image_gen = gr.Interface.load("spaces/multimodalart/latentdiffusion")

# 3. FILM: Frame Interpolation Model (code re-use from spaces/akhaliq/frame-interpolation/tree/main)
# Clones the research repo at startup so its `eval` package can be imported.
os.system("git clone https://github.com/google-research/frame-interpolation")
sys.path.append("frame-interpolation")
from eval import interpolator, util

# mediapy needs an ffmpeg binary to encode the output video.
ffmpeg_path = util.get_ffmpeg_path()
mediapy.set_ffmpeg(ffmpeg_path)

# Download the FILM-style weights and build the interpolator instance.
model = snapshot_download(repo_id="akhaliq/frame-interpolation-film-style")
# NOTE(review): this rebinds the imported `interpolator` MODULE name to an
# Interpolator INSTANCE; all downstream code uses the instance, so it works,
# but the shadowing is easy to trip over when editing.
interpolator = interpolator.Interpolator(model, None)
28
+
29
def generate_story(choice, input_text):
    """Generate a short story of genre *choice* seeded by *input_text*.

    Wraps the inputs in the "<BOS> <genre> text" prompt format the
    gpt2-genre-story-generator checkpoint was trained on, then strips the
    echoed prompt prefix (everything up to the second '> ') from the
    model output before returning it.
    """
    query = "<BOS> <{0}> {1}".format(choice, input_text)
    print(query)  # trace the exact prompt sent to the model

    # The pipeline returns a list of dicts; there is a single generation.
    raw = story_gen(query)[0]['generated_text']
    return raw.split('> ')[2]
38
+
39
def generate_images(generated_text):
    """Render four 256x256 images for *generated_text* via the hosted
    latent-diffusion Space and return them as PIL Image objects."""
    # Fixed sampling parameters expected by the latentdiffusion Space.
    steps, width, height, num_images, diversity = 50, 256, 256, 4, 6
    image_bytes = image_gen(generated_text, steps, width, height, num_images, diversity)

    # Algo from spaces/Gradio-Blocks/latent_gpt2_story/blob/main/app.py:
    # each entry of image_bytes[1] is a one-element list holding a
    # base64 data-URI PNG string.
    def _decode(entry):
        payload = entry[0].replace("data:image/png;base64,", "")
        raw = base64.decodebytes(bytes(payload, "utf-8"))
        return Image.open(io.BytesIO(raw))

    return [_decode(entry) for entry in image_bytes[1]]
57
+
58
def generate_interpolation(gallery):
    """Interpolate the gallery's images into a smooth 15-fps video.

    Parameters
    ----------
    gallery : list[str]
        Base64 data-URI PNG strings, as produced by the image gallery.
        NOTE(review): newer Gradio versions may hand galleries over as
        file paths instead of data URIs — confirm against the installed
        version.

    Returns
    -------
    str
        Path of the rendered video file ("out.mp4").
    """
    times_to_interpolate = 4

    generated_images = []
    for image_str in gallery:
        image_str = image_str.replace("data:image/png;base64,","")
        decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
        img = Image.open(io.BytesIO(decoded_bytes))
        generated_images.append(img)

    # Save every decoded frame instead of hard-coding exactly four
    # (the original indexed [0]..[3] and raised IndexError for any other
    # gallery size); behavior is unchanged for the usual 4-image gallery.
    input_frames = []
    for i, img in enumerate(generated_images):
        frame_path = "frame_{0}.png".format(i)
        img.save(frame_path)
        input_frames.append(frame_path)

    # FILM recursive interpolation: each level doubles the frame count.
    frames = list(util.interpolate_recursively_from_files(input_frames, times_to_interpolate, interpolator))

    mediapy.write_video("out.mp4", frames, fps=15)

    return "out.mp4"
80
+
81
+
82
+
83
# --- Gradio Blocks UI: three-step story -> images -> video pipeline ---
# NOTE(review): indentation reconstructed from the diff view; the nesting
# below follows the standard Gradio-Blocks story/video space layout.
demo = gr.Blocks()

with demo:
    with gr.Row():

        # Left column (inputs)
        with gr.Column():
            input_story_type = gr.Radio(choices=['superhero', 'action', 'drama', 'horror', 'thriller', 'sci_fi'], value='sci_fi', label="Genre")
            input_start_text = gr.Textbox(placeholder='A teddy bear outer space', label="Starting Text")

            gr.Markdown("Be sure to run each of the buttons one at a time, they depend on each others' outputs!")

            # Rows of instructions & buttons
            with gr.Row():
                gr.Markdown("1. Select a type of story, then write some starting text! Then hit the 'Generate Story' button to generate a story! Feel free to edit the generated story afterwards!")
                button_gen_story = gr.Button("Generate Story")
            with gr.Row():
                gr.Markdown("2. After generating a story, hit the 'Generate Images' button to create some visuals for your story! (Can re-run multiple times!)")
                button_gen_images = gr.Button("Generate Images")
            with gr.Row():
                gr.Markdown("3. After generating some images, hit the 'Generate Video' button to create a short video by interpolating the previously generated visuals!")
                button_gen_video = gr.Button("Generate Video")

            # Rows of references
            with gr.Row():
                gr.Markdown("--Models Used--")
            with gr.Row():
                gr.Markdown("Story Generation: [GPT-J](https://huggingface.co/pranavpsv/gpt2-genre-story-generator)")
            with gr.Row():
                gr.Markdown("Image Generation Conditioned on Text: [Latent Diffusion](https://huggingface.co/spaces/multimodalart/latentdiffusion) | [Github Repo](https://github.com/CompVis/latent-diffusion)")
            with gr.Row():
                gr.Markdown("Interpolations: [FILM](https://huggingface.co/spaces/akhaliq/frame-interpolation) | [Github Repo](https://github.com/google-research/frame-interpolation)")
            with gr.Row():
                gr.Markdown("![visitor badge](https://visitor-badge.glitch.me/badge?page_id=gradio-blocks_story_and_video_generation)")

        # Right column (outputs)
        with gr.Column():
            output_generated_story = gr.Textbox(label="Generated Story")
            output_gallery = gr.Gallery(label="Generated Story Images")
            output_interpolation = gr.Video(label="Generated Video")

    # Bind functions to buttons: each step feeds the next step's input.
    button_gen_story.click(fn=generate_story, inputs=[input_story_type , input_start_text], outputs=output_generated_story)
    button_gen_images.click(fn=generate_images, inputs=output_generated_story, outputs=output_gallery)
    button_gen_video.click(fn=generate_interpolation, inputs=output_gallery, outputs=output_interpolation)

# NOTE(review): enable_queue is deprecated in Gradio 3.x+ in favor of
# demo.queue() — confirm against the installed gradio version.
demo.launch(debug=True, enable_queue=True)