JoPmt committed on
Commit f6e4309 · 1 Parent(s): a16ce6e

Create app.py

Files changed (1)
app.py +178 -0
app.py ADDED
@@ -0,0 +1,178 @@
+ import gc
+ import cv2
+ import numpy as np
+ import torch
+ import gradio as gr
+ import supervision as sv
+ from PIL import Image
+ from tqdm import tqdm
+ from accelerate import Accelerator
+ from moviepy.editor import VideoFileClip
+ from moviepy.video.fx.all import crop
+ from diffusers import StableDiffusionControlNetImg2ImgPipeline, ControlNetModel, DPMSolverMultistepScheduler
+ from diffusers.utils import load_image, export_to_video
+
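+ # Run everything on CPU; accelerator.prepare() below keeps model placement consistent.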
+ accelerator = Accelerator(cpu=True)
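+ # Checkpoints offered in the UI dropdown; a few non-SD entries (e.g. Kandinsky, Karlo) may not load with this pipeline.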
+ models = [
+     "runwayml/stable-diffusion-v1-5",
+     "prompthero/openjourney-v4",
+     "CompVis/stable-diffusion-v1-4",
+     "stabilityai/stable-diffusion-2-1",
+     "stablediffusionapi/edge-of-realism",
+     "wavymulder/lomo-diffusion",
+     "sd-dreambooth-library/fashion",
+     "DucHaiten/DucHaitenDreamWorld",
+     "kandinsky-community/kandinsky-2-1",
+     "MirageML/lowpoly-cyberpunk",
+     "plasmo/woolitize-768sd1-5",
+     "plasmo/food-crit",
+     "johnslegers/epic-diffusion-v1.1",
+     "Fictiverse/ElRisitas",
+     "robotjung/SemiRealMix",
+     "herpritts/FFXIV-Style",
+     "RayHell/popupBook-diffusion",
+     "deadman44/SD_Photoreal_Merged_Models",
+     "johnslegers/epic-diffusion",
+     "tilake/China-Chic-illustration",
+     "wavymulder/modelshoot",
+     "prompthero/openjourney-lora",
+     "Fictiverse/Stable_Diffusion_VoxelArt_Model",
+     "darkstorm2150/Protogen_v2.2_Official_Release",
+     "hassanblend/HassanBlend1.5.1.2",
+     "hassanblend/hassanblend1.4",
+     "nitrosocke/redshift-diffusion",
+     "prompthero/openjourney-v2",
+     "nitrosocke/Arcane-Diffusion",
+     "Lykon/DreamShaper",
+     "nitrosocke/mo-di-diffusion",
+     "dreamlike-art/dreamlike-diffusion-1.0",
+     "dreamlike-art/dreamlike-photoreal-2.0",
+     "digiplay/RealismEngine_v1",
+     "digiplay/AIGEN_v1.4_diffusers",
+     "stablediffusionapi/dreamshaper-v6",
+     "JackAnon/GorynichMix",
+     "p1atdev/liminal-space-diffusion",
+     "nadanainone/gigaschizonegs",
+     "darkVOYAGE/dvMJv4",
+     "digiplay/GhostMix",
+     "ThePioneer/MISA",
+     "TheLastBen/froggy-style-v21-768",
+     "FloydianSound/Nixeu_Diffusion_v1-5",
+     "kakaobrain/karlo-v1-alpha-image-variations",
+     "digiplay/PotoPhotoRealism_v1",
+     "ConsistentFactor/Aurora-By_Consistent_Factor",
+     "rim0/quadruped_mechas",
+     "Akumetsu971/SD_Samurai_Anime_Model",
+     "sd-dreambooth-library/original-character-cyclps",
+     "AIArtsChannel/steampunk-diffusion",
+ ]
+
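+ # Canny edge ControlNet, loaded once at startup and shared across requests.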
+ controlnet = accelerator.prepare(ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float32))
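+
+ # one=inference steps, two=denoise strength, three=controlnet scale, four=guidance scale, five=seed (argument names kept from the original).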
+ def plex(fpath, text, neg_prompt, modil, one, two, three, four, five):
+     gc.collect()
+     modal = modil
+     pipe = accelerator.prepare(StableDiffusionControlNetImg2ImgPipeline.from_pretrained(modal, controlnet=controlnet, torch_dtype=torch.float32, use_safetensors=False, safety_checker=None))
+     pipe.unet.to(memory_format=torch.channels_last)
+     pipe.scheduler = accelerator.prepare(DPMSolverMultistepScheduler.from_config(pipe.scheduler.config))
+     pipe = pipe.to("cpu")
+     prompt = text
+     video = './video.mp4'
+     orvid = './orvid.mp4'
+     canvid = './canvid.mp4'
+     frames = []
+     canframes = []
+     orframes = []
+     fin_frames = []
+     max_frames = 0
+     cap = cv2.VideoCapture(fpath)
+     clip = VideoFileClip(fpath)
+     height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+     width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+     fps = cap.get(cv2.CAP_PROP_FPS)
+     aspect = width / height
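+     # Resize so the short side is 512, then centre-crop to 512x512; clips under 512 px on the short side are rejected.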
+     if aspect == 1 and height >= 512:
+         nwidth = 512
+         nheight = 512
+         prep = clip.resize(height=nheight)
+         left = 0
+         top = 0
+         right = 512
+         bottom = 512
+     elif aspect > 1 and height >= 512:
+         nheight = 512
+         nwidth = int(nheight * aspect)
+         prep = clip.resize(height=nheight)
+         left = (nwidth - 512) / 2
+         top = 0
+         right = (nwidth + 512) / 2
+         bottom = nheight
+     elif aspect < 1 and width >= 512:
+         nwidth = 512
+         nheight = int(nwidth / aspect)
+         prep = clip.resize(height=nheight)
+         left = 0
+         top = (nheight - 512) / 2
+         right = nwidth
+         bottom = (nheight + 512) / 2
+     else:
+         return None
+     closer = crop(prep, x1=left, y1=top, x2=right, y2=bottom)
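+     # Cap playback at 33 fps and process at most ~4 seconds of footage to bound CPU time.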
+     if fps > 33:
+         closer.write_videofile(video, fps=33)
+         fps = 33
+     else:
+         closer.write_videofile(video, fps=fps)
+     max_frames = int(fps * 4)
+     for frame in tqdm(sv.get_video_frames_generator(source_path=video)):
+         frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
+     cap.release()
+     cv2.destroyAllWindows()
+     ncap = cv2.VideoCapture(video)
+     total_frames = int(ncap.get(cv2.CAP_PROP_FRAME_COUNT))
+     if total_frames <= 0:
+         return None
+     max_frames = min(max_frames, total_frames)
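+     # For each frame: build a Canny edge map and run ControlNet img2img conditioned on it.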
+     for b in range(max_frames):
+         frame = frames[b]
+         original = load_image(Image.fromarray(frame))
+         original = original.resize((512, 512))
+         original = original.convert("RGB")
+         original.save('./image.png', 'PNG')
+         orframes.append(original)
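+         # Canny edges replicated to three channels form the ControlNet conditioning image.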
+         cannyimage = np.array(original)
+         cannyimage = cv2.Canny(cannyimage, 100, 200)
+         cannyimage = cannyimage[:, :, None]
+         cannyimage = np.concatenate([cannyimage, cannyimage, cannyimage], axis=2)
+         cannyimage = Image.fromarray(cannyimage)
+         canframes.append(cannyimage)
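+         # Re-seeding with the same value every frame keeps the diffusion noise consistent between frames.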
+         generator = torch.Generator(device="cpu").manual_seed(int(five))
+         imoge = pipe(prompt=prompt, image=[original], control_image=[cannyimage], guidance_scale=four, num_inference_steps=int(one), generator=generator, strength=two, negative_prompt=neg_prompt, controlnet_conditioning_scale=three, width=512, height=512)
+         fin_frames.append(imoge.images[0])
+     ncap.release()
+     cv2.destroyAllWindows()
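+     # Note: './video.mp4' (the cropped source) is overwritten here with the diffused frames.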
+     export_to_video(fin_frames, video, fps=fps)
+     export_to_video(orframes, orvid, fps=fps)
+     export_to_video(canframes, canvid, fps=fps)
+     return video, canvid, orvid
+
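+ # Single-item queue and one thread: the CPU-bound pipeline handles requests serially.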
+ iface = gr.Interface(
+     fn=plex,
+     inputs=[
+         gr.File(label="Your video", interactive=True),
+         gr.Textbox(label="Prompt"),
+         gr.Textbox(label="Negative prompt"),
+         gr.Dropdown(choices=models, label="SD model", value=models[0], type="value"),
+         gr.Slider(label="Num inference steps", minimum=1, step=1, maximum=10, value=5),
+         gr.Slider(label="Denoise strength", minimum=0.01, step=0.01, maximum=1.00, value=0.50),
+         gr.Slider(label="ControlNet scale", minimum=0.01, step=0.01, maximum=1.00, value=0.80),
+         gr.Slider(label="Guidance scale", minimum=0.01, step=0.01, maximum=10.00, value=2.00),
+         gr.Slider(label="Seed", minimum=1, step=32, maximum=4836928, value=1),
+     ],
+     outputs=[gr.Video(label="final"), gr.Video(label="canny vid"), gr.Video(label="orig")],
+     description="Running on CPU, very slow! By JoPmt.",
+ )
+ iface.queue(max_size=1, api_open=False)
+ iface.launch(max_threads=1)