import gc

import cv2
import gradio as gr
import numpy as np
import supervision as sv
import torch
from accelerate import Accelerator
from diffusers import (
    ControlNetModel,
    EulerDiscreteScheduler,
    StableDiffusionXLControlNetImg2ImgPipeline,
)
from diffusers.utils import export_to_video, load_image
from moviepy.editor import VideoFileClip
from moviepy.video.fx.all import crop
from PIL import Image
from tqdm import tqdm

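# Canny-edge ControlNet paired with SDXL-Turbo img2img; accelerate keeps everything on CPU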
accelerator = Accelerator(cpu=True)
# the fp16 checkpoint variant is downloaded, then cast to bfloat16 for CPU inference
controlnet = accelerator.prepare(ControlNetModel.from_pretrained(
    "diffusers/controlnet-canny-sdxl-1.0",
    torch_dtype=torch.bfloat16,
    use_safetensors=True,
    variant="fp16",
))
pipe = accelerator.prepare(StableDiffusionXLControlNetImg2ImgPipeline.from_pretrained(
    "stabilityai/sdxl-turbo",
    controlnet=controlnet,
    torch_dtype=torch.bfloat16,
    use_safetensors=True,
    variant="fp16",
))
# channels_last memory layout tends to speed up convolutions on CPU
pipe.unet.to(memory_format=torch.channels_last)
pipe.scheduler = accelerator.prepare(EulerDiscreteScheduler.from_config(pipe.scheduler.config))
pipe = accelerator.prepare(pipe.to("cpu"))

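# plex: center-crop the upload to 512x512, cap it at 10 fps and two seconds,
# then restyle each frame with SDXL-Turbo img2img guided by its own Canny edges.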
def plex(fpath, prompt, neg_prompt, num_steps, strength, cn_scale, guidance, seed):
    gc.collect()
    video = './video.mp4'
    orvid = './orvid.mp4'
    canvid = './canvid.mp4'
    frames = []
    canframes = []
    orframes = []
    fin_frames = []
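    # probe the upload's geometry so we can center-crop to a 512x512 square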
    cap = cv2.VideoCapture(fpath)
    clip = VideoFileClip(fpath)
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    fps = cap.get(cv2.CAP_PROP_FPS)
    aspect = width / height
    # reject clips whose short side is under 512 px rather than upscale them
    if min(width, height) < 512:
        return None
    if aspect == 1:
        prep = clip.resize(height=512)
        left, top, right, bottom = 0, 0, 512, 512
    elif aspect > 1:
        # landscape: scale height to 512, then crop a centered 512-wide window
        prep = clip.resize(height=512)
        nwidth = int(512 * aspect)
        left = (nwidth - 512) // 2
        top = 0
        right = left + 512
        bottom = 512
    else:
        # portrait: scale width to 512, then crop a centered 512-tall window
        prep = clip.resize(width=512)
        nheight = int(512 / aspect)
        left = 0
        top = (nheight - 512) // 2
        right = 512
        bottom = top + 512
    closer = crop(prep, x1=left, y1=top, x2=right, y2=bottom)
    fps = min(fps, 10)
    closer.write_videofile(video, fps=fps)
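    # stylize at most two seconds of footage to keep CPU runtime tolerable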
    max_frames = int(fps * 2)
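    # supervision reads via OpenCV and yields BGR frames, so convert to RGB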
    for frame in tqdm(sv.get_video_frames_generator(source_path=video,)):
        frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    cap.release()
    ncap = cv2.VideoCapture(video)
    total_frames = int(ncap.get(cv2.CAP_PROP_FRAME_COUNT))
    ncap.release()
    if total_frames <= 0:
        return None
    # never process more frames than the clip actually contains
    max_frames = min(max_frames, total_frames)
    for b in range(int(max_frames)):
        original = load_image(Image.fromarray(frames[b])).resize((512, 512))
        original.save('./image.png', 'PNG')
        orframes.append(original)
        # Canny edge map, replicated to three channels, is the ControlNet conditioning image
        cannyimage = cv2.Canny(np.array(original), 100, 200)
        cannyimage = Image.fromarray(np.stack([cannyimage] * 3, axis=2))
        canframes.append(cannyimage)
        # re-seed per frame so every frame denoises from the same initial noise,
        # which keeps the stylization consistent across the clip
        generator = torch.Generator(device="cpu").manual_seed(int(seed))
        imoge = pipe(
            prompt=prompt,
            image=[original],
            control_image=[cannyimage],
            guidance_scale=guidance,
            num_inference_steps=int(num_steps),
            generator=generator,
            strength=strength,
            negative_prompt=neg_prompt,
            controlnet_conditioning_scale=cn_scale,
            width=512,
            height=512,
        )
        fin_frames.append(imoge.images[0])
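    # write the stylized result plus the canny and cropped-original clips for comparison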
    export_to_video(fin_frames, video, fps=fps)
    export_to_video(orframes, orvid, fps=fps)
    export_to_video(canframes, canvid, fps=fps)
    return video, canvid, orvid

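# slider ranges reflect SDXL-Turbo's sweet spot: very few steps and little to no guidance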
iface = gr.Interface(
    fn=plex,
    inputs=[
        gr.File(label="Your video", interactive=True),
        gr.Textbox(label="Prompt"),
        gr.Textbox(label="Negative prompt"),
        gr.Slider(label="Num inference steps", minimum=2, step=1, maximum=5, value=2),
        gr.Slider(label="Prompt strength", minimum=0.5, step=0.5, maximum=1.0, value=0.5),
        gr.Slider(label="ControlNet scale", minimum=0.01, step=0.01, maximum=0.99, value=0.80),
        gr.Slider(label="Guidance scale", minimum=0.0, step=0.1, maximum=9.9, value=0.0),
        gr.Slider(label="Manual seed", minimum=0, step=32, maximum=4836928, value=0),
    ],
    outputs=[
        gr.Video(label="Final"),
        gr.Video(label="Canny video"),
        gr.Video(label="Original"),
    ],
    description="Running on CPU, very slow! By JoPmt.",
)
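# a single-slot queue and one worker thread keep the CPU-only Space from oversubscribing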
iface.queue(max_size=1, api_open=False)
iface.launch(max_threads=1)