chaitali committed
Commit fe30080
Parent: 176de74

Add application file

Files changed (4)
  1. Dockerfile +37 -0
  2. README.md +3 -3
  3. app.py +196 -0
  4. requirements.txt +7 -0
Dockerfile ADDED
@@ -0,0 +1,37 @@
+ FROM python:3.8.10
+
+ WORKDIR /content
+
+ RUN apt-get update -y && apt-get upgrade -y && apt-get install -y sudo python3-pip && pip3 install --upgrade pip
+ RUN apt-get install -y gnupg wget htop sudo git git-lfs software-properties-common build-essential cmake curl
+ RUN apt-get install -y ffmpeg libavcodec-dev libavformat-dev libavdevice-dev libgl1 libgtk2.0-0 jq libdc1394-22-dev libraw1394-dev libopenblas-base
+
+ RUN pip3 install pandas scipy matplotlib torch torchvision torchaudio gradio altair imageio-ffmpeg pocketsphinx jq "numpy==1.23.1"
+
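+ # The pocketsphinx build below configures with `cmake -S . -B build`, so the
+ # distro CMake is replaced by 3.20.0 built from source.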
+ RUN sudo apt remove -y cmake
+ RUN sudo apt-get install -y build-essential libssl-dev
+ RUN wget https://github.com/Kitware/CMake/releases/download/v3.20.0/cmake-3.20.0.tar.gz
+ RUN tar -zxvf cmake-3.20.0.tar.gz
+ # Each RUN starts a fresh shell, so the cd must be chained with the build steps
+ RUN cd cmake-3.20.0 && ./bootstrap && make && sudo make install
+ RUN cmake --version
+
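+ # Prebuilt pocketsphinx and one-shot-talking-face bundles are pulled from the
+ # Hugging Face Hub via git-lfs rather than built from upstream sources.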
+ RUN git lfs install
+ RUN git clone https://huggingface.co/camenduru/pocketsphinx-20.04-t4 pocketsphinx && cd pocketsphinx && cmake -S . -B build && cmake --build build --target install
+
+ RUN git clone https://huggingface.co/camenduru/one-shot-talking-face-20.04-t4 one-shot-talking-face && cd one-shot-talking-face && pip install -r requirements.txt && chmod 755 OpenFace/FeatureExtraction
+ RUN mkdir /content/out
+
+ COPY app.py /content/app.py
+
+ RUN git clone https://github.com/TencentARC/GFPGAN.git && cd GFPGAN && pip install basicsr && pip install facexlib && pip install -r requirements.txt && python setup.py develop && pip install realesrgan
+
+ RUN git clone https://github.com/chi0tzp/PyVideoFramesExtractor && cd PyVideoFramesExtractor && pip install -r requirements.txt
+
+ EXPOSE 7860
+
+ CMD ["python3", "app.py"]
README.md CHANGED
@@ -1,8 +1,8 @@
  ---
  title: One Shot Talking Face From Text
- emoji: 📈
- colorFrom: pink
- colorTo: indigo
+ emoji: 🐠
+ colorFrom: yellow
+ colorTo: pink
  sdk: docker
  pinned: false
  ---
app.py ADDED
@@ -0,0 +1,196 @@
+ import os, subprocess, tempfile
+ import gradio as gr
+ import torchaudio
+ import soundfile
+ import dlib
+ import cv2
+ import imageio
+ import ffmpeg
+ from PIL import Image
+ from gtts import gTTS
+ from pydub import AudioSegment
+ from fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub
+ from fairseq.models.text_to_speech.hub_interface import TTSHubInterface
+
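+ # Stitch the GFPGAN-restored frames back into a single 25 fps video.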
+ def merge_frames():
+     path = '/content/video_results/restored_imgs'
+     image_folder = os.fsencode(path)
+     print(image_folder)
+     filenames = []
+
+     for file in os.listdir(image_folder):
+         filename = os.fsdecode(file)
+         if filename.endswith(('.jpg', '.png', '.gif')):
+             filenames.append(filename)
+
+     filenames.sort()  # os.listdir has no guaranteed order, so sort the frames
+     images = list(map(lambda filename: imageio.imread(os.path.join(path, filename)), filenames))
+     imageio.mimsave('/content/video_output.mp4', images, fps=25.0)  # adjust the frame rate as needed
+
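+ # Mux the merged video with the driving audio into the final output file.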
+ def audio_video():
+     input_video = ffmpeg.input('/content/video_output.mp4')
+     input_audio = ffmpeg.input('/content/audio.wav')
+     ffmpeg.concat(input_video, input_audio, v=1, a=1).output('/content/final_output.mp4').run(overwrite_output=True)
+
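+ # Grow a dlib face box by at least `increase_area` on each side, expanding the
+ # shorter dimension toward square, then pull it back inside the h x w image.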
+ def compute_aspect_preserved_bbox(bbox, increase_area, h, w):
+     left, top, right, bot = bbox
+     width = right - left
+     height = bot - top
+
+     width_increase = max(increase_area, ((1 + 2 * increase_area) * height - width) / (2 * width))
+     height_increase = max(increase_area, ((1 + 2 * increase_area) * width - height) / (2 * height))
+
+     left_t = int(left - width_increase * width)
+     top_t = int(top - height_increase * height)
+     right_t = int(right + width_increase * width)
+     bot_t = int(bot + height_increase * height)
+
+     left_oob = -min(0, left_t)
+     right_oob = right - min(right_t, w)
+     top_oob = -min(0, top_t)
+     bot_oob = bot - min(bot_t, h)
+
+     if max(left_oob, right_oob, top_oob, bot_oob) > 0:
+         max_w = max(left_oob, right_oob)
+         max_h = max(top_oob, bot_oob)
+         if max_w > max_h:
+             return left_t + max_w, top_t + max_w, right_t - max_w, bot_t - max_w
+         else:
+             return left_t + max_h, top_t + max_h, right_t - max_h, bot_t - max_h
+     else:
+         return (left_t, top_t, right_t, bot_t)
+
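+ # Detect a face, crop around it with some margin and save a 256x256 image for
+ # the talking-head model; fall back to a plain resize when no face is found.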
+ def crop_src_image(src_img, detector=None):
+     if detector is None:
+         detector = dlib.get_frontal_face_detector()
+     save_img = '/content/image_pre.png'
+     img = cv2.imread(src_img)
+     faces = detector(img, 0)
+     h, width, _ = img.shape
+     if len(faces) > 0:
+         bbox = [faces[0].left(), faces[0].top(), faces[0].right(), faces[0].bottom()]
+         l = bbox[3] - bbox[1]
+         bbox[1] = bbox[1] - l * 0.1
+         bbox[3] = bbox[3] - l * 0.1
+         bbox[1] = max(0, bbox[1])
+         bbox[3] = min(h, bbox[3])
+         bbox = compute_aspect_preserved_bbox(tuple(bbox), 0.5, img.shape[0], img.shape[1])
+         img = img[bbox[1]:bbox[3], bbox[0]:bbox[2]]
+         img = cv2.resize(img, (256, 256))
+         cv2.imwrite(save_img, img)
+     else:
+         img = cv2.resize(img, (256, 256))
+         cv2.imwrite(save_img, img)
+
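+ # Letterbox a PIL image onto a black square so resizing keeps the aspect ratio.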
+ def pad_image(image):
+     w, h = image.size
+     if w == h:
+         return image
+     elif w > h:
+         new_image = Image.new(image.mode, (w, w), (0, 0, 0))
+         new_image.paste(image, (0, (w - h) // 2))
+         return new_image
+     else:
+         new_image = Image.new(image.mode, (h, h), (0, 0, 0))
+         new_image.paste(image, ((h - w) // 2, 0))
+         return new_image
+
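+ # Align phonemes with pocketsphinx, reshape the alignment JSON with jq, then
+ # drive the one-shot-talking-face test script with image, audio and phonemes.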
+ def calculate(image_in, audio_in):
+     waveform, sample_rate = torchaudio.load(audio_in)
+     torchaudio.save("/content/audio.wav", waveform, sample_rate, encoding="PCM_S", bits_per_sample=16)
+     image = Image.open(image_in)
+     image = pad_image(image)
+     image.save("image.png")
+
+     pocketsphinx_run = subprocess.run(['pocketsphinx', '-phone_align', 'yes', 'single', '/content/audio.wav'], check=True, capture_output=True)
+     jq_run = subprocess.run(['jq', '[.w[]|{word: (.t | ascii_upcase | sub("<S>"; "sil") | sub("<SIL>"; "sil") | sub("\\\(2\\\)"; "") | sub("\\\(3\\\)"; "") | sub("\\\(4\\\)"; "") | sub("\\\[SPEECH\\\]"; "SIL") | sub("\\\[NOISE\\\]"; "SIL")), phones: [.w[]|{ph: .t | sub("\\\+SPN\\\+"; "SIL") | sub("\\\+NSN\\\+"; "SIL"), bg: (.b*100)|floor, ed: (.b*100+.d*100)|floor}]}]'], input=pocketsphinx_run.stdout, capture_output=True)
+     with open("test.json", "w") as f:
+         f.write(jq_run.stdout.decode('utf-8').strip())
+
+     os.system("cd /content/one-shot-talking-face && python3 -B test_script.py --img_path /content/results/restored_imgs/image_pre.png --audio_path /content/audio.wav --phoneme_path /content/test.json --save_dir /content/train")
+     return "/content/train/image_audio.mp4"
+
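+ # Full pipeline: crop and GFPGAN-restore the photo, synthesize the talking
+ # face, restore every extracted frame, then remux the frames with the audio.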
+ def one_shot_talking(image_in, audio_in):
+     # Pre-processing of image
+     crop_src_image(image_in)
+
+     # Improve quality of the input image
+     os.system("python /content/GFPGAN/inference_gfpgan.py --upscale 2 -i /content/image_pre.png -o /content/results --bg_upsampler realesrgan")
+
+     image_in_one_shot = '/content/results/restored_imgs/image_pre.png'
+
+     # One-shot talking face algorithm
+     calculate(image_in_one_shot, audio_in)
+
+     # Video quality improvement
+     # 1. Extract the frames from the video file using PyVideoFramesExtractor
+     os.system("python /content/PyVideoFramesExtractor/extract.py --video=/content/train/image_pre_audio.mp4")
+
+     # 2. Improve the quality of each frame using GFPGAN
+     os.system("python /content/GFPGAN/inference_gfpgan.py --upscale 2 -i /content/extracted_frames/image_pre_audio_frames -o /content/video_results --bg_upsampler realesrgan")
+
+     # 3. Merge all the frames into one video using imageio
+     merge_frames()
+
+     audio_video()
+     return "/content/final_output.mp4"
+
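+ # Text-to-speech by voice: gTTS for the female voice, a fairseq FastSpeech2
+ # model for the male voice; the resulting audio then drives the pipeline.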
+ def one_shot(image, input_text, gender):
+     if gender.lower() == 'female':
+         tts = gTTS(input_text)
+         with tempfile.NamedTemporaryFile(suffix='.mp3', delete=False) as f:
+             tts.write_to_fp(f)
+             f.seek(0)
+             sound = AudioSegment.from_file(f.name, format="mp3")
+             sound.export("/content/audio.wav", format="wav")
+         return one_shot_talking(image, '/content/audio.wav')
+
+     elif gender.lower() == 'male':
+         models, cfg, task = load_model_ensemble_and_task_from_hf_hub(
+             "Voicemod/fastspeech2-en-male1",
+             arg_overrides={"vocoder": "hifigan", "fp16": False}
+         )
+         model = models[0].cuda()
+         TTSHubInterface.update_cfg_with_data_cfg(cfg, task.data_cfg)
+         generator = task.build_generator([model], cfg)
+
+         sample = TTSHubInterface.get_model_input(task, input_text)
+         sample["net_input"]["src_tokens"] = sample["net_input"]["src_tokens"].cuda()
+         sample["net_input"]["src_lengths"] = sample["net_input"]["src_lengths"].cuda()
+         sample["speaker"] = sample["speaker"].cuda()
+
+         wav, rate = TTSHubInterface.get_prediction(task, model, generator, sample)
+         soundfile.write("/content/audio_before.wav", wav.cpu().clone().numpy(), rate)
+         # Slow the synthesized speech down slightly before lip-syncing
+         os.system('ffmpeg -y -i /content/audio_before.wav -filter:a "atempo=0.7" -vn /content/audio.wav')
+         return one_shot_talking(image, '/content/audio.wav')
+
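+ # Gradio UI: an image, a line of text and a voice choice in; a video out.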
+ input_value = "Hello, how are you?"
+
+ image = gr.Image(show_label=True, type="filepath", label="Input Image")
+ input_text = gr.Textbox(lines=3, value=input_value, label="Input Text")
+ gender = gr.Radio(["Female", "Male"], value="Female", label="Gender")
+ output = gr.Video(show_label=True, label="Output")
+
+ demo = gr.Interface(
+     one_shot,
+     [image, input_text, gender],
+     [output],
+     title="One Shot Talking Face from Text",
+ )
+ # Bind to 0.0.0.0 so the server is reachable from outside the container
+ demo.launch(server_name="0.0.0.0", server_port=7860, enable_queue=False)
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ gtts
+ soundfile
+ fairseq
+ huggingface-hub
+ g2p_en
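+ # app.py's other imports (pydub, dlib, cv2, imageio, ffmpeg-python) are
+ # assumed to be provided by the Dockerfile and the cloned repos' requirements.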