from os import listdir, path
import numpy as np
import scipy, cv2, os, sys, argparse, audio
import json, subprocess, random, string
from tqdm import tqdm
import time
from glob import glob
import torch
from models import Wav2Lip
import platform

import face_detection
import gradio as gr

from mediapipe.tasks import python
from mediapipe.tasks.python import vision
import mediapipe as mp

parser = argparse.ArgumentParser(description='Inference code to lip-sync videos in the wild using Wav2Lip models')

parser.add_argument('--checkpoint_path', type=str,
                    help='Name of saved checkpoint to load weights from', default='checkpoints/best_288x288.pth')

parser.add_argument('--face', type=str,
                    help='Filepath of video/image that contains faces to use')
parser.add_argument('--audio', type=str,
                    help='Filepath of video/audio file to use as raw audio source')
parser.add_argument('--outfile', type=str, help='Video path to save the result')

parser.add_argument('--static', action='store_true',
                    help='If set, use only the first video frame for inference')
parser.add_argument('--fps', type=float, help='Can be specified only if input is a static image (default: 25)',
                    default=25., required=False)

parser.add_argument('--pads', nargs='+', type=int, default=[0, 10, 0, 0],
                    help='Padding (top, bottom, left, right). Please adjust to include chin at least')

parser.add_argument('--face_det_batch_size', type=int,
                    help='Batch size for face detection', default=16)
parser.add_argument('--wav2lip_batch_size', type=int, help='Batch size for Wav2Lip model(s)', default=128)

parser.add_argument('--resize_factor', default=1, type=int,
                    help='Reduce the resolution by this factor. Sometimes, best results are obtained at 480p or 720p')

parser.add_argument('--crop', nargs='+', type=int, default=[0, -1, 0, -1],
                    help='Crop video to a smaller region (top, bottom, left, right). Applied after resize_factor and rotate arg. '
                         'Useful if multiple faces are present. -1 implies the value will be auto-inferred based on height, width')

parser.add_argument('--box', nargs='+', type=int, default=[-1, -1, -1, -1],
                    help='Specify a constant bounding box for the face. Use only as a last resort if the face is not detected. '
                         'Also, might work only if the face is not moving around much. Syntax: (top, bottom, left, right).')

parser.add_argument('--rotate', default=False, action='store_true',
                    help='Sometimes videos taken from a phone are rotated 90deg. If set, rotates the video 90deg clockwise. '
                         'Use if you get a rotated result despite feeding a normal-looking video')

parser.add_argument('--nosmooth', default=False, action='store_true',
                    help='Prevent smoothing face detections over a short temporal window')

parser.add_argument('--face_landmarks_detector_path', default='weights/face_landmarker_v2_with_blendshapes.task',
                    type=str, help='Path to face landmarks detector')
parser.add_argument('--with_face_mask', action='store_true',
                    help='Blend the output into the original frame using a face mask rather than pasting the whole face box. '
                         'This avoids a lower-resolution square artifact around the lower face')

args = parser.parse_args()
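# face crops are resized to img_size x img_size before being fed to the model (see datagen)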
args.img_size = 144



def face_mask_from_image(image, face_landmarks_detector):
    """
	Calculate face mask from image. This is done by

	Args:
		image: numpy array of an image
		face_landmarks_detector: mediapipa face landmarks detector
	Returns:
		A uint8 numpy array with the same height and width of the input image, containing a binary mask of the face in the image
	"""
    # initialize mask
    mask = np.zeros((image.shape[0], image.shape[1]), dtype=np.uint8)

    # detect face landmarks
    mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=image)
    detection = face_landmarks_detector.detect(mp_image)

    if len(detection.face_landmarks) == 0:
        # no face detected: fall back to a mask covering the whole image
        mask[:] = 1
        return mask

    # extract landmarks coordinates
    face_coords = np.array([[lm.x * image.shape[1], lm.y * image.shape[0]] for lm in detection.face_landmarks[0]])

    # calculate convex hull from face coordinates
    convex_hull = cv2.convexHull(face_coords.astype(np.float32))

    # apply convex hull to mask
    return cv2.fillPoly(mask, pts=[convex_hull.squeeze().astype(np.int32)], color=1)


def get_smoothened_boxes(boxes, T):
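    """Smooth face boxes in place with a moving average over windows of T frames to reduce jitter."""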
    for i in range(len(boxes)):
        if i + T > len(boxes):
            window = boxes[len(boxes) - T:]
        else:
            window = boxes[i: i + T]
        boxes[i] = np.mean(window, axis=0)
    return boxes


def face_detect(images):
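    """Detect a face in every frame; return [face_crop, (y1, y2, x1, x2)] for each frame."""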
    detector = face_detection.build_detector("RetinaNetMobileNetV1")

    predictions = []
    for i in tqdm(range(len(images))):
        prediction = detector.detect(images[i])
        if len(prediction) == 0:
            # no detection in this frame; flag it so we can fail with a clear error below
            predictions.append(None)
        else:
            # keep the first detection and drop the confidence score
            x1, y1, x2, y2 = prediction[0][:4]
            predictions.append([x1, y1, x2, y2])

    results = []
    pady1, pady2, padx1, padx2 = args.pads
    for rect, image in zip(predictions, images):
        if rect is None:
            cv2.imwrite('temp/faulty_frame.jpg', image)  # dump the frame where the face was not detected
            raise ValueError('Face not detected! Ensure the video contains a face in all the frames.')

        y1 = int(max(0, rect[1] - pady1))
        y2 = int(min(image.shape[0], rect[3] + pady2))
        x1 = int(max(0, rect[0] - padx1))
        x2 = int(min(image.shape[1], rect[2] + padx2))

        results.append([x1, y1, x2, y2])

    boxes = np.array(results)
    if not args.nosmooth:
        boxes = get_smoothened_boxes(boxes, T=5)
    results = [[image[y1: y2, x1:x2], (y1, y2, x1, x2)] for image, (x1, y1, x2, y2) in zip(images, boxes)]

    return results


def datagen(frames, mels):
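    """Yield batches of (face crops, mel chunks, original frames, crop coords) for inference."""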
    img_batch, mel_batch, frame_batch, coords_batch = [], [], [], []

    if args.box[0] == -1:
        if not args.static:
            face_det_results = face_detect(frames)
        else:
            face_det_results = face_detect([frames[0]])
    else:
        print('Using the specified bounding box instead of face detection...')
        y1, y2, x1, x2 = args.box
        face_det_results = [[f[y1: y2, x1:x2], (y1, y2, x1, x2)] for f in frames]

    for i, m in enumerate(mels):
        idx = 0 if args.static else i % len(frames)
        frame_to_save = frames[idx].copy()
        face, coords = face_det_results[idx].copy()

        face = cv2.resize(face, (args.img_size, args.img_size))

        img_batch.append(face)
        mel_batch.append(m)
        frame_batch.append(frame_to_save)
        coords_batch.append(coords)

        if len(img_batch) >= args.wav2lip_batch_size:
            img_batch, mel_batch = np.asarray(img_batch), np.asarray(mel_batch)

            # mask the lower half of each face; the model receives the masked and
            # unmasked crops concatenated along the channel axis
            img_masked = img_batch.copy()
            img_masked[:, args.img_size // 2:] = 0

            img_batch = np.concatenate((img_masked, img_batch), axis=3) / 255.
            mel_batch = np.reshape(mel_batch, [len(mel_batch), mel_batch.shape[1], mel_batch.shape[2], 1])

            yield img_batch, mel_batch, frame_batch, coords_batch
            img_batch, mel_batch, frame_batch, coords_batch = [], [], [], []

    # flush the final partial batch
    if len(img_batch) > 0:
        img_batch, mel_batch = np.asarray(img_batch), np.asarray(mel_batch)

        img_masked = img_batch.copy()
        img_masked[:, args.img_size // 2:] = 0

        img_batch = np.concatenate((img_masked, img_batch), axis=3) / 255.
        mel_batch = np.reshape(mel_batch, [len(mel_batch), mel_batch.shape[1], mel_batch.shape[2], 1])

        yield img_batch, mel_batch, frame_batch, coords_batch


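# each mel chunk spans 16 spectrogram frames (~0.2 s, assuming the repo's default
# audio settings of 16 kHz with 80 mel frames per second)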
mel_step_size = 16
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('Using {} for inference.'.format(device))


def _load(checkpoint_path):
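    """Load a checkpoint, mapping tensors to CPU when CUDA is unavailable."""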
    if device == 'cuda':
        checkpoint = torch.load(checkpoint_path)
    else:
        checkpoint = torch.load(checkpoint_path,
                                map_location=lambda storage, loc: storage)
    return checkpoint


def load_model(path):
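    """Instantiate Wav2Lip, load checkpoint weights, and return the model in eval mode."""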
    model = Wav2Lip()
    print("Load checkpoint from: {}".format(path))
    checkpoint = _load(path)
    s = checkpoint["state_dict"]
    new_s = {}
    for k, v in s.items():
        new_s[k.replace('module.', '')] = v
    model.load_state_dict(s)

    model = model.to(device)
    return model.eval()


def main(audio_input, video_input):
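    """Lip-sync video_input to audio_input with Wav2Lip and return the path of the result."""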
    if not os.path.isfile(video_input):
        raise ValueError('The video input must be a valid path to a video/image file')

    elif os.path.splitext(video_input)[1].lower() in ['.jpg', '.png', '.jpeg']:
        full_frames = [cv2.imread(video_input)]
        fps = args.fps

    else:
        video_stream = cv2.VideoCapture(video_input)
        fps = video_stream.get(cv2.CAP_PROP_FPS)

        print('Reading video frames...')

        full_frames = []
        while True:
            still_reading, frame = video_stream.read()
            if not still_reading:
                video_stream.release()
                break
            if args.resize_factor > 1:
                frame = cv2.resize(frame, (frame.shape[1] // args.resize_factor, frame.shape[0] // args.resize_factor))

            if args.rotate:
                frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)

            y1, y2, x1, x2 = args.crop
            if x2 == -1: x2 = frame.shape[1]
            if y2 == -1: y2 = frame.shape[0]

            frame = frame[y1:y2, x1:x2]

            full_frames.append(frame)

    print("Number of frames available for inference: " + str(len(full_frames)))

    os.makedirs('temp', exist_ok=True)  # intermediate files are written here

    if not audio_input.endswith('.wav'):
        print('Extracting raw audio...')
        command = 'ffmpeg -y -i "{}" -strict -2 "{}"'.format(audio_input, 'temp/temp.wav')

        subprocess.call(command, shell=True)
        audio_input = 'temp/temp.wav'

    wav = audio.load_wav(audio_input, 16000)
    mel = audio.melspectrogram(wav)
    # print(mel.shape)

    if np.isnan(mel.reshape(-1)).sum() > 0:
        raise ValueError('Mel contains nan! Using a TTS voice? Add a small epsilon noise to the wav file and try again')

    mel_chunks = []
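    # 80. / fps converts a video-frame index into a mel-frame index
    # (the audio module yields 80 mel frames per second of audio)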
    mel_idx_multiplier = 80. / fps
    i = 0
    while True:
        start_idx = int(i * mel_idx_multiplier)
        if start_idx + mel_step_size > len(mel[0]):
            mel_chunks.append(mel[:, len(mel[0]) - mel_step_size:])
            break
        mel_chunks.append(mel[:, start_idx: start_idx + mel_step_size])
        i += 1

    print("Length of mel chunks: {}".format(len(mel_chunks)))

    full_frames = full_frames[:len(mel_chunks)]

    batch_size = args.wav2lip_batch_size
    gen = datagen(full_frames.copy(), mel_chunks)

    base_options = python.BaseOptions(model_asset_path=args.face_landmarks_detector_path)
    options = vision.FaceLandmarkerOptions(base_options=base_options,
                                           output_face_blendshapes=True,
                                           output_facial_transformation_matrixes=True,
                                           num_faces=1)
    face_landmarks_detector = vision.FaceLandmarker.create_from_options(options)

    for i, (img_batch, mel_batch, frames, coords) in enumerate(tqdm(gen,
                                                                    total=int(
                                                                        np.ceil(float(len(mel_chunks)) / batch_size)))):
        if i == 0:
            model = load_model(args.checkpoint_path)
            print("Model loaded")

            frame_h, frame_w = full_frames[0].shape[:-1]
            out = cv2.VideoWriter('temp/result.avi',
                                  cv2.VideoWriter_fourcc(*'DIVX'), fps, (frame_w, frame_h))

        img_batch = torch.FloatTensor(np.transpose(img_batch, (0, 3, 1, 2))).to(device)
        mel_batch = torch.FloatTensor(np.transpose(mel_batch, (0, 3, 1, 2))).to(device)

        with torch.no_grad():
            pred = model(mel_batch, img_batch)

        pred = pred.cpu().numpy().transpose(0, 2, 3, 1) * 255.

        for p, f, c in zip(pred, frames, coords):
            y1, y2, x1, x2 = c
            p = cv2.resize(p.astype(np.uint8), (x2 - x1, y2 - y1))

            # blend only inside the detected face's convex hull so the pasted
            # region does not leave a low-resolution square seam around the mouth
            mask = face_mask_from_image(p, face_landmarks_detector)
            f[y1:y2, x1:x2] = f[y1:y2, x1:x2] * (1 - mask[..., None]) + p * mask[..., None]

            out.write(f)

    out.release()

    out_path = 'temp/out.mp4'

    command = 'ffmpeg -y -i "{}" -i "{}" -strict -2 -q:v 1 "{}"'.format(audio_input, 'temp/result.avi', out_path)
    subprocess.call(command, shell=platform.system() != 'Windows')

    return out_path


if __name__ == '__main__':
    with gr.Blocks() as demo:
        audio_input = gr.Audio(type="filepath", label="Input audio", show_label=True)
        video_input = gr.Video(label="Input video", show_label=True)

        output_video = gr.Video(label="Output video")

        with gr.Row():
            btn = gr.Button("Run")
        btn.click(fn=main, inputs=[audio_input, video_input], outputs=[output_video])
    demo.launch(server_name='0.0.0.0', server_port=9000)
