import imageio
import torch
# from tqdm import tqdm
from AI.animate import normalize_kp
from AI.demo import load_checkpoints
import numpy as np
from skimage import img_as_ubyte
from skimage.transform import resize
import cv2
import os


def run(source_path="AI/Inputs/feynman.jpeg", checkpoint_path="AI/extract/vox-cpk.pth.tar",
        video_path="AI/driver_video/myself.mp4", output_path="AI/output/my_resault.mp4"):
    """Animate a still source image with the motion of a driving video.

    Loads a pretrained first-order-motion-model generator / keypoint
    detector, extracts keypoints from every frame of the driving video
    (or the webcam when ``video_path`` is falsy), and writes the generated
    256x256 animation to ``output_path`` as H.264 at 25 fps.

    Args:
        source_path: Still image whose identity gets animated.
        checkpoint_path: Pretrained model checkpoint (.pth.tar).
        video_path: Driving video file; a falsy value switches to camera 0.
        output_path: Destination video file for the generated frames.
    """
    source_image = imageio.imread(source_path)
    # Resize to the model's 256x256 input and drop any alpha channel.
    source_image = resize(source_image, (256, 256))[..., :3]

    # Load the trained generator and keypoint detector.
    generator, kp_detector = load_checkpoints(config_path='AI/config/vox-256.yaml',
                                              checkpoint_path=checkpoint_path)

    # BUGFIX: create the directory the output file is actually written to.
    # The old code made a stray top-level 'output/' dir while the writer
    # targeted output_path (e.g. 'AI/output/...'), which could not exist.
    out_dir = os.path.dirname(output_path)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)

    relative = True              # drive with relative keypoint motion
    adapt_movement_scale = True  # rescale motion to the source face size
    cpu = False                  # run inference on GPU

    if video_path:
        cap = cv2.VideoCapture(video_path)
        print("[INFO] Loading video from the given path")
    else:
        cap = cv2.VideoCapture(0)
        print("[INFO] Initializing front camera...")

    fourcc = cv2.VideoWriter_fourcc('H', '2', '6', '4')
    # 25 fps, 256x256, color output.
    out1 = cv2.VideoWriter(output_path, fourcc, 25, (256, 256), True)

    # Inference only: torch.no_grad() skips gradient tracking, reducing
    # memory use and guarding the model parameters against accidental
    # modification during the forward passes below.
    with torch.no_grad():
        source = torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
        if not cpu:
            source = source.cuda()
        kp_source = kp_detector(source)

        kp_driving_initial = None
        count = 0
        while True:
            ret, frame = cap.read()
            if not ret:
                break  # end of stream or camera failure

            # BUGFIX: flip only after a successful read; the old code
            # called cv2.flip(None, 1) on the final failed read and raised.
            # flip code 1 = horizontal mirror (0 = vertical, -1 = both).
            frame = cv2.flip(frame, 1)

            if not video_path:
                # Fixed crop of the webcam frame around the face region.
                x, y, w, h = 143, 87, 322, 322
                frame = frame[y:y + h, x:x + w]
            frame1 = resize(frame, (256, 256))[..., :3]

            if count == 0:
                # Keypoints of the first driving frame anchor the
                # relative-motion normalization for all later frames.
                initial = torch.tensor(frame1[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
                if not cpu:
                    # BUGFIX: move to the same device as kp_detector;
                    # the old code left this tensor on the CPU.
                    initial = initial.cuda()
                kp_driving_initial = kp_detector(initial)

            driving_frame = torch.tensor(frame1[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
            if not cpu:
                driving_frame = driving_frame.cuda()
            kp_driving = kp_detector(driving_frame)
            kp_norm = normalize_kp(kp_source=kp_source,
                                   kp_driving=kp_driving,
                                   kp_driving_initial=kp_driving_initial,
                                   use_relative_movement=relative,
                                   use_relative_jacobian=relative,
                                   adapt_movement_scale=adapt_movement_scale)
            out = generator(source, kp_source=kp_source, kp_driving=kp_norm)

            # NCHW float tensor -> HWC numpy image, then RGB -> BGR for cv2.
            im = np.transpose(out['prediction'].data.cpu().numpy(), [0, 2, 3, 1])[0]
            im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
            print(im.shape, end='\r')
            out1.write(img_as_ubyte(im))
            count += 1
            print(count)
            # Allow aborting with 'q' (also services the GUI event loop).
            if cv2.waitKey(20) & 0xFF == ord('q'):
                break

        cap.release()
        out1.release()
        cv2.destroyAllWindows()
