import imageio
import torch
from tqdm import tqdm
from animate import normalize_kp
from demo import load_checkpoints
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from skimage import img_as_ubyte
from skimage.transform import resize
import cv2
import os
import argparse
import subprocess
import shutil
import sys



# def video2mp3(file_name):
#     outfile_name = file_name.split('.')[0] + '.mp3'
#     cmd = 'ffmpeg -i ' + file_name + ' -f mp3 ' + outfile_name
#     subprocess.call(cmd, shell=True)


def video_add_mp3(file_path, mp3_file):
    """Mux an audio track onto a video file with ffmpeg.

    Writes the result next to the input as '<name>_2.mp4'
    (e.g. 'output/test.mp4' -> 'output/test_2.mp4'); the inputs
    are not modified.

    Args:
        file_path: path of the (silent) video file.
        mp3_file: path of the audio file to merge in.
    """
    # os.path.splitext is robust against dots in directory names,
    # unlike file_path.split('.')[0] which truncates at the first dot.
    outfile_name = os.path.splitext(file_path)[0] + '_2.mp4'
    # Argument list + shell=False: no shell injection / quoting issues
    # when paths contain spaces or shell metacharacters.
    subprocess.call(['ffmpeg', '-i', file_path, '-i', mp3_file,
                     '-strict', '-2', '-f', 'mp4', outfile_name])

def iflw_256(video_path):
    """Ensure the driving video is 256x256, re-encoding it if necessary.

    If the video already is 256x256 it is returned untouched. Otherwise
    every frame is resized to 256x256 and written to '256_256.avi', the
    audio track ('<video>.mp3', extracted earlier by video2mp3) is muxed
    back in, and the resulting '256_256_2.mp4' path is returned.

    Args:
        video_path: path of the input driving video.

    Returns:
        (video_path, mp4_del): the video path to use from here on, and
        True when an intermediate file was created (so the caller may
        delete it later), None otherwise.
    """
    cap = cv2.VideoCapture(video_path)
    try:
        # Probe the size locally instead of relying on the module-level
        # `width`/`height` globals the original version silently read.
        width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        # Re-encode when EITHER dimension differs from 256; the original
        # `and` condition skipped videos like 256x480.
        if width == 256 and height == 256:
            return video_path, None
        videowriter = cv2.VideoWriter(
            "256_256.avi", cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
            cap.get(cv2.CAP_PROP_FPS), (256, 256))
        try:
            while True:
                success, frame = cap.read()
                if not success:
                    # End of stream: the original relied on a bare
                    # except around cv2.resize(None, ...) to stop.
                    break
                frame = cv2.resize(frame, (256, 256),
                                   interpolation=cv2.INTER_LINEAR)
                videowriter.write(frame)
        finally:
            videowriter.release()
    finally:
        cap.release()
    video_add_mp3(file_path='256_256.avi',
                  mp3_file=os.path.splitext(video_path)[0] + '.mp3')
    return '256_256_2.mp4', True

# Build the command-line interface.
ap = argparse.ArgumentParser()
# Path of the still image that will be animated.
ap.add_argument("-i", "--input_image", required=True, help="Path to image to animate")
# Path of the model checkpoint (weights).
ap.add_argument("-c", "--checkpoint", required=True, help="Path to checkpoint")
# Path of the driving video; optional - the webcam is used when absent.
ap.add_argument("-v", "--input_video", required=False, help="Path to video input")

args = vars(ap.parse_args())
print("[INFO] loading source image and checkpoint...")
source_path = args['input_image']
checkpoint_path = args['checkpoint']
# None means "capture from the webcam" further down.
video_path = args['input_video'] if args['input_video'] else None

# Extract the audio track from the driving video (currently disabled;
# '<video>.mp3' is assumed to exist already).
# video2mp3(file_name=video_path)

if video_path is not None:
    # Audio extracted from the video lives next to it as '<name>.mp3'.
    mp3_path = os.path.splitext(video_path)[0] + '.mp3'
    # Probe the video size with a single capture handle and release it.
    # The original opened two VideoCapture objects unconditionally and
    # crashed here in webcam mode (video_path is None).
    _probe = cv2.VideoCapture(video_path)
    width = _probe.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = _probe.get(cv2.CAP_PROP_FRAME_HEIGHT)
    _probe.release()
    # Re-encode the driving video to 256x256 if needed; mp4_del flags
    # whether an intermediate file was created.
    video_path, mp4_del = iflw_256(video_path)

# Load the source image, resize to the model input resolution and drop
# a possible alpha channel.
source_image = imageio.imread(source_path)
source_image = resize(source_image, (256, 256))[..., :3]
# Load the generator network and the keypoint detector from the checkpoint.
generator, kp_detector = load_checkpoints(config_path='config/vox-256.yaml', checkpoint_path=checkpoint_path)

# Make sure the output directory exists.
if not os.path.exists('output'):
    os.mkdir('output')

# Keypoint-normalization settings (see normalize_kp in animate.py).
relative = True
adapt_movement_scale = True
cpu = False

# Open the driving video, or the default webcam when no video was given.
if video_path:
    cap = cv2.VideoCapture(video_path)
    print("[INFO] Loading video from the given path")
else:
    cap = cv2.VideoCapture(0)
    print("[INFO] Initializing front camera...")

# FPS and frame size of the capture source.
fps = cap.get(cv2.CAP_PROP_FPS)
size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))

print('这个视频的长宽是：==================================>', size)

# The frames actually saved are the side-by-side preview strip
# [source | prediction | driving], i.e. 3*256 wide and 256 high. The
# writer must be opened with exactly that size, otherwise OpenCV drops
# every frame silently (the original opened it with the input size).
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
out1 = cv2.VideoWriter('output/test.mp4', fourcc, fps, (256 * 3, 256), True)
# OpenCV works in BGR; pre-convert the float source image once so the
# preview strip shows consistent colors.
cv2_source = cv2.cvtColor(source_image.astype('float32'), cv2.COLOR_BGR2RGB)

# Inference only - no autograd graph needed.
with torch.no_grad():
    # HWC image -> NCHW float tensor.
    source = torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
    if not cpu:
        source = source.cuda()
    # Keypoints of the still source image, computed once.
    kp_source = kp_detector(source)
    count = 0
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        # Mirror the frame only AFTER a successful read; the original
        # flipped first and crashed on the final (None) frame.
        frame = cv2.flip(frame, 1)
        if not video_path:
            # Webcam mode: crop a fixed region where the face is
            # expected to be.
            x, y, w, h = 143, 87, 322, 322
            frame = frame[y:y + h, x:x + w]
        # Resize the driving frame to model resolution, drop alpha.
        frame1 = resize(frame, (256, 256))[..., :3]

        if count == 0:
            # Keypoints of the first driving frame: reference pose for
            # relative motion transfer.
            source1 = torch.tensor(frame1[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
            kp_driving_initial = kp_detector(source1)

        driving_frame = torch.tensor(frame1[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
        if not cpu:
            driving_frame = driving_frame.cuda()
        # Keypoints of the current driving frame, normalized against the
        # initial driving frame and the source image.
        kp_driving = kp_detector(driving_frame)
        kp_norm = normalize_kp(kp_source=kp_source,
                               kp_driving=kp_driving,
                               kp_driving_initial=kp_driving_initial,
                               use_relative_movement=relative,
                               use_relative_jacobian=relative,
                               adapt_movement_scale=adapt_movement_scale)
        out = generator(source, kp_source=kp_source, kp_driving=kp_norm)
        # NCHW prediction in [0, 1] -> HWC, back to BGR for OpenCV.
        # (The original also accumulated every frame in an unused
        # `predictions` list, growing memory without bound.)
        im = np.transpose(out['prediction'].data.cpu().numpy(), [0, 2, 3, 1])[0]
        im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)

        # Preview strip: [source | prediction | driving frame].
        joinedFrame = np.concatenate((cv2_source, im, frame1), axis=1)
        cv2.imshow('Test', joinedFrame)
        # Write the strip exactly once, converted to uint8; the original
        # wrote two mismatched float frames per iteration, which
        # VideoWriter rejects silently.
        out1.write(img_as_ubyte(joinedFrame))

        count += 1
        # 'q' quits the preview loop.
        if cv2.waitKey(20) & 0xFF == ord('q'):
            break

    # Release capture, writer and the preview window.
    cap.release()
    out1.release()
    cv2.destroyAllWindows()

# Merge an audio track into the rendered video. When a driving video was
# supplied, use the audio extracted from it ('<video>.mp3' next to the
# original input); the original code unconditionally hard-coded
# 'doudou2.mp3', clobbering the video's own audio. Keep the hard-coded
# track only as the webcam-mode fallback.
if args['input_video']:
    mp3_path = os.path.splitext(args['input_video'])[0] + '.mp3'
else:
    mp3_path = 'doudou2.mp3'

video_add_mp3(file_path='output/test.mp4', mp3_file=mp3_path)
# NOTE(review): intermediate files ('256_256.avi', '256_256_2.mp4') are
# never removed even though iflw_256 returns mp4_del for that purpose —
# confirm whether cleanup should happen here.
# os.remove(args['input_video'].split('.')[0]+'.mp3')
