from image_animation1 import main
from image_animation1 import puts
import imageio
import torch
from tqdm import tqdm
from animate import normalize_kp
from demo import load_checkpoints
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from skimage import img_as_ubyte
from skimage.transform import resize
import cv2
import os
import argparse
import subprocess
import shutil
import sys
from tkinter import *
import cv2
from PIL import Image, ImageTk#图像控件
import threading#多线程
from tkinter import filedialog
from face import puts_
from funtion import vector_centerpoint
from funtion import similarity
from funtion import video_brodcast
from openpose1 import Motionrecognition
# To use Inference Engine backend, specify location of plugins:
# export LD_LIBRARY_PATH=/opt/intel/deeplearning_deploymenttoolkit/deployment_tools/external/mklml_lnx/lib:$LD_LIBRARY_PATH
import math
from pykeyboard import *

# --- Top-level Tk window hosting the two video canvases and the buttons ---
window = Tk()
window.title("这是一个窗口")
window.geometry("1200x550")
# window.overrideredirect(1)  # (disabled) would remove the window title bar
window.resizable(0,0) # forbid resizing in either direction

# Two side-by-side 600px-wide canvases; the worker thread in video_demo1
# paints the captured/driving frame on `canvas` (left) and the generated
# animation frame on `canvas1` (right) — presumably; confirm against puts().
canvas=Canvas(window,width=600,height=600)
canvas.place(x=0,y=0)

canvas1=Canvas(window,width=600,height=600)
canvas1.place(x=600,y=0)

# Build the command-line interface.
# NOTE(review): parse_args() is never called in this file — the parser
# object `ap` itself is passed to main() inside video_demo1, so parsing
# presumably happens there; confirm against image_animation1.main.
ap = argparse.ArgumentParser()
# Paths to the static source images to animate
ap.add_argument("-i", "--input_image", required=True, help="Path to image to animate")
ap.add_argument("-j", "--input_image1", required=True, help="Path to image to animate")
ap.add_argument("-m", "--input_image2", required=True, help="Path to image to animate")
# Path to the model checkpoint / weights
ap.add_argument("-c", "--checkpoint", required=True, help="Path to checkpoint")
# Path to the driving video input (optional; webcam is the likely fallback)
ap.add_argument("-v", "--input_video", required=False, help="Path to video input")

# Layout of the value returned by image_animation1.main (kept for reference):
# return [[source_path, checkpoint_path, video_path],
#         [width, height],
#         [source_image],
#         [generator, kp_detector],
#         [cap, fps, size, fourcc, out1, cv2_source],
#         [source, predictions, kp_source]
#         ]

ky = PyKeyboard()   # keyboard instance used to synthesize key presses

# Switch processing mode
def l():
    """Synthesize an 'l' keypress (observed by the cv2.waitKey loop)."""
    key = "l"
    ky.tap_key(key)
# Change avatar
def k():
    """Synthesize a 'k' keypress to cycle to the next source image."""
    key = "k"
    ky.tap_key(key)
# Recalibrate
def r():
    """Synthesize an 'r' keypress to re-initialize the animation state."""
    key = "r"
    ky.tap_key(key)
# Quit
def q():
    """Synthesize a 'q' keypress to break out of the worker loop."""
    key = "q"
    ky.tap_key(key)

# Export / display the animated frames
# flag=0
# flag2=0
# V_list,cv2_source1 = main(ap,flag)
def video_demo1():
    """Start the background worker that renders frames onto both canvases.

    Spawns a thread running ``cc``, an infinite loop that polls
    ``cv2.waitKey`` for key codes (injected by the Tk buttons via
    PyKeyboard) and alternates between a motion-recognition phase and
    an animation-rendering phase.

    Key protocol:
      'l' toggle phase · 'k' next avatar · 'r' recalibrate · 'q' quit loop.
    """
    def cc():
        # flag  — avatar index passed to main() (incremented by 'k')
        # flag2 — phase selector: even parity runs Motionrecognition and
        #         re-initializes, then flips to odd so rendering resumes
        #         on the same iteration
        flag=0
        flag2=0
        V_list,cv2_source1 = main(ap,flag)
        count=0
        tmp=[]
        while True:
            x=cv2.waitKey(1)
            if x & 0xFF == ord('l'):
                flag2+=1
            if flag2%2==0:
                # NOTE(review): x is overwritten with Motionrecognition()'s
                # return value here, so the 'k'/'r'/'q' checks below compare
                # that value rather than the last keypress — confirm this
                # is intended.
                x=Motionrecognition()
                flag2+=1
                V_list,cv2_source1 = main(ap,flag)
                count=0
                tmp=[]
            if flag2%2==1:
                
                # puts() yields one animation step; tmp is carried state.
                L=puts(V_list, count,tmp,cv2_source1)
                count = count + 1
                cv2_source, im, frame1,out1,tmp=L
                # print(frame1)

                # joinedFrame = np.concatenate((cv2_source, im, frame1), axis=1)
                # cv2.imshow('Test', joinedFrame) 
                # out1.write(img_as_ubyte(im))
                # out1.write(joinedFrame)

                frame1 = cv2.cvtColor(frame1.astype('float32'), cv2.COLOR_BGR2RGB)# convert BGR -> RGB for PIL
                im=cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
                # *255 assumes frames are float arrays in [0,1] — TODO confirm
                current_img = Image.fromarray(np.uint8(frame1*255)).resize((600,500))
                current_img1 = Image.fromarray(np.uint8(im*255)).resize((600,500))# array -> PIL Image, resized to canvas
                imagetk = ImageTk.PhotoImage(current_img)
                imagetk1 = ImageTk.PhotoImage(current_img1)# wrap as tkinter-compatible photo
                canvas.create_image(0,0,anchor='nw',image=imagetk)
                canvas1.create_image(0,0,anchor='nw',image=imagetk1)# draw onto the canvases
                # Keep references on the widgets so Tk's GC doesn't drop
                # the PhotoImages (prevents flicker/blank frames).
                canvas.image = imagetk
                canvas1.image = imagetk1
                # frame1 = cv2.cvtColor(frame1, cv2.COLOR_RGB2BGR)
                if x & 0xFF == ord('k'):
                    flag+=1
                    count=0
                    V_list,cv2_source1 = main(ap,flag)
                if x & 0xFF == ord('r'):
                    count=0
                    V_list,cv2_source1 = main(ap,flag)
            if x & 0xFF == ord('q'):
                    break
    threading.Thread(target=cc).start()

# Kick off the worker thread immediately, before entering the Tk mainloop.
video_demo1()

# Control buttons.  Each command synthesizes a keypress via PyKeyboard,
# which the cv2.waitKey(1) poll in the worker thread then observes.
bt_l= Button(window,text='切换',height=2,width=40,command=l)
bt_l.place(x=0,y=500)

bt_k= Button(window,text='换头',height=2,width=40,command=k)
bt_k.place(x=300,y=500)

bt_r= Button(window,text='校准',height=2,width=40,command=r)
bt_r.place(x=600,y=500)

bt_q = Button(window,text='退出',height=2,width=40,command=q)
bt_q.place(x=900,y=500)

# Enter the Tk event loop (blocks until the window is closed).
window.mainloop()