# Camera application built on a self-trained gesture-recognition model
import cv2
import time
import os
from PIL import Image
import numpy as np
from photo_tool import have_gesture, predict_gesture
import shutil


tmp_path = "./photo/tmp/"  # temp folder for snapshots awaiting gesture classification
save_photo_path = "./photo/save_photo/"  # directory where captured photos are saved
face_cascade_path = "./secondary_file/haarcascade_frontalface_default.xml"
cat_ears_path = "./secondary_file/cat_ears.png"
rabbit_ears_path = "./secondary_file/rabbit_ears.png"
filter_path = './secondary_file/'  # filter_path + models[choice] is a style-transfer model file

# Currently selected filter; updated from recognized gestures (see get_result).
choice = "Zero"
# Gesture label -> neural style-transfer model file (used by get_neural_style).
models = {'Two': 'la_muse.t7', 'Three': 'mosaic.t7', 'Four': 'starry_night.t7', 'Five': 'the_scream.t7'}
# Classifier output index -> gesture label.
label_gestures = {0: 'Zero', 1: 'One', 2: 'Two', 3: 'Three',
                  4: 'Four', 5: 'Five', 6: 'ok'}


def start_camera():
    """Main capture loop: display the webcam feed with the currently selected
    filter and poll for hand gestures at most every ~2 seconds.

    Recognized gestures (via get_result) update the global `choice`; the
    special 'ok' gesture restores the previous filter and triggers a
    countdown photo shoot via shooting(). ESC exits the loop.
    """
    global choice

    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
    i = 0  # frame counter; also names the tmp snapshot / saved photo files
    start_time = time.time()
    last_choice = "default"  # filter to restore when an 'ok' shot fires
    count_down = False  # True right after a shot, so 'ok' can't immediately re-trigger

    while True:
        _, frame = cap.read()  # NOTE(review): success flag is unchecked — assumes the camera stays available
        if choice == "default":
            process_frame = frame  # no filter applied
            count_down = False
        elif choice == "ok" and not count_down:
            # Restore the pre-'ok' filter, then run the countdown + capture.
            choice = last_choice
            shooting(cap, i)
            count_down = True
            continue
        else:
            process_frame = get_filter(frame)
            count_down = False

        # process_frame = cv2.flip(process_frame, 1)
        cv2.imshow("capture", process_frame)
        sec = int(time.time() - start_time)
        gestures = have_gesture(frame)
        # Only classify gestures every >= 2 s so recognition can't thrash.
        if sec >= 2 and len(gestures) > 0:
            last_choice = choice
            cv2.imwrite(tmp_path + str(i) + ".jpg", frame)  # snapshot for the classifier
            get_result(i, gestures)
            start_time = time.time()

        if cv2.waitKey(1) == 27:  # ESC quits
            print("Quit!")
            break
        i += 1

    cap.release()
    cv2.destroyAllWindows()


def shooting(cap, i):
    """Countdown photo shoot: flash "start!" for ~0.8 s, show a 3-2-1
    countdown, then capture one filtered frame, normalize it to 8-bit and
    save it as save_photo_path/<i>.jpg.

    cap -- an open cv2.VideoCapture
    i   -- frame counter used as the photo's file name
    """
    start_time = time.time()
    secs = 0

    text = "start!"
    font_face = cv2.FONT_ITALIC | cv2.FONT_HERSHEY_COMPLEX
    font_scale = 3.5
    font_color = (81, 163, 81)
    thickness = 10
    # Center the text using capture width (prop 3) and height (prop 4).
    text_size = cv2.getTextSize(text, font_face, font_scale, thickness)
    center = (int((cap.get(3) - text_size[0][0]) / 2), int((cap.get(4) + text_size[0][1]) / 2))

    # Show "start!" for ~0.8 s while continuing to stream filtered frames.
    while secs <= 0.8:
        ret, frame = cap.read()
        # frame = cv2.flip(frame, 1)
        frame = get_filter(frame)
        frame = np.ascontiguousarray(frame)  # putText needs a contiguous, writable buffer
        cv2.putText(frame, text, center, font_face, font_scale, font_color, thickness, 4, 0)
        cv2.imshow("capture", frame)
        secs = time.time() - start_time
        cv2.waitKey(1)

    secs = 0
    start_time = time.time()

    # Re-center for the single-digit countdown text.
    text_size = cv2.getTextSize('0', font_face, font_scale, thickness)
    center = (int((cap.get(3) - text_size[0][0]) / 2), int((cap.get(4) + text_size[0][1]) / 2))

    while secs <= 3:
        secs = time.time() - start_time
        if 0 <= secs <= 3:  # 3-second countdown, one digit per second
            ret, frame = cap.read()
            # frame = cv2.flip(frame, 1)
            frame = get_filter(frame)
            frame = np.ascontiguousarray(frame)
            cv2.putText(frame, str(3 - int(secs)), center, font_face, font_scale, font_color, thickness, 4, 0)
            cv2.imshow("capture", frame)
        cv2.waitKey(1)

    # Final capture: filter, stretch to the full 8-bit range, save and show.
    ret, frame = cap.read()
    # frame = cv2.flip(frame, 1)
    frame = get_filter(frame)
    frame = np.ascontiguousarray(frame)
    noise_img_norm = cv2.normalize(frame, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
    cv2.imwrite(save_photo_path + str(i) + '.jpg', noise_img_norm)  # save the photo
    cv2.imshow("saved_photo", noise_img_norm)


# Apply the filter selected by the global `choice`
def get_filter(frame):
    """Return `frame` with the currently selected filter applied.

    Dispatches on the global `choice`:
      - "Zero" / "default": no filter. ("default" is the initial value of
        `last_choice` in start_camera and is restored before shooting();
        previously it fell through to the neural-style branch and raised
        KeyError on models["default"].)
      - "One": vintage colormap.
      - "cat" / "rabbit": face-detected ear stickers.
      - anything else: neural style transfer (a key of `models`).
    """
    if choice in ("Zero", "default"):
        return frame
    elif choice == "One":
        return get_vintage(frame)
    elif choice in ("cat", "rabbit"):
        return get_ears(frame)
    else:
        return get_neural_style(frame)


def get_vintage(img):
    """Give the frame a 'vintage' look: convert to grayscale, then map the
    intensities through OpenCV's PINK colormap."""
    grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return cv2.applyColorMap(grayscale, cv2.COLORMAP_PINK)


def get_ears(img):
    """Overlay an ear sticker on every face detected in `img`.

    Detects faces with the Haar frontal-face cascade; the global `choice`
    decides between the rabbit and cat sticker.
    """
    cascade = cv2.CascadeClassifier(face_cascade_path)
    grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    overlay = rabbit_ears if choice == "rabbit" else cat_ears
    for (x, y, w, h) in cascade.detectMultiScale(grayscale, 1.3, 5):
        # Stamp the sticker onto the frame at each detected face box.
        img = overlay(img, x, y, w, h)
    return img


def cat_ears(img, x, y, w, h):
    """Paste the cat-ears sticker above the face box (x, y, w, h) and return
    the frame as a BGR numpy array."""
    base = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    # Sticker image
    sticker = Image.open(cat_ears_path)
    # Scale to the face width, keeping the source's 1024x987 aspect ratio.
    sticker = sticker.resize((w, int(w * 987 / 1024)))
    layer = Image.new('RGBA', base.size, (0, 0, 0, 0))
    layer.paste(sticker, (x, y - 40))  # fixed 40 px offset above the face box
    composed = Image.composite(layer, base, layer)
    return cv2.cvtColor(np.asarray(composed), cv2.COLOR_RGB2BGR)


def rabbit_ears(img, x, y, w, h):
    """Paste the rabbit-ears sticker directly above the face box
    (x, y, w, h) and return the frame as a BGR numpy array."""
    base = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    # Sticker image
    sticker = Image.open(rabbit_ears_path)
    # Scale to the face width, keeping the source's 1024x987 aspect ratio.
    sticker_h = int(w * 987 / 1024)
    sticker = sticker.resize((w, sticker_h))
    layer = Image.new('RGBA', base.size, (0, 0, 0, 0))
    layer.paste(sticker, (x, y - sticker_h))  # sit the ears on top of the face box
    composed = Image.composite(layer, base, layer)
    return cv2.cvtColor(np.asarray(composed), cv2.COLOR_RGB2BGR)


# Cache of loaded style-transfer networks keyed by model filename, so the
# expensive readNetFromTorch disk load happens once per model, not per frame.
_net_cache = {}


# Artistic (neural style-transfer) filter
def get_neural_style(img):
    """Run the style-transfer model selected by the global `choice` on `img`.

    Returns a float BGR image scaled to [0, 1] (fine for cv2.imshow; callers
    that save the result re-normalize to 8-bit first, as shooting() does).
    """
    model_file = models[choice]
    net = _net_cache.get(model_file)
    if net is None:
        net = cv2.dnn.readNetFromTorch(filter_path + model_file)
        net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
        _net_cache[model_file] = net

    (h, w) = img.shape[:2]
    # Subtract the per-channel BGR means these torch models were trained with.
    blob = cv2.dnn.blobFromImage(img, 1.0, (w, h), (103.939, 116.779, 123.680),
                                 swapRB=False, crop=False)
    net.setInput(blob)
    out = net.forward()
    out = out.reshape(3, out.shape[2], out.shape[3])
    # Add the means back, rescale to [0, 1], and reorder channels-last (HWC).
    out[0] += 103.939
    out[1] += 116.779
    out[2] += 123.68
    out /= 255
    return out.transpose(1, 2, 0)


def get_result(i, gestures):
    """Classify each detected hand gesture and update the global `choice`.

    i        -- frame counter; selects the snapshot saved as tmp_path/<i>.jpg
    gestures -- list of (x, y, w, h) boxes, mutated in place: valid boxes are
                replaced by their gesture labels, boxes too close to the
                frame border are dropped.

    One recognized gesture selects that filter directly; two gestures
    including 'Zero' select the cat sticker, two including 'Two' the rabbit.
    """
    global choice

    image = cv2.imread(tmp_path + str(i) + ".jpg")
    print("Get " + str(len(gestures)) + " gesture!")
    # Walk backwards so deletions don't shift indices still to be visited.
    # Use `del` by index (the old `gestures.remove(gestures[i])` removed the
    # first *equal* element, which could drop the wrong box when two boxes
    # compared equal, and the loop variable shadowed the parameter `i`).
    for idx in range(len(gestures) - 1, -1, -1):
        x, y, w, h = gestures[idx]
        if y >= 25 and x >= 25:  # ignore boxes too close to the frame border
            gestures[idx] = label_gestures[int(predict_gesture(image, x, y, w, h))]
        else:
            del gestures[idx]

    if len(gestures) == 1:
        choice = gestures[0]
    elif len(gestures) == 2 and 'Zero' in gestures:
        choice = 'cat'
    elif len(gestures) == 2 and 'Two' in gestures:
        choice = 'rabbit'


if __name__ == "__main__":
    # exist_ok=True replaces the racy exists()-then-makedirs checks.
    os.makedirs(tmp_path, exist_ok=True)
    os.makedirs(save_photo_path, exist_ok=True)
    try:
        start_camera()
    finally:
        # Always remove the temp snapshot folder, even if the camera loop
        # raises; ignore_errors covers the dir already being gone.
        shutil.rmtree(tmp_path, ignore_errors=True)
