import os
import time
import random

import common
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
import cv2
from model.DBFace import DBFace
import shutil

# Cache CUDA availability once at import time; every entry point below reads this
# to decide whether to move the model and the input tensors onto the GPU.
HAS_CUDA = torch.cuda.is_available()
print(f"HAS_CUDA = {HAS_CUDA}")


def nms(objs, iou=0.5):
    if objs is None or len(objs) <= 1:
        return objs

    objs = sorted(objs, key=lambda obj: obj.score, reverse=True)
    keep = []
    flags = [0] * len(objs)
    for index, obj in enumerate(objs):

        if flags[index] != 0:
            continue

        keep.append(obj)
        for j in range(index + 1, len(objs)):
            if flags[j] == 0 and obj.iou(objs[j]) > iou:
                flags[j] = 1
    return keep


def detect(model, image, threshold=0.4, nms_iou=0.5):
    """Run DBFace on one image and return NMS-filtered face detections.

    Args:
        model: DBFace network returning (heatmap, box, landmark) tensors.
        image: HWC uint8 image (BGR, as produced by common.imread/cv2).
        threshold: minimum heatmap score for a candidate to be kept.
        nms_iou: IoU threshold used by non-maximum suppression.

    Returns:
        List of common.BBox objects surviving NMS, ordered by score.
    """
    # ImageNet-style per-channel normalization constants used at training time.
    mean = [0.408, 0.447, 0.47]
    std = [0.289, 0.274, 0.278]

    image = common.pad(image)
    image = ((image / 255.0 - mean) / std).astype(np.float32)
    image = image.transpose(2, 0, 1)  # HWC -> CHW

    torch_image = torch.from_numpy(image)[None]  # add batch dim
    if HAS_CUDA:
        torch_image = torch_image.cuda()

    # Inference only: disable autograd so no computation graph is retained
    # (the original built one per frame, wasting memory in the video loops).
    with torch.no_grad():
        hm, box, landmark = model(torch_image)
        hm_pool = F.max_pool2d(hm, 3, 1, 1)
        # Keep only local maxima of the heatmap, then take the top 1000 peaks.
        scores, indices = ((hm == hm_pool).float() * hm).view(1, -1).cpu().topk(1000)
    hm_height, hm_width = hm.shape[2:]

    scores = scores.squeeze()
    indices = indices.squeeze()
    # Flat index -> (row, col). Floor division avoids the deprecated
    # true-division-then-truncate on integer tensors.
    ys = list((indices // hm_width).int().data.numpy())
    xs = list((indices % hm_width).int().data.numpy())
    scores = list(scores.data.numpy())
    box = box.cpu().squeeze().data.numpy()
    landmark = landmark.cpu().squeeze().data.numpy()

    stride = 4  # heatmap cell size in input pixels
    objs = []
    for cx, cy, score in zip(xs, ys, scores):
        # topk output is sorted descending, so the first sub-threshold score
        # means every remaining candidate is below threshold too.
        if score < threshold:
            break

        # Box regression gives distances (left, top, right, bottom) from the cell.
        x, y, r, b = box[:, cy, cx]
        xyrb = (np.array([cx, cy, cx, cy]) + [-x, -y, r, b]) * stride
        # Landmark layout: 5 x-offsets followed by 5 y-offsets, decoded via exp.
        x5y5 = landmark[:, cy, cx]
        x5y5 = (common.exp(x5y5 * 4) + ([cx] * 5 + [cy] * 5)) * stride
        box_landmark = list(zip(x5y5[:5], x5y5[5:]))
        objs.append(common.BBox(0, xyrb=xyrb, score=score, landmark=box_landmark))
    return nms(objs, iou=nms_iou)


def detect_image(model, file):
    """Detect faces in an image file and save an annotated copy.

    The result is written to ``detect_result/<name>.draw.jpg``.
    """
    image = common.imread(file)
    for obj in detect(model, image):
        common.drawbbox(image, obj)

    out_path = "detect_result/" + common.file_name_no_suffix(file) + ".draw.jpg"
    common.imwrite(out_path, image)


def image_demo():
    """Single-image detection demo using the pretrained weights."""
    net = DBFace()
    net.eval()
    if HAS_CUDA:
        net.cuda()
    net.load("model/dbface.pth")

    detect_image(net, "datas/12_Group_Group_12_Group_Group_12_728.jpg")


def camera_demo():
    """Live webcam demo: draw detections on each frame until 'q' is pressed."""
    net = DBFace()
    net.eval()
    if HAS_CUDA:
        net.cuda()
    net.load("model/dbface.pth")

    capture = cv2.VideoCapture(0)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    ok, frame = capture.read()
    while ok:
        for obj in detect(net, frame):
            common.drawbbox(frame, obj)

        cv2.imshow("demo DBFace", frame)
        if (cv2.waitKey(1) & 0xFF) == ord('q'):
            break
        ok, frame = capture.read()

    capture.release()
    cv2.destroyAllWindows()


def video_detect(video_list, img_src, param):
    """Scan a list of videos, crop detected faces into img_src, and log a summary.

    Frames are sampled with a gap that grows as the video progresses
    (``interval_weight`` increases by 400 per detection pass and is further
    multiplied by 1.5 each time a face is actually saved), so early frames are
    sampled densely and later frames sparsely.

    Args:
        video_list: paths of the video files to process.
        img_src: output directory for the cropped face images.
        param: tuple ``(frame_interval, score_threshold)`` — base sampling gap
            in frames, and the score passed to ``common.cut_face``.
    """
    dbface = DBFace()
    dbface.eval()
    if HAS_CUDA:
        dbface.cuda()
    dbface.load("model/dbface.pth")

    frame_interval = param[0]
    count = 0  # number of face crops saved so far (used to name the files)

    time_start = time.time()
    for video in video_list:
        one_video_start = time.time()
        print("处理视频:", video)  # "processing video"
        interval_weight = 0
        cap = cv2.VideoCapture(video)

        frames_num = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        print("视频总帧数:", frames_num)  # "total frame count"

        frame_count = 0
        detect_count = 0  # frame index at which the next detection pass runs
        ok, frame = cap.read()
        while ok:
            if frame_count == detect_count:
                # Widen the sampling gap before scheduling the next pass.
                interval_weight += 400
                detect_count = frame_interval + interval_weight + detect_count
                objs = detect(dbface, frame)
                for obj in objs:
                    if common.cut_face(frame, obj, count, img_src, param[1]):
                        print("已捕获人脸图", count, "张")  # "captured face image N"
                        # Back off faster once faces are being captured.
                        interval_weight *= 1.5
                        count += 1
            frame_count += 1
            ok, frame = cap.read()

        cap.release()
        cv2.destroyAllWindows()
        one_video_end = time.time()
        print("视频总耗时", one_video_end - one_video_start, "秒")  # per-video elapsed seconds

    time_end = time.time()
    # NOTE(review): the timestamp contains ':' characters, which are invalid in
    # filenames on Windows — fine on Linux, but confirm the deployment target.
    with open(os.path.join("logs", str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())) + ".txt"), "a+") as f:
        f.write(img_src + '\n' +
                "总共捕获图片:" + str(count) + "张" + '\n' +
                "总耗时:" + str((time_end - time_start) / 3600) + "小时" + '\n' +
                "截图间隔:" + str(frame_interval) + "帧" + '\n' +
                "分数阈值:" + str(param[1]) + '\n\n')
    print("总共捕获图片", count, "张")  # "total images captured"
    print("总耗时", time_end - time_start, "秒")  # total elapsed seconds


def video_search(src, video_list):
    """Recursively collect video files under ``src`` whose name contains "半".

    Matches the extensions .avi/.mp4/.MP4/.wmv and appends the full paths to
    ``video_list`` (mutated in place).

    Args:
        src: directory to search.
        video_list: output list the matches are appended to.
    """
    # os.path.splitext returns the extension WITH the leading dot, so the
    # original entry 'wmv' (no dot) could never match — fixed to '.wmv'.
    video_type = ['.avi', '.mp4', '.MP4', '.wmv']
    for path in os.listdir(src):
        full = os.path.join(src, path)
        if os.path.isfile(full) and os.path.splitext(path)[1] in video_type and "半" in path:
            video_list.append(full)
        if os.path.isdir(full):
            video_search(full, video_list)


def video_handle(src, param):
    """Find videos under ``src`` and extract faces into ``src + "student_face"``.

    Any previous output directory is deleted first so each run starts clean.
    """
    # NOTE(review): the output path is built by plain concatenation with no
    # separator, so 'datas' becomes 'datasstudent_face' — works as a prefix
    # only when src ends with '/'. Confirm whether os.path.join was intended.
    target = src + "student_face"

    videos = []
    video_search(src, videos)

    if os.path.exists(target):
        shutil.rmtree(target)
    video_detect(videos, target, param)


def image_handle(src, face_root="/home/zbzbzzz/datasets/student/face_0.5"):
    """Crop faces from every image under ``src/<subdir>/`` into ``face_root/<subdir>/``.

    Args:
        src: directory containing one sub-directory of images per person/class.
        face_root: destination root for the cropped faces; a matching
            sub-directory is (re)created per input sub-directory. Defaults to
            the original hard-coded path so existing callers are unaffected.
    """
    dbface = DBFace()
    dbface.eval()
    if HAS_CUDA:
        dbface.cuda()
    dbface.load("model/dbface.pth")

    count = 1  # running index used by cut_face_square to name the saved crops
    for path in os.listdir(src):
        face_path = os.path.join(face_root, path)
        base_path = os.path.join(src, path)
        # Start from a clean output directory for this sub-folder.
        if os.path.exists(face_path):
            shutil.rmtree(face_path)
        os.mkdir(face_path)
        for img in os.listdir(base_path):
            image = common.imread(os.path.join(base_path, img))
            image = cv2.resize(image, (300, 300))
            for obj in detect(dbface, image):
                # 0.5 is the minimum score cut_face_square accepts for saving.
                if common.cut_face_square(image, obj, count, face_path, 0.5):
                    print("已捕获人脸图", count, "张")  # "captured face image N"
                    count += 1


if __name__ == "__main__":
    if not os.path.exists("logs"):
        os.makedirs("logs")
    src_map = {'datas': (300, 0.7)}
    for src in src_map:
        video_handle(src, src_map[src])
    # video_demo("./datas/1.mp4")
    # image_demo()
    # camera_demo()
    # image_handle("/home/zbzbzzz/datasets/student/picture")
