# -*- coding: UTF-8 -*-
import argparse
import time
from pathlib import Path
import sys
import os

import numpy as np
import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random
import copy
from PIL import Image

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

from models.experimental import attempt_load
from utils.datasets import letterbox, img_formats, vid_formats, LoadImages, LoadStreams
from utils.general import check_img_size, non_max_suppression_face, apply_classifier, scale_coords, xyxy2xywh, \
    strip_optimizer, set_logging, increment_path
from utils.plots import plot_one_box
from utils.torch_utils import select_device, load_classifier, time_synchronized


def load_model(weights, device):
    """Load a YOLOv5 checkpoint as an FP32 model placed on *device*."""
    return attempt_load(weights, map_location=device)


def scale_coords_landmarks(img1_shape, coords, img0_shape, ratio_pad=None):
    """Rescale 5-point landmark coordinates (x1,y1,...,x5,y5) from the
    letterboxed shape *img1_shape* back to the original *img0_shape*.

    Operates on *coords* in place (undo padding, divide by gain, clamp to
    image bounds) and returns the same tensor.
    """
    if ratio_pad is None:
        # Recompute letterbox gain and padding from the two shapes.
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad_x = (img1_shape[1] - img0_shape[1] * gain) / 2
        pad_y = (img1_shape[0] - img0_shape[0] * gain) / 2
    else:
        gain = ratio_pad[0][0]
        pad_x, pad_y = ratio_pad[1]

    coords[:, [0, 2, 4, 6, 8]] -= pad_x  # remove x padding
    coords[:, [1, 3, 5, 7, 9]] -= pad_y  # remove y padding
    coords[:, :10] /= gain
    # Clamp every coordinate into the original image: even columns are x
    # (bounded by width, img0_shape[1]); odd columns are y (bounded by height).
    for k in range(10):
        coords[:, k].clamp_(0, img0_shape[(k + 1) % 2])
    return coords


def show_results(img, xyxy, conf, landmarks, class_num):
    """Draw one face detection (box, five landmark dots, confidence text)
    on a copy of *img* and return the annotated copy.

    Args:
        img: HxWx3 BGR image (numpy array); not modified.
        xyxy: box corners [x1, y1, x2, y2].
        conf: detection confidence; rendered as text truncated to 5 chars.
        landmarks: flat sequence [x1, y1, ..., x5, y5] of facial landmarks.
        class_num: class id (currently unused by the drawing code).
    """
    h, w, c = img.shape
    # BUG FIX: the original `tl = 1 or round(0.002 * (h + w) / 2) + 1`
    # always short-circuited to 1, leaving the size-adaptive expression
    # dead; the effective constant thickness of 1 is kept.
    tl = 1  # line/font thickness
    x1 = int(xyxy[0])
    y1 = int(xyxy[1])
    x2 = int(xyxy[2])
    y2 = int(xyxy[3])
    img = img.copy()  # never mutate the caller's frame

    cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), thickness=tl, lineType=cv2.LINE_AA)

    # One fixed color per landmark so the five points stay distinguishable.
    clors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (0, 255, 255)]

    for i in range(5):
        point_x = int(landmarks[2 * i])
        point_y = int(landmarks[2 * i + 1])
        cv2.circle(img, (point_x, point_y), tl + 1, clors[i], -1)

    tf = max(tl - 1, 1)  # font thickness
    label = str(conf)[:5]
    cv2.putText(img, label, (x1, y1 - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
    return img

def calculate_hash(image):
    """Compute a 64-bit average-hash (aHash) for *image*.

    The image is shrunk to 8x8, converted to grayscale, and every pixel is
    compared against the mean intensity: '1' if brighter than the mean,
    '0' otherwise.

    Returns:
        A 64-character string of '0'/'1' bits.
    """
    # Shrink to 8x8 so the hash only captures coarse structure.
    image = cv2.resize(image, (8, 8), interpolation=cv2.INTER_CUBIC)
    # Collapse color: the hash is intensity-based only.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # BUG FIX: the original accumulated uint8 pixels into a numpy scalar
    # (`s = s + gray[i, j]`), which can wrap around since the sum of 64
    # pixels may far exceed 255, producing a wrong average and hash.
    # gray.mean() computes the average in floating point.
    avg = gray.mean()
    return ''.join('1' if gray[i, j] > avg else '0'
                   for i in range(8) for j in range(8))


def hamming_distance(str1, str2):
    """Return the Hamming distance between two equal-length hash strings.

    Raises:
        ValueError: if the strings differ in length. The original code
        silently returned None here, which would later crash comparisons
        such as `distance < 20` with an unrelated TypeError.
    """
    if len(str1) != len(str2):
        raise ValueError(
            'hamming_distance: inputs differ in length '
            f'({len(str1)} vs {len(str2)})')
    # Count positions where the two bit strings disagree.
    return sum(a != b for a, b in zip(str1, str2))

#  创建保存相似人脸的文件夹
def create_folder(path):
    """Create directory *path* (including parents) if it does not exist.

    Uses exist_ok=True instead of the original check-then-create pair,
    which removes the race where the directory appears between the
    os.path.exists check and os.makedirs.
    """
    os.makedirs(path, exist_ok=True)


def detect(
        model,
        source,
        device,
        project,
        name,
        exist_ok,
        save_img,
        view_img
):
    """Detect faces in images/videos/streams and group saved face crops
    into folders of visually similar faces (average-hash + Hamming distance).

    Args:
        model: YOLOv5-face model already loaded on *device* (see load_model).
        source: image/video path, folder, '.txt' list, numeric webcam id, or stream URL.
        device: torch device used for inference.
        project: root directory for run outputs.
        name: run name; outputs go under project/name (auto-incremented unless exist_ok).
        exist_ok: reuse an existing project/name directory instead of incrementing.
        save_img: write annotated media and grouped face crops to disk.
        view_img: display annotated frames in an OpenCV window.
    """
    # Fixed inference settings (NOTE(review): the CLI's --img-size is ignored here).
    img_size = 640
    conf_thres = 0.6
    iou_thres = 0.5
    imgsz = (640, 640)

    # Directories
    save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # incremented run directory
    Path(save_dir).mkdir(parents=True, exist_ok=True)  # create the output directory
    is_file = Path(source).suffix[1:] in (img_formats + vid_formats)  # is source a media file?
    is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))  # is source a URL?
    webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file)  # webcam/stream input?
    # Data loader
    if webcam:
        print('loading streams:', source)  # streaming input
        dataset = LoadStreams(source, img_size=imgsz)  # stream dataset
        bs = 1  # batch size 1
    else:
        print('loading images', source)  # image/video input
        dataset = LoadImages(source, img_size=imgsz)  # image dataset
        bs = 1  # batch size 1
    vid_path, vid_writer = [None] * bs, [None] * bs  # per-stream video writer state

    # Faces seen so far: {hash value: folder holding that face identity}
    faces_dict = {}
    frame_count = 0

    for path, im, im0s, vid_cap in dataset:
        if len(im.shape) == 4:  # batched input
            orgimg = np.squeeze(im.transpose(0, 2, 3, 1), axis=0)  # NCHW -> HWC
        else:
            orgimg = im.transpose(1, 2, 0)  # CHW -> HWC
        orgimg = cv2.cvtColor(orgimg, cv2.COLOR_BGR2RGB)  # convert color space to RGB
        img0 = copy.deepcopy(orgimg)  # work on a copy
        h0, w0 = orgimg.shape[:2]  # original height and width
        r = img_size / max(h0, w0)  # scale factor to the target size
        if r != 1:  # upscaling only happens with train-time augmentation; inference only shrinks
            interp = cv2.INTER_AREA if r < 1 else cv2.INTER_LINEAR  # pick interpolation method
            img0 = cv2.resize(img0, (int(w0 * r), int(h0 * r)), interpolation=interp)  # resize
        imgsz = check_img_size(img_size, s=model.stride.max())  # ensure size is a stride multiple
        img = letterbox(img0, new_shape=imgsz)[0]  # letterbox-pad to the network input shape

        # HWC -> CHW layout for the model
        img = img.transpose(2, 0, 1).copy()
        img = torch.from_numpy(img).to(device)  # to tensor on the inference device
        img = img.float()  # uint8 -> float
        img /= 255.0  # normalize pixel values from 0-255 to 0.0-1.0
        if img.ndimension() == 3:  # add a batch dimension if missing
            img = img.unsqueeze(0)
        # Inference
        pred = model(img)[0]  # forward pass
        # Apply non-max suppression (face variant keeps the 10 landmark columns)
        pred = non_max_suppression_face(pred, conf_thres, iou_thres)  # NMS on predictions
        print(len(pred[0]), 'face' if len(pred[0]) == 1 else 'faces')  # report detected face count

        # Process detections
        for i, det in enumerate(pred):  # iterate per image in the batch

            if webcam:  # stream input: path/im0s are lists
                p, im0, frame = path[i], im0s[i].copy(), dataset.count
            else:
                p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)

            p = Path(p)  # convert to a Path object
            save_path = str(Path(save_dir) / p.name)  # annotated output path

            image_array = []  # cropped face images from this frame

            if len(det):  # any detections?
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()  # boxes back to im0 scale
                for c in det[:, -1].unique():  # per detected class
                    n = (det[:, -1] == c).sum()  # per-class count (computed but unused)

                det[:, 5:15] = scale_coords_landmarks(img.shape[2:], det[:, 5:15], im0.shape).round()  # landmarks back to im0 scale

                for j in range(det.size()[0]):  # each detection
                    xyxy = det[j, :4].view(-1).tolist()  # box coordinates
                    conf = det[j, 4].cpu().numpy()  # confidence score
                    landmarks = det[j, 5:15].view(-1).tolist()  # 5 landmark points
                    class_num = det[j, 15].cpu().numpy()  # class id

                    xyxy = [int(x) for x in xyxy]  # coordinates to ints
                    image_array.append(im0[xyxy[1]:xyxy[3], xyxy[0]:xyxy[2]])  # crop the face region
                    im0 = show_results(im0, xyxy, conf, landmarks, class_num)  # draw overlay

            if view_img:  # live preview
                cv2.imshow('result', im0)  # show the frame
                k = cv2.waitKey(1)  # pump the GUI event loop

            if save_img:  # persist results
                if dataset.mode == 'image':  # still-image input
                    cv2.imwrite(save_path, im0)  # save the annotated image
                    for face_img in image_array:
                        hash_value = calculate_hash(face_img)
                        if len(faces_dict) == 0:
                            # First face seen: start the first similarity folder.
                            # NOTE(review): output root 'D:/1/' is hard-coded here while the
                            # video branch below uses 'E:/1/' — confirm the intended paths.
                            folder_path = os.path.join('D:/1/', str(hash_value))
                            create_folder(folder_path)
                            face_path = os.path.join(folder_path, 'face{}.jpg'.format(frame_count))
                            cv2.imwrite(face_path, face_img)
                            faces_dict[hash_value] = folder_path
                        else:
                            # Look for an existing folder holding a similar face.
                            folder_paths = list(faces_dict.values())
                            for folder_path in folder_paths:
                                # Compare against the first stored face of each folder.
                                face_paths = os.listdir(folder_path)
                                if len(face_paths) > 0:
                                    first_face_path = os.path.join(folder_path, face_paths[0])
                                    first_face_img = cv2.imread(first_face_path)
                                    first_hash_value = calculate_hash(first_face_img)
                                    distance = hamming_distance(hash_value, first_hash_value)
                                    if distance < 20:  # NOTE(review): original comment claimed "<= 5" but the code uses < 20 — confirm threshold
                                        face_path = os.path.join(folder_path,
                                                                 'face{}.jpg'.format(frame_count))
                                        cv2.imwrite(face_path, face_img)
                                        break
                            else:
                                # for-else: no similar folder matched, create a new one.
                                folder_path = os.path.join('D:/1/', str(hash_value))
                                create_folder(folder_path)
                                face_path = os.path.join(folder_path, 'face{}.jpg'.format(frame_count))
                                cv2.imwrite(face_path, face_img)
                                faces_dict[hash_value] = folder_path

                else:  # video or stream mode
                    if vid_path[i] != save_path:  # a new video started
                        vid_path[i] = save_path
                        if isinstance(vid_writer[i], cv2.VideoWriter):  # a previous writer exists
                            vid_writer[i].release()  # release it
                        if vid_cap:  # video file (not a live stream)
                            fps = vid_cap.get(cv2.CAP_PROP_FPS)  # frame rate
                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))  # frame width
                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))  # frame height

                            # NOTE(review): this entire grouping pass runs only once per video
                            # (it sits inside the vid_path check) and duplicates the logic in
                            # the try-block below — likely redundant; confirm before removing.
                            frame_interval = int(fps / 3)  # sample about three frames per second
                            # NOTE(review): frame_interval is 0 when fps < 3, making the modulo
                            # below raise ZeroDivisionError — confirm expected fps range.
                            if frame_count % frame_interval == 0:
                                for face_img in image_array:
                                    hash_value = calculate_hash(face_img)
                                    if len(faces_dict) == 0:
                                        # First face seen: start the first similarity folder.
                                        folder_path = os.path.join('E:/1/', str(hash_value))
                                        create_folder(folder_path)
                                        face_path = os.path.join(folder_path, 'face{}.jpg'.format(frame_count))
                                        cv2.imwrite(face_path, face_img)
                                        faces_dict[hash_value] = folder_path
                                    else:
                                        # Look for an existing folder holding a similar face.
                                        folder_paths = list(faces_dict.values())
                                        for folder_path in folder_paths:
                                            # Compare against the first stored face of each folder.
                                            face_paths = os.listdir(folder_path)
                                            if len(face_paths) > 0:
                                                first_face_path = os.path.join(folder_path, face_paths[0])
                                                first_face_img = cv2.imread(first_face_path)
                                                first_hash_value = calculate_hash(first_face_img)
                                                distance = hamming_distance(hash_value, first_hash_value)
                                                if distance < 20:  # NOTE(review): threshold 20, not the "<= 5" the original comment claimed
                                                    face_path = os.path.join(folder_path,
                                                                             'face{}.jpg'.format(frame_count))
                                                    cv2.imwrite(face_path, face_img)
                                                    break
                                        else:
                                            # for-else: no similar folder matched, create a new one.
                                            folder_path = os.path.join('E:/1/', str(hash_value))
                                            create_folder(folder_path)
                                            face_path = os.path.join(folder_path, 'face{}.jpg'.format(frame_count))
                                            cv2.imwrite(face_path, face_img)
                                            faces_dict[hash_value] = folder_path
                            frame_count += 1

                        else:  # live stream: no capture metadata available
                            fps, w, h = 30, im0.shape[1], im0.shape[0]  # assumed fps and frame size
                        save_path = str(Path(save_path).with_suffix('.mp4'))  # force .mp4 output
                        vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps,
                                                        (w, h))  # create the video writer
                    try:
                        vid_writer[i].write(im0)  # write the annotated frame
                        # NOTE(review): for streams vid_cap may be None, so this line would
                        # raise AttributeError; it is swallowed by the except below — confirm.
                        fps = vid_cap.get(cv2.CAP_PROP_FPS)  # frame rate
                        frame_interval = int(fps / 3)  # sample about three frames per second
                        if frame_count % frame_interval == 0:
                            for face_img in image_array:
                                hash_value = calculate_hash(face_img)
                                if len(faces_dict) == 0:
                                    # First face seen: start the first similarity folder.
                                    folder_path = os.path.join('E:/1/', str(hash_value))
                                    create_folder(folder_path)
                                    face_path = os.path.join(folder_path, 'face{}.jpg'.format(frame_count))
                                    cv2.imwrite(face_path, face_img)
                                    faces_dict[hash_value] = folder_path
                                else:
                                    # Look for an existing folder holding a similar face.
                                    folder_paths = list(faces_dict.values())
                                    for folder_path in folder_paths:
                                        # Compare against the first stored face of each folder.
                                        face_paths = os.listdir(folder_path)
                                        if len(face_paths) > 0:
                                            first_face_path = os.path.join(folder_path, face_paths[0])
                                            first_face_img = cv2.imread(first_face_path)
                                            first_hash_value = calculate_hash(first_face_img)
                                            distance = hamming_distance(hash_value, first_hash_value)
                                            if distance < 20:  # NOTE(review): threshold 20, not the "<= 5" the original comment claimed
                                                face_path = os.path.join(folder_path, 'face{}.jpg'.format(frame_count))
                                                cv2.imwrite(face_path, face_img)
                                                break
                                    else:
                                        # for-else: no similar folder matched, create a new one.
                                        folder_path = os.path.join('E:/1/', str(hash_value))
                                        create_folder(folder_path)
                                        face_path = os.path.join(folder_path, 'face{}.jpg'.format(frame_count))
                                        cv2.imwrite(face_path, face_img)
                                        faces_dict[hash_value] = folder_path
                        frame_count += 1

                    except Exception as e:
                        print(e)  # best-effort: report and continue on any save-path error


if __name__ == '__main__':
    # Command-line entry point: parse arguments, load the model, run detection.
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default='runs/train/exp/weights/best.pt',
                        help='model.pt path(s)')
    parser.add_argument('--source', type=str, default='0', help='source')  # file/folder, 0 for webcam
    # NOTE(review): --img-size is parsed but detect() hard-codes 640 — this flag has no effect.
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--project', default='runs/detect', help='save results to project/name')
    parser.add_argument('--name', default='exp', help='save results to project/name')
    parser.add_argument('--exist-o', action='store_true', help='existing project/name ok, do not increment')
    # NOTE(review): store_true combined with default=True means these two flags are
    # always True and the CLI switches are no-ops — confirm they should default to False.
    parser.add_argument('--save-img', action='store_true', help='save results', default=True)
    parser.add_argument('--view-img', action='store_true', help='show results', default=True)
    # NOTE(review): --device is parsed but ignored; the device below is chosen
    # solely by torch.cuda.is_available().
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    opt = parser.parse_args()

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = load_model(opt.weights, device)
    detect(model, opt.source, device, opt.project, opt.name, opt.exist_o, opt.save_img, opt.view_img)
