#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
@author:  runyuanye
@contact: runyuanye@163.com
"""

import argparse
import os
import sys
import numpy as np
import cv2
from collections import OrderedDict
import glob
import time
import multiprocessing
import torch

from distance import euclidean_squared_distance, euclidean_squared_distance2, cosine_distance, cosine_distance2, cosine_distance_ex

sys.path.append('.')

"""
    人脸
"""


def show_images(image_dir, file_names, total_image_count, dis_text=''):
    """Display the images in *file_names* page by page in an OpenCV window.

    Each image is resized to a fixed tile size and laid out in a grid of
    ``h_image_count`` x ``v_image_count`` tiles per page.  A status line with
    *dis_text* and progress counters is drawn in the top-left corner.

    Press ESC / 'q' / 'Q' to stop early; any other key advances one page.

    Args:
        image_dir: directory the (relative) file names are joined to.
        file_names: iterable of image file names to show.
        total_image_count: overall count shown in the overlay text.
        dis_text: extra prefix for the overlay text.
    """
    h_image_count = 10   # tiles per row
    v_image_count = 6    # rows per page
    img_width = 128
    img_height = 128

    display_image_count = h_image_count * v_image_count
    files = []
    last_idx = len(file_names) - 1
    for idx, file_name in enumerate(file_names):
        files.append(file_name)
        file_count = len(files)
        # Accumulate until a full page (or the final partial page) is ready.
        if file_count < display_image_count and (idx < last_idx):
            continue
        imgs = []
        for file_name in files:
            file_path = os.path.join(image_dir, file_name.strip())
            img = cv2.imread(file_path)
            if img is None:
                # Unreadable/missing image: skip it instead of crashing in resize.
                print('Cannot read image: {}'.format(file_path))
                continue
            img = cv2.resize(img, (img_width, img_height))
            imgs.append(img)
        files.clear()
        image_count = len(imgs)
        if image_count == 0:
            continue
        images = []
        for j, img in enumerate(imgs):
            if j % h_image_count == 0:
                h_images = [img]
            else:
                h_images.append(img)
            # Flush a completed row (or the final partial row) into the page.
            if ((j + 1) % h_image_count == 0) or ((j + 1) == image_count):
                images.append(np.concatenate(h_images, axis=1))

        if len(images) > 1:
            # Pad the last (possibly narrower) row so vstack gets equal widths.
            w0 = images[0].shape[1]
            h0 = images[0].shape[0]
            w1 = images[-1].shape[1]
            h1 = images[-1].shape[0]
            if (w0 != w1) or (h0 != h1):
                image = np.zeros_like(images[0])
                image[0:h1, 0:w1, :] = images[-1]
                images[-1] = image
        draw_image = np.vstack(images)
        cv2.putText(draw_image, '%s %d/%d/%d' % (dis_text, idx + 1, last_idx + 1, total_image_count), (8, 24), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 255, 0), 1)
        cv2.imshow('image', draw_image)
        key = cv2.waitKey()
        if key == 27 or key == ord('q') or key == ord('Q'):
            return


def face_select_run(file_queue, out_queue, id, args, output_dir):
    """Worker process: pick one typical (most central) image per face id.

    Pulls ``(faceid, files, features)`` jobs from *file_queue* and, for each
    face id, selects the image whose feature has the smallest summed cosine
    distance to all other images of the same id.  The chosen file name and a
    shared-memory clone of its feature are pushed to *out_queue* as
    ``([faceid, file, feature, image_count], False, id)``.

    The worker stops on a ``None`` sentinel or when *file_queue* stays empty
    for 5 seconds, then emits a final ``([], True, id)`` marker.
    """
    from queue import Empty

    # Pin this worker to one GPU (round-robin over the available GPUs).
    DEVICE_ID = str(id % args.gpu_count)
    os.environ['CUDA_VISIBLE_DEVICES'] = DEVICE_ID

    DEVICE = 'cuda:0'

    from torch.backends import cudnn

    cudnn.benchmark = True

    try:
        while True:
            try:
                file_info = file_queue.get(timeout=5)
            except Empty:
                # No more work arriving for 5s: treat as normal shutdown.
                break
            if file_info is None:
                break
            faceid, files, features = file_info
            total_image_count = len(files)
            if total_image_count == 1:
                # A single image is the typical face by definition.
                clone_feat = features[0].clone()
                clone_feat.share_memory_()
                out_queue.put(([faceid, files[0], clone_feat, 1], False, id))
                continue

            if not args.cpu:
                feats = features.cuda(device=DEVICE)
            else:
                feats = features
            feats.requires_grad = False
            # Very large groups are processed in chunks to bound memory use.
            split_process = feats.size(0) > args.split_process_feature_count
            if split_process:
                dist_sum = cosine_distance_ex(feats)
            else:
                distmat = cosine_distance(feats)
                dist_sum = distmat.sum(axis=1)
            if not args.cpu:
                dist_sum = dist_sum.cpu()
            dist_sum = dist_sum.numpy()
            # The most "central" image minimizes total distance to its peers.
            face_idx = np.argmin(dist_sum, axis=0)
            if args.show:
                max_face_idx = np.argmax(dist_sum, axis=0)
                show_images(args.image_dir, files, total_image_count, '{} {}'.format(face_idx, max_face_idx))
            clone_feat = features[face_idx].clone()
            clone_feat.share_memory_()
            # Back-pressure: keep the output queue bounded.
            while out_queue.qsize() > 1000:
                time.sleep(0.01)
            out_queue.put(([faceid, files[face_idx], clone_feat, len(files)], False, id))
    except Exception as e:
        # Unexpected failure: report it, then still emit the finish marker.
        print(e)

    out_queue.put(([], True, id))


def out_face_select_run(out_queue, args, total_face_count, output_dir):
    """Writer process: append typical-face records coming from the workers.

    Consumes ``(record, finish, worker_id)`` tuples from *out_queue* until all
    ``args.proccess_count`` workers have sent their finish marker.  Each
    record ``[faceid, file, feature, image_count]`` is appended to
    ``TypicalFace.txt`` (text) and ``TypicalFaceFeature.dat`` (raw float32),
    which are finally made world read/writable.
    """
    face_count = 0
    list_file_path = os.path.join(output_dir, 'TypicalFace.txt')
    feature_file_path = os.path.join(output_dir, 'TypicalFaceFeature.dat')
    # Context managers guarantee the files are closed even on errors.
    # 'a+' keeps results from a previous (partial) run; the feature file holds
    # raw float32 data, so open it in binary mode.
    with open(list_file_path, 'a+') as list_file, open(feature_file_path, 'a+b') as feature_file:
        try:
            finish_worker_count = 0
            while True:
                file_info = out_queue.get(block=True)
                if file_info is None:
                    break
                faceid_file_feature, finish, id = file_info
                if finish:
                    print('Proc{} finish'.format(id))
                    finish_worker_count += 1
                    if args.proccess_count <= finish_worker_count:
                        break
                    continue
                faceid, file, feature, face_image_count = faceid_file_feature
                # faceid is '<id>_<uuid>'; write the two parts space-separated.
                faceid = faceid.split('_')
                out_info = '{} {} {} {}\n'.format(faceid[0], faceid[1], file, face_image_count)
                list_file.write(out_info)
                feature = feature.numpy()
                feature.tofile(feature_file)
                face_count += 1
                if face_count % 1000 == 0:
                    print('{:06f}, Proc{}, File Count: {}/{}, {}'.format(time.time(), id, face_count, total_face_count, faceid))

        except Exception as e:
            print(e)
    os.system('chmod a+wr {}'.format(list_file_path))
    os.system('chmod a+wr {}'.format(feature_file_path))


def face_select_mp(args, xiaoqu_faceid_file_features):
    """Fan out typical-face selection over worker processes, one xiaoqu at a time."""
    output_dir = args.output_dir

    for xiaoqu, faceid_file_features in xiaoqu_faceid_file_features.items():
        job_queue = multiprocessing.Queue()
        result_queue = multiprocessing.Queue()

        total_face_count = len(faceid_file_features)

        # One selection worker per process, plus a single writer process.
        selectors = [
            multiprocessing.Process(
                target=face_select_run,
                args=(job_queue, result_queue, worker_id, args, output_dir))
            for worker_id in range(args.proccess_count)
        ]
        writer = multiprocessing.Process(
            target=out_face_select_run,
            args=(result_queue, args, total_face_count, output_dir))

        for proc in selectors:
            proc.start()
        writer.start()

        print('Start Time: {:06f} {}'.format(time.time(), xiaoqu))
        wait_count_thr = args.proccess_count * 1000
        for faceid, file_features in faceid_file_features.items():
            # Throttle the producer so the job queue stays bounded.
            while job_queue.qsize() > wait_count_thr:
                time.sleep(0.01)
            files = file_features[0]
            features = np.concatenate(file_features[1], axis=0)
            file_features[1] = None  # release the per-image feature list
            features = torch.from_numpy(features)
            features.share_memory_()
            job_queue.put([faceid, files, features])

        writer.join()
        for proc in selectors:
            proc.join()


def face_merge_run(file_queue, out_queue, id, args, output_dir):
    """Worker process: find faceids within one xiaoqu that belong to the same person.

    Pulls ``(xiaoqu, faceids, files, features, face_image_counts)`` jobs from
    *file_queue* and compares the typical-face features pairwise by cosine
    distance; pairs closer than ``1 - args.score_thr`` are treated as the same
    person.  The grouping strategy depends on ``args.mode`` (0/1/2, see the
    inline comments).  Results go to *out_queue* as ``([xiaoqu,
    same_faceids_list, same_files_list, diff_faceids_list, diff_files_list],
    False, id)``; a ``([], True, id)`` marker is sent when the worker stops
    (``None`` sentinel or 5s queue timeout).
    """
    # Pin this worker to one GPU (round-robin over the available GPUs).
    DEVICE_ID = str(id % args.gpu_count)
    os.environ['CUDA_VISIBLE_DEVICES'] = DEVICE_ID

    DEVICE = 'cuda:0'

    from torch.backends import cudnn

    cudnn.benchmark = True

    try:
        # Similarity score >= score_thr corresponds to cosine distance < 1 - score_thr.
        same_face_distance_thr = 1 - args.score_thr
        while True:
            file_info = file_queue.get(timeout=5)
            if file_info is None:
                break
            xiaoqu, faceids, files, features, face_image_counts = file_info
            total_image_count = len(files)
            if total_image_count == 1:
                # A single faceid has nothing to merge with; report it as "different".
                if args.show:
                    out_queue.put(([], False, id))
                else:
                    out_queue.put(([xiaoqu, [], [], faceids, files], False, id))
                continue

            if not args.cpu:
                feats = features.cuda(device=DEVICE)
            else:
                feats = features
            feats.requires_grad = False
            # Visit faceids ordered by how many source images they had (fewest first).
            face_image_count_sort = face_image_counts.numpy().argsort()
            same_face_idxes = []
            diff_face_idxes = []
            feat_count = feats.size(0)
            # Above this size the full NxN distance matrix is too big; compute one row at a time.
            split_process = feat_count > args.split_process_feature_count
            if not split_process:
                distmat = cosine_distance(feats)
                distmat = distmat.cpu().numpy()
            if args.mode == 0:
                # Mode 0: single greedy pass; every not-yet-grouped faceid collects
                # all faceids within the threshold as its group.
                same_idx_set = set()
                for idx in face_image_count_sort:
                    if idx in same_idx_set:
                        continue
                    if split_process:
                        distmat = cosine_distance2(feats, feats[idx:idx+1])
                        distmat = distmat.t().cpu().numpy()
                        dist = distmat[0]
                    else:
                        dist = distmat[idx]
                    mask = dist < same_face_distance_thr
                    mask[idx] = False  # never match a faceid with itself
                    same_image_idx = np.nonzero(mask)[0]
                    if same_image_idx.size > 0:
                        same_image_idx = same_image_idx.tolist()
                        same_idx_set.update(same_image_idx)
                        same_face_idxes.append([idx] + same_image_idx)  # FIXME: faces already judged similar earlier are not excluded here
            elif args.mode == 1:
                # Mode 1: like mode 0, but a new neighbour set is merged into the first
                # existing group it overlaps with (single merge per faceid).
                same_idx_set = {}
                for idx in face_image_count_sort:
                    if split_process:
                        distmat = cosine_distance2(feats, feats[idx:idx+1])
                        distmat = distmat.t().cpu().numpy()
                        dist = distmat[0]
                    else:
                        dist = distmat[idx]
                    mask = dist < same_face_distance_thr
                    mask[idx] = False
                    same_image_idx = np.nonzero(mask)[0]
                    if same_image_idx.size > 0:
                        same_image_idx = set(same_image_idx.tolist())
                        has_merge = False
                        for sidx, sidx_set in same_idx_set.items():
                            if (sidx in same_image_idx) or (idx in sidx_set) or (len(sidx_set & same_image_idx) > 0):
                                sidx_set.update(same_image_idx)
                                sidx_set.add(idx)
                                if sidx in sidx_set:
                                    # The group key must not also appear inside its member set.
                                    sidx_set.remove(sidx)
                                has_merge = True
                                break
                        if not has_merge:
                            same_idx_set[idx] = same_image_idx
                for idx, same_image_idx in same_idx_set.items():
                    same_face_idxes.append([idx] + list(same_image_idx))
            elif args.mode == 2:
                # Mode 2: build a neighbour set per faceid, then repeatedly merge
                # overlapping sets until no merge happens (transitive closure).
                # Only this mode fills diff_face_idxes (faceids with no duplicates).
                no_same_faceids = []
                same_faceids = []
                for idx in range(feat_count):
                    if split_process:
                        distmat = cosine_distance2(feats, feats[idx:idx+1])
                        distmat = distmat.t().cpu().numpy()
                        dist = distmat[0]
                    else:
                        dist = distmat[idx]
                    mask = dist < same_face_distance_thr
                    mask[idx] = False
                    same_face_count = mask.sum()
                    if same_face_count > 0:
                        same_image_idx = np.nonzero(mask)[0]
                        same_faceids.append(set([idx]+same_image_idx.tolist()))
                    else:
                        no_same_faceids.append(idx)
                diff_face_idxes = no_same_faceids
                if len(same_faceids) > 1:
                    while True:
                        has_merge_idx_set = set()
                        same_faceids_count = len(same_faceids)
                        merge_same_faceids = []
                        for i in range(same_faceids_count):
                            if i in has_merge_idx_set:
                                continue
                            merge_same_faceid_set = same_faceids[i]
                            for j in range(i+1, same_faceids_count):
                                same_faceid_set = same_faceids[j]
                                _merge_same_faceid_set = merge_same_faceid_set | same_faceid_set
                                if len(_merge_same_faceid_set) < (len(merge_same_faceid_set) + len(same_faceid_set)):
                                    # The sets share a faceid, so keep the merged set.
                                    merge_same_faceid_set = _merge_same_faceid_set
                                    has_merge_idx_set.add(j)
                            has_merge_idx_set.add(i)
                            merge_same_faceids.append(merge_same_faceid_set)
                        same_faceids = merge_same_faceids
                        # Stop once a full pass produced no further merges.
                        if len(merge_same_faceids) >= same_faceids_count:
                            break
                    same_face_idxes = same_faceids

            if args.show:
                # Interactive inspection: display every merged group of images.
                for same_face_idx in same_face_idxes:
                    same_files = []
                    same_faceids = []
                    for idx in same_face_idx:
                        same_files.append(files[idx])
                        same_faceids.append(faceids[idx])
                    show_images(args.image_dir, same_files, len(same_files), same_faceids[0])
            else:
                # Map index groups back to faceids / file names for the writer process.
                same_files_list = []
                same_faceids_list = []
                for same_face_idx in same_face_idxes:
                    same_files = []
                    same_faceids = []
                    for idx in same_face_idx:
                        same_files.append(files[idx])
                        same_faceids.append(faceids[idx])
                    same_files_list.append(same_files)
                    same_faceids_list.append(same_faceids)

                diff_files_list = []
                diff_faceids_list = []
                for diff_face_idx in diff_face_idxes:
                    diff_files_list.append(files[diff_face_idx])
                    diff_faceids_list.append(faceids[diff_face_idx])

                # Back-pressure: keep the output queue bounded.
                while out_queue.qsize() > 1000:
                    time.sleep(0.01)
                if (len(same_face_idxes) > 0) or (len(diff_face_idxes) > 0):
                    out_queue.put(([xiaoqu, same_faceids_list, same_files_list, diff_faceids_list, diff_files_list], False, id))
                else:
                    out_queue.put(([], False, id))
    except Exception as e:
        # queue.Empty raised by the 5s timeout has an empty str(), so the idle
        # shutdown path is silently ignored here; anything else is printed.
        if str(e) != '':
            print(e)

    out_queue.put(([], True, id))


def out_face_merge_run(out_queue, args, total_xiaoqu_count, output_dir):
    """Writer process: persist the merge results produced by the workers.

    Consumes ``(result, finish, worker_id)`` tuples from *out_queue* until all
    ``args.proccess_count`` workers reported finish.  Groups of faceids judged
    to be the same person go to ``MergeFace.txt`` / ``MergeFaceImage.txt``
    (one ``xiaoqu:comma-list`` line per group); faceids without duplicates go
    to ``DiffFace.txt`` / ``DiffFaceImage.txt`` (one entry per line).  All
    four files are finally made world read/writable.
    """
    xiaoqu_count = 0
    list_file_path = os.path.join(output_dir, 'MergeFace.txt')
    image_list_file_path = os.path.join(output_dir, 'MergeFaceImage.txt')
    diff_list_file_path = os.path.join(output_dir, 'DiffFace.txt')
    diff_image_list_file_path = os.path.join(output_dir, 'DiffFaceImage.txt')
    # Context managers ensure the four output files are closed on any exit path.
    with open(list_file_path, 'w') as list_file, \
            open(image_list_file_path, 'w') as image_list_file, \
            open(diff_list_file_path, 'w') as diff_list_file, \
            open(diff_image_list_file_path, 'w') as diff_image_list_file:
        try:
            finish_worker_count = 0
            while True:
                file_info = out_queue.get(block=True)
                if file_info is None:
                    break
                xiaoqu_faceid_files, finish, id = file_info
                if finish:
                    print('Proc{} finish'.format(id))
                    finish_worker_count += 1
                    if args.proccess_count <= finish_worker_count:
                        break
                    continue
                xiaoqu = None
                if len(xiaoqu_faceid_files) > 0:
                    xiaoqu, same_faceids_list, same_files_list, diff_faceids_list, diff_files_list = xiaoqu_faceid_files
                    for same_faceids, same_files in zip(same_faceids_list, same_files_list):
                        same_faceids_info = xiaoqu + ':' + (','.join(same_faceids)) + '\n'
                        list_file.write(same_faceids_info)
                        same_files_info = xiaoqu + ':' + (','.join(same_files)) + '\n'
                        image_list_file.write(same_files_info)
                    for diff_faceid, diff_file in zip(diff_faceids_list, diff_files_list):
                        diff_list_file.write(diff_faceid + '\n')
                        diff_image_list_file.write(diff_file + '\n')
                xiaoqu_count += 1
                print('{:06f}, Proc{}, {}, {}/{}'.format(time.time(), id, xiaoqu, xiaoqu_count, total_xiaoqu_count))
        except Exception as e:
            print(e)
    os.system('chmod a+wr {}'.format(list_file_path))
    os.system('chmod a+wr {}'.format(image_list_file_path))
    os.system('chmod a+wr {}'.format(diff_list_file_path))
    os.system('chmod a+wr {}'.format(diff_image_list_file_path))


def face_merge_mp(args, xiaoqu_faceid_file_features):
    """Fan out faceid merging over worker processes (all xiaoqu share one queue)."""
    output_dir = args.output_dir

    job_queue = multiprocessing.Queue()
    result_queue = multiprocessing.Queue()

    total_face_count = len(xiaoqu_faceid_file_features)

    # One merge worker per process, plus a single writer process.
    mergers = [
        multiprocessing.Process(
            target=face_merge_run,
            args=(job_queue, result_queue, worker_id, args, output_dir))
        for worker_id in range(args.proccess_count)
    ]
    writer = multiprocessing.Process(
        target=out_face_merge_run,
        args=(result_queue, args, total_face_count, output_dir))

    for proc in mergers:
        proc.start()
    writer.start()

    print('Start Time: {:06f}'.format(time.time()))
    wait_count_thr = args.proccess_count * 100
    for xiaoqu, faceid_file_features in xiaoqu_faceid_file_features.items():
        # Throttle the producer so the job queue stays bounded.
        while job_queue.qsize() > wait_count_thr:
            time.sleep(0.01)
        faceids = faceid_file_features[0]
        files = faceid_file_features[1]
        features = np.concatenate(faceid_file_features[2], axis=0)
        faceid_file_features[2] = None  # release the per-face feature list
        face_image_counts = faceid_file_features[3]
        features = torch.from_numpy(features)
        features.share_memory_()
        face_image_counts = torch.tensor(face_image_counts, dtype=torch.int32)
        face_image_counts.share_memory_()
        job_queue.put([xiaoqu, faceids, files, features, face_image_counts])

    writer.join()
    for proc in mergers:
        proc.join()


def read_features(file_list, feature_file, feat_dim, mode=0):
    """Load per-image features grouped as {xiaoqu: {faceid: [files, features]}}.

    Reads one line per image from *file_list* while consuming ``feat_dim``
    float32 values per image from the binary *feature_file*; the two files
    must be aligned record-for-record.

    Args:
        file_list: path of the text list file; line format depends on *mode*.
        feature_file: path of the raw float32 feature file.
        feat_dim: dimensionality of one feature vector.
        mode: 0 -> lines look like
            ``<faceid> <uuid> <relative/path/img.jpg> <flag>``; only flag==0
            records are kept and the xiaoqu is derived from the image path.
            Any other value -> lines are bare file names and every record
            belongs to the single pseudo-xiaoqu 'NONE'.

    Returns:
        dict mapping xiaoqu -> dict mapping faceid -> ``[file_names,
        features]`` where ``features`` is a list of (1, feat_dim) float32
        arrays.
    """
    xiaoqu_faceid_file_features = {}
    file_count = 0
    with open(feature_file, 'rb') as feat_file, open(file_list, 'r') as file:
        for line in file:  # stream the list instead of loading it whole
            file_count += 1
            if file_count % 10000 == 0:
                print('Read Feature Count: {}'.format(file_count))

            lines = line.strip().split()
            # Features are stored in list-line order, so this read must happen
            # before any 'continue' below to keep the two files aligned.
            feature = np.fromfile(feat_file, dtype=np.float32, count=feat_dim).reshape(1, feat_dim)

            if mode == 0:
                # <faceid> <uuid> gd/zs/.../<camera>/video/<ts>/<img>.jpg <flag>
                if 0 != int(lines[-1]):  # keep only flag==0 records
                    continue
                file_name = lines[-2].strip()
                info = file_name.split('/')
                if len(info) > 1:
                    # xiaoqu = the path with its last four components removed
                    xiaoqu = '/'.join(info[:-4])
                else:
                    xiaoqu = 'NONE'
                faceid = '{}_{}'.format(lines[0], lines[1])
            else:
                # bare file name, e.g. 000016.jpg
                file_name = lines[0].strip()
                xiaoqu = 'NONE'
                _faceid = file_name.split('.')[0]
                faceid = '{}_{}'.format(_faceid, _faceid)

            faceid_file_features = xiaoqu_faceid_file_features.setdefault(xiaoqu, {})
            file_features = faceid_file_features.setdefault(faceid, [[], []])
            file_features[0].append(file_name)
            file_features[1].append(feature)
        print('Read Feature Count: {}'.format(file_count))

    return xiaoqu_faceid_file_features


def read_typecial_features(file_list, feature_file, feat_dim, mode=0):
    """Load typical-face features grouped as {xiaoqu: [faceids, files, features, counts]}.

    Reads one line per typical face from *file_list* while consuming
    ``feat_dim`` float32 values per record from the binary *feature_file*;
    the two files must be aligned record-for-record.

    Args:
        file_list: path of the text list file; line format depends on *mode*.
        feature_file: path of the raw float32 feature file.
        feat_dim: dimensionality of one feature vector.
        mode: 0 -> lines look like
            ``<faceid> <uuid> <relative/path/img.jpg> <image_count>`` and the
            xiaoqu is derived from the image path.  Any other value -> lines
            are bare file names, every record belongs to the pseudo-xiaoqu
            'NONE' and the image count defaults to 1.

    Returns:
        dict mapping xiaoqu -> ``[faceids, file_names, features,
        face_image_counts]`` (four parallel lists; features are (1, feat_dim)
        float32 arrays).
    """
    xiaoqu_faceid_file_features = {}
    file_count = 0
    with open(feature_file, 'rb') as feat_file, open(file_list, 'r') as file:
        for line in file:  # stream the list instead of loading it whole
            file_count += 1
            if file_count % 10000 == 0:
                print('Read Feature Count: {}'.format(file_count))

            lines = line.strip().split()
            # Read one feature per line so the two files stay aligned.
            feature = np.fromfile(feat_file, dtype=np.float32, count=feat_dim).reshape(1, feat_dim)

            if mode == 0:
                # <faceid> <uuid> gd/zs/.../<camera>/video/<ts>/<img>.jpg <image_count>
                file_name = lines[-2].strip()
                info = file_name.split('/')
                if len(info) > 1:
                    # xiaoqu = the path with its last four components removed
                    xiaoqu = '/'.join(info[:-4])
                else:
                    xiaoqu = 'NONE'
                faceid = '{}_{}'.format(lines[0], lines[1])
                face_image_count = int(lines[-1])
            else:
                # bare file name, e.g. 000016.jpg
                file_name = lines[0].strip()
                xiaoqu = 'NONE'
                _faceid = file_name.split('.')[0]
                faceid = '{}_{}'.format(_faceid, _faceid)
                face_image_count = 1

            file_features = xiaoqu_faceid_file_features.setdefault(xiaoqu, [[], [], [], []])
            file_features[0].append(faceid)
            file_features[1].append(file_name)
            file_features[2].append(feature)
            file_features[3].append(face_image_count)
        print('Read Feature Count: {}'.format(file_count))

    return xiaoqu_faceid_file_features


def filter_face(output_dir):
    """Combine merge results into de-duplicated face/image lists.

    Copies every entry of ``DiffFace.txt`` / ``DiffFaceImage.txt`` (faces
    without duplicates) and the first representative of every merged group in
    ``MergeFace.txt`` / ``MergeFaceImage.txt`` into ``FilterFace.txt`` and
    ``FilterFaceImage.txt``, makes the outputs world read/writable and prints
    the resulting counts.
    """
    diff_face_path = os.path.join(output_dir, 'DiffFace.txt')
    diff_face_image_path = os.path.join(output_dir, 'DiffFaceImage.txt')
    merge_face_path = os.path.join(output_dir, 'MergeFace.txt')
    merge_face_image_path = os.path.join(output_dir, 'MergeFaceImage.txt')

    out_face_path = os.path.join(output_dir, 'FilterFace.txt')
    out_face_image_path = os.path.join(output_dir, 'FilterFaceImage.txt')

    face_count = 0
    file_count = 0

    # Context managers guarantee every file handle is closed even on errors.
    with open(out_face_path, 'w') as out_face_file, open(out_face_image_path, 'w') as out_face_image_file:
        # Unique faces pass through unchanged.
        with open(diff_face_path, 'r') as diff_face_file:
            for line in diff_face_file:
                # e.g. 000001_000001
                out_face_file.write(line)
                face_count += 1
        with open(diff_face_image_path, 'r') as diff_face_image_file:
            for line in diff_face_image_file:
                # e.g. 000001.jpg
                out_face_image_file.write(line)
                file_count += 1

        # From each merged group keep only the first face id / image.
        with open(merge_face_path, 'r') as merge_face_file:
            for line in merge_face_file:
                # e.g. NONE:000011_000011,041389_041389
                faceids = line.split(':')[1].split(',')
                out_face_file.write(faceids[0] + '\n')
                face_count += 1
        with open(merge_face_image_path, 'r') as merge_face_image_file:
            for line in merge_face_image_file:
                # e.g. NONE:000011.jpg,041389.jpg
                images = line.split(':')[1].split(',')
                out_face_image_file.write(images[0] + '\n')
                file_count += 1

    os.system('chmod a+wr {}'.format(out_face_path))
    os.system('chmod a+wr {}'.format(out_face_image_path))

    print('Face Count:', face_count, ', File Count:', file_count)


def main():
    """Command-line entry point.

    Exactly one of --filter / --select / --merge is expected; the first flag
    that is set (checked in that order) decides which pipeline stage runs.
    """
    parser = argparse.ArgumentParser(description="Merge Face ReID Image")
    parser.add_argument("--image_dir", type=str, default="/rootfs/media/kasim/Data1/data/VideoFaceImage", help="path to image file list")
    parser.add_argument("--file_list", type=str, default="/rootfs/media/kasim/Data1/data/VideoFaceImage/XiaoQu/Feature.select.FR.txt", help="path to image file list")
    parser.add_argument("--feature_file", type=str, default="/rootfs/media/kasim/Data1/data/VideoFaceImage/XiaoQu/Feature.select.norm.FR.dat", help="path to feature file")
    parser.add_argument("--feature_dim", type=int, default=256, help="feature dim")
    parser.add_argument("--output_dir", type=str, default="/rootfs/media/kasim/Data1/data/VideoFaceImage/XiaoQu", help="path to auto select reid image info")
    parser.add_argument('--proccess_count', type=int, default=8, help='detect proccess count')
    parser.add_argument('--gpu_count', type=int, default=2, help='detect gpu count')
    parser.add_argument("--show", action='store_true', help="show")
    parser.add_argument("--cpu", action='store_true', help="use cpu")
    parser.add_argument("--select", action='store_true', help="select typical face")
    parser.add_argument('--score_thr', type=float, default=0.5, help='face merge score thr')
    parser.add_argument("--merge", action='store_true', help="merge face")
    parser.add_argument('--mode', type=int, default=2, help='merge mode')
    parser.add_argument('--file_list_mode', type=int, default=0, help='file list mode')
    parser.add_argument('--split_process_feature_count', type=int, default=30000, help='split process feature count')
    parser.add_argument("--filter", action='store_true', help="filter face and image")
    args = parser.parse_args()

    output_dir = args.output_dir
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
        os.system('chmod a+wr {}'.format(output_dir))

    if args.filter:
        filter_face(output_dir)
        return

    if args.select:
        grouped = read_features(args.file_list, args.feature_file, args.feature_dim, args.file_list_mode)
        face_select_mp(args, grouped)
        return

    if args.merge:
        grouped = read_typecial_features(args.file_list, args.feature_file, args.feature_dim, args.file_list_mode)
        face_merge_mp(args, grouped)
        return

    print('finish!')


# Run the CLI only when executed as a script (not on import).
if __name__ == '__main__':
    main()
