#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
@author:  runyuanye
@contact: runyuanye@163.com
"""

import argparse
import os
import sys
import numpy as np
import cv2
from collections import OrderedDict
import glob
import time
import multiprocessing
import torch

sys.path.append('.')

from distance import euclidean_squared_distance, euclidean_squared_distance2, cosine_distance, cosine_distance2, cosine_distance_ex


"""
    过滤掉太相似的人脸
"""

# Cosine-distance threshold below which two face crops are treated as the
# same image.  May be overridden at runtime via --same_face_score_thr in
# main() (thr = 1 - score).
same_image_distance_thr = 0.10


def show_images(image_dir, file_names, total_image_count):
    """Display the kept images as tiled grid pages for visual inspection.

    Shows up to ``h_image_count * v_image_count`` thumbnails per page and
    waits for a key press between pages.  Esc / 'q' / 'Q' quits early.

    Args:
        image_dir: root directory the relative file names are joined to.
        file_names: iterable of relative image paths (trailing whitespace
            such as newlines is stripped before joining).
        total_image_count: image count before filtering; shown in the
            on-screen overlay together with the current position.
    """
    # Page layout: 12 x 6 thumbnails of 128 x 128 pixels.
    h_image_count = 12
    v_image_count = 6
    img_width = 128
    img_height = 128

    display_image_count = h_image_count * v_image_count
    files = []
    last_idx = len(file_names) - 1
    for idx, file_name in enumerate(file_names):
        files.append(file_name)
        # Flush a page once it is full, or when the last file is reached.
        if len(files) < display_image_count and (idx < last_idx):
            continue
        imgs = []
        for file_name in files:
            file_path = os.path.join(image_dir, file_name.strip())
            img = cv2.imread(file_path)
            if img is None:
                # Missing or unreadable image: report and skip instead of
                # crashing inside cv2.resize with an opaque error.
                print('Failed to read image: {}'.format(file_path))
                continue
            imgs.append(cv2.resize(img, (img_width, img_height)))
        files.clear()
        image_count = len(imgs)
        if image_count == 0:
            # Whole page unreadable; nothing to display.
            continue
        # Assemble rows of up to h_image_count thumbnails, then stack rows.
        images = []
        for j, img in enumerate(imgs):
            if j % h_image_count == 0:
                h_images = [img]
            else:
                h_images.append(img)
            if ((j + 1) % h_image_count == 0) or ((j + 1) == image_count):
                images.append(np.concatenate(h_images, axis=1))

        if len(images) > 1:
            # Pad a shorter last row with black so vstack gets equal widths.
            h0, w0 = images[0].shape[:2]
            h1, w1 = images[-1].shape[:2]
            if (w0 != w1) or (h0 != h1):
                padded = np.zeros_like(images[0])
                padded[0:h1, 0:w1, :] = images[-1]
                images[-1] = padded
        draw_image = np.vstack(images)
        # Overlay "current index / kept count / total before filtering".
        cv2.putText(draw_image, '%d/%d/%d' % (idx + 1, last_idx + 1, total_image_count), (8, 24), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 255, 0), 1)
        cv2.imshow('image', draw_image)
        key = cv2.waitKey()
        if key == 27 or key == ord('q') or key == ord('Q'):
            return


def auto_select_run(file_queue, out_queue, id, args, output_dir):
    """Worker process: drop near-duplicate face images within each group.

    Pulls (vid, files, features) jobs from ``file_queue``.  For each group
    it visits images in ascending order of their summed cosine distance to
    the rest of the group, keeps each visited image, and marks every other
    image whose distance to it is below ``same_image_distance_thr`` as a
    duplicate to be skipped.  Kept file names go to
    ``Feature.select<id>.txt`` and their features (raw float32) to
    ``Feature.select<id>.dat`` inside ``output_dir``.

    NOTE(review): the worker reads the module-level
    ``same_image_distance_thr`` — with the fork start method it inherits
    the value set in main(); under spawn it would fall back to the module
    default.  Confirm the intended start method.

    Args:
        file_queue: input job queue; a 5s get() timeout ends the worker.
        out_queue: progress queue consumed by ``out_run``.
        id: worker index; also selects the GPU (id % args.gpu_count).
        args: parsed command-line arguments.
        output_dir: directory receiving the per-worker output files.
    """
    # Pin this worker to a single GPU before torch initializes CUDA.
    DEVICE_ID = str(id % args.gpu_count)
    os.environ['CUDA_VISIBLE_DEVICES'] = DEVICE_ID

    DEVICE = 'cuda:0'

    # Imported here (again) so CUDA initialization in this child process
    # happens after CUDA_VISIBLE_DEVICES is set above.
    import torch
    from torch.backends import cudnn

    cudnn.benchmark = True

    # Per-worker outputs: kept file names (text) and features (raw binary).
    out_file_path = os.path.join(output_dir, 'Feature.select{}.txt'.format(id))
    out_file = open(out_file_path, 'w')
    out_feat_file_path = os.path.join(output_dir, 'Feature.select{}.dat'.format(id))
    out_feat_file = open(out_feat_file_path, 'wb')
    cur_vid = None
    # Device/community grouping produces larger groups; use the
    # memory-saving distance path (no full NxN matrix kept) for those.
    split_process = args.device_process or args.xiaoqu_process
    try:
        while True:
            # NOTE(review): on timeout this raises queue.Empty (whose str()
            # is empty), which the except-clause below treats as normal
            # end-of-work — no sentinel is required from the producer.
            file_info = file_queue.get(timeout=5)
            if file_info is None:
                break
            vid, files, features = file_info
            cur_vid = vid
            total_image_count = len(files)
            if total_image_count == 1:
                # Single image: nothing to deduplicate, write it through.
                features = features.numpy()
                out_file.write(files[0])
                features.tofile(out_feat_file)
                out_queue.put((cur_vid, False, id))
                continue

            # feats = np.frombuffer(feats, dtype=np.float32).reshape(-1, args.feature_dim)
            # feats = torch.tensor(feats, device=DEVICE, requires_grad=False)
            if not args.cpu:
                feats = features.cuda(device=DEVICE)
            else:
                feats = features
            feats.requires_grad = False
            # L2-normalize rows so cosine distances are well defined.
            feats = torch.nn.functional.normalize(feats, dim=1, p=2)
            if split_process:
                # Only the per-row distance sums; individual rows are
                # recomputed on demand inside the loop below.
                dist_sum = cosine_distance_ex(feats)
            else:
                distmat = cosine_distance(feats)
                dist_sum = distmat.sum(axis=1)
                if not args.cpu:
                    distmat = distmat.cpu()
                    distmat = distmat.numpy()
            if not args.cpu:
                dist_sum = dist_sum.cpu()
            dist_sum = dist_sum.numpy()
            # Visit images in ascending order of total distance to the rest.
            dist_sum_sort = dist_sum.argsort()
            # dist_sum_sort = dist_sum_sort[::-1]  # reversed: images most different from the others come first
            same_idx_set = set()
            keep_idxes = []
            for idx in dist_sum_sort:
                if idx in same_idx_set:
                    # Already marked as a near-duplicate of a kept image.
                    continue
                keep_idxes.append(idx)
                if split_process:
                    # Distances of every image to the one just kept.
                    distmat = cosine_distance2(feats, feats[idx:idx+1])
                    distmat = distmat.t().cpu().numpy()
                    dist = distmat[0]
                else:
                    dist = distmat[idx]
                mask = dist < same_image_distance_thr
                mask[idx] = False  # never mark the kept image itself
                same_image_idx = np.nonzero(mask)[0]
                if same_image_idx.size > 0:
                    same_idx_set.update(same_image_idx.tolist())
            keep_files = [files[idx] for idx in keep_idxes]
            keep_feats = features[keep_idxes]
            keep_feats = keep_feats.numpy()
            if args.show:
                # Interactive inspection instead of writing output files.
                show_images(args.image_dir, keep_files, total_image_count)
            else:
                for keep_file in keep_files:
                    out_file.write(keep_file)
                keep_feats.tofile(out_feat_file)
            # Back-pressure: keep the progress queue from growing unbounded.
            while out_queue.qsize() > 1000:
                time.sleep(0.01)
            out_queue.put((cur_vid, False, id))
    except Exception as e:
        # queue.Empty (get timeout) stringifies to '' and ends the worker
        # quietly; any other exception is at least printed.
        if str(e) != '':
            print(e)

    out_feat_file.close()
    out_file.close()
    # Make outputs writable by other users (script may run with elevated rights).
    os.system('chmod a+wr {}'.format(out_feat_file_path))
    os.system('chmod a+wr {}'.format(out_file_path))
    # Signal completion to out_run.
    out_queue.put((cur_vid, True, id))


def out_run(out_queue, args, total_video_count, output_dir):
    """Consume worker progress messages and log overall progress.

    Terminates when all workers have reported completion, when every
    expected video has been counted, or when a ``None`` sentinel arrives.

    Args:
        out_queue: queue of (video_name, finished_flag, worker_id) tuples.
        args: parsed command-line arguments (proccess_count, device_process,
            xiaoqu_process are read here).
        total_video_count: number of videos expected in total.
        output_dir: unused here; kept for a uniform worker signature.
    """
    processed = 0
    try:
        done_workers = 0
        while True:
            message = out_queue.get(block=True)
            if message is None:
                break
            video_name, finished, worker_id = message
            if finished:
                # A worker reported completion; stop once all have.
                print('Proc{} finish'.format(worker_id))
                done_workers += 1
                if args.proccess_count <= done_workers:
                    break
                continue
            processed += 1
            # Device/community runs log every video; otherwise every 1000th.
            verbose = args.device_process or args.xiaoqu_process
            if verbose or processed % 1000 == 0:
                print('{:06f}, Proc{}, File Count: {}/{}, {}'.format(
                    time.time(), worker_id, processed, total_video_count, video_name))
            if processed >= total_video_count:
                break
    except Exception as e:
        print(e)


def auto_select_mp(args, vid_file_features):
    """Fan the per-group deduplication out over worker processes.

    Spawns ``args.proccess_count`` ``auto_select_run`` workers plus one
    ``out_run`` progress logger, then feeds (vid, files, features) jobs
    into a shared queue.

    Args:
        args: parsed command-line arguments.
        vid_file_features: dict vid -> [file-name list, feature-array list]
            as produced by ``read_features``; the feature lists are
            consumed (set to None) while feeding the queue.
    """

    output_dir = args.output_dir

    manager = multiprocessing.Manager()
    file_queue = manager.Queue()
    # file_queue = multiprocessing.Queue()
    out_queue = multiprocessing.Queue()

    total_video_count = len(vid_file_features)

    workers = []
    for i in range(args.proccess_count):
        workers.append(multiprocessing.Process(target=auto_select_run, args=(file_queue, out_queue, i, args, output_dir)))

    out_worker = multiprocessing.Process(target=out_run, args=(out_queue, args, total_video_count, output_dir))

    for i in range(args.proccess_count):
        workers[i].start()

    out_worker.start()
    print('Start Time: {:06f}'.format(time.time()))
    # Back-pressure: cap queued jobs so pending features don't pile up in memory.
    wait_count_thr = args.proccess_count * 1000
    for vid, file_features in vid_file_features.items():
        while file_queue.qsize() > wait_count_thr:
            time.sleep(0.01)
        files = file_features[0]
        features = np.concatenate(file_features[1], axis=0)
        file_features[1] = None  # release the per-image arrays early
        # features = multiprocessing.RawArray('f', features.ravel())
        features = torch.from_numpy(features)
        # NOTE(review): presumably moved to shared memory to avoid copying
        # the tensor through the queue — confirm with the torch docs.
        features.share_memory_()
        file_queue.put([vid, files, features])

    # No sentinel is sent into file_queue; workers exit via their 5s
    # get() timeout once the queue drains (see auto_select_run).
    out_worker.join()

    for i in range(args.proccess_count):
        workers[i].join()

    # for i, [seq_file_path, seq_name] in enumerate(zip(seq_files, seq_name_list)):
    #     if i == 0:
    #         continue
    #     file_queue.put([seq_file_path, seq_name])
    #     auto_select_run(file_queue, out_queue, 0, args, output_dir)
    #     break


def read_features(file_list, feature_file, feat_dim, device_process=False, xiaoqu_process=False):
    """Read per-image features and group them by video / device / community.

    Args:
        file_list: text file with one relative image path per line, e.g.
            gd/zs/dqjd/xgly/<device_id>/video/<video_name>/00012_000.jpg
        feature_file: raw binary file of float32 features, one row of
            ``feat_dim`` values per line of ``file_list``, in the same order.
        feat_dim: dimensionality of each feature vector.
        device_process: group by device id (4th path component from the end).
        xiaoqu_process: group by community prefix (everything before the
            last four path components); takes precedence over the
            device/video grouping.

    Returns:
        dict mapping group id -> [list of raw lines (trailing newline kept,
        written back verbatim later), list of (1, feat_dim) float32 arrays].
    """
    vid_file_features = {}
    file_count = 0
    # Context managers close both files even if parsing fails midway.
    with open(feature_file, 'rb') as feat_file, open(file_list, 'r') as file:
        for line in file:  # stream the list; no readlines() materialization
            # e.g. gd/zs/dqjd/xgly/150100414a5444345203bcd04294b500/video/20190912_161846/00012_000.jpg
            info = line.strip().split('/')
            if xiaoqu_process:
                vid = '/'.join(info[:-4])
            else:
                device_id = info[-4]
                if device_process:
                    vid = device_id
                else:
                    video_name = info[-2]
                    vid = '{}-{}'.format(device_id, video_name)
            # Features are stored consecutively in file-list order.
            feature = np.fromfile(feat_file, dtype=np.float32, count=feat_dim).reshape(1, feat_dim)
            file_features = vid_file_features.setdefault(vid, [[], []])
            file_features[0].append(line)
            file_features[1].append(feature)
            file_count += 1
            if file_count % 10000 == 0:
                print('Read Feature Count: {}'.format(file_count))
        print('Read Feature Count: {}'.format(file_count))

    return vid_file_features


def main():
    """Entry point: parse CLI arguments and run multi-process selection."""
    global same_image_distance_thr

    parser = argparse.ArgumentParser(description="Select Face ReID Image")
    parser.add_argument(
        "--image_dir", default="/rootfs/media/kasim/Data1/data/VideoFaceImage",
        help="path to image file list", type=str)
    parser.add_argument(
        "--file_list", default="/rootfs/media/kasim/Data1/data/VideoFaceImage/Device/Feature.select.txt",
        help="path to image file list", type=str)
    parser.add_argument(
        "--feature_file", default="/rootfs/media/kasim/Data1/data/VideoFaceImage/Device/Feature.select.dat",
        help="path to feature file", type=str)
    parser.add_argument(
        "--feature_dim", default=256, help="feature dim", type=int)
    parser.add_argument(
        "--output_dir", default="/rootfs/media/kasim/Data1/data/VideoFaceImage/XiaoQu",
        help="path to auto select reid image info", type=str)
    parser.add_argument('--proccess_count', type=int, default=2, help='detect proccess count')
    parser.add_argument('--gpu_count', type=int, default=2, help='detect gpu count')
    parser.add_argument("--show", action='store_true', help="show")
    parser.add_argument("--cpu", action='store_true', help="use cpu")
    parser.add_argument("--device_process", action='store_true', help="device process")
    parser.add_argument("--xiaoqu_process", action='store_true', help="xiaoqu process")
    parser.add_argument('--same_face_score_thr', type=float,
                        default=1-same_image_distance_thr, help='same face score thr')
    args = parser.parse_args()

    # Convert the similarity-score threshold back into a distance threshold
    # for the worker processes.
    same_image_distance_thr = 1-args.same_face_score_thr

    out_dir = args.output_dir
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
        os.system('chmod a+wr {}'.format(out_dir))

    vid_file_features = read_features(
        args.file_list, args.feature_file, args.feature_dim,
        args.device_process, args.xiaoqu_process)

    auto_select_mp(args, vid_file_features)

    print('finish!')


# Standard entry guard — required here: the module spawns worker processes
# and must be importable without side effects in the children.
if __name__ == '__main__':
    main()
