#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
@author:  runyuanye
@contact: runyuanye@erisedmedia.com
"""

import argparse
import os
import sys
import numpy as np
import time
import multiprocessing
import torch
from enum import Enum

sys.path.append('.')

"""
    人脸识别评估
"""


class PairType(Enum):
    """Label for a batch of face pairs: different identities or the same one."""
    DIFF = 0  # pair of two different people
    SAME = 1  # pair of the same person


def eval_score_run(file_queue, out_queue, id, args):
    """Worker process: score batches of face pairs by feature similarity.

    Pulls ``(face_pairs, feats1, feats2, pair_type)`` batches from
    ``file_queue`` until a ``None`` sentinel arrives or the queue stays empty
    for 5 seconds, computes a per-pair similarity score, and pushes
    ``(face_pairs, scores, pair_type, False, id)`` onto ``out_queue``.
    A final ``(None, None, None, True, id)`` tuple reports completion.

    Args:
        file_queue: input queue of feature batches.
        out_queue: output queue consumed by ``out_run``.
        id (int): worker index, used to pick a GPU round-robin.
        args: parsed CLI args; uses ``gpu_count`` and ``gpu``.
    """
    # Pin this worker to one GPU (round-robin) BEFORE torch initializes CUDA,
    # so that 'cuda:0' inside this process maps to the chosen device.
    os.environ['CUDA_VISIBLE_DEVICES'] = str(id % args.gpu_count)
    DEVICE = 'cuda:0'

    import torch
    from torch.backends import cudnn
    from queue import Empty

    cudnn.benchmark = True

    def similarity(input1, input2):
        """Row-wise dot product of two 2-D feature matrices, clamped to [0, 1].

        NOTE(review): presumably the features are pre-L2-normalized so this is
        cosine similarity (a normalize step exists but was commented out
        upstream) — confirm against the feature extractor.

        Args:
            input1 (torch.Tensor): 2-D feature matrix.
            input2 (torch.Tensor): 2-D feature matrix.

        Returns:
            list: one similarity score per row.
        """
        similarity_score = torch.mul(input1, input2).sum(1)
        similarity_score.clamp_(0, 1)
        return similarity_score.tolist()

    try:
        while True:
            try:
                file_info = file_queue.get(timeout=5)
            except Empty:
                # Producer finished without sending a sentinel; exit quietly.
                # (The original caught bare Exception and filtered on str(e),
                # which also silently swallowed real errors with empty text.)
                break
            if file_info is None:
                break
            face_pairs, feats1, feats2, pair_type = file_info

            if args.gpu:
                feats1 = feats1.cuda(device=DEVICE)
                feats2 = feats2.cuda(device=DEVICE)

            # Inference only — make sure autograd never tracks these.
            feats1.requires_grad = False
            feats2.requires_grad = False
            scores = similarity(feats1, feats2)
            # Back-pressure: don't let the collector queue grow without bound.
            while out_queue.qsize() > 100:
                time.sleep(0.01)
            out_queue.put((face_pairs, scores, pair_type, False, id))
    except Exception as e:
        # Unexpected failure: report it instead of dying silently.
        print(e)

    out_queue.put((None, None, None, True, id))


def calculate_accuracy(threshold, score, actual_issame):
    """Return (tpr, fpr, acc) for one decision threshold.

    A pair is predicted to be the same person when its score is strictly
    greater than ``threshold``.

    Args:
        threshold (float): decision threshold.
        score (np.ndarray): 1-D array of pair similarity scores.
        actual_issame (np.ndarray): ground-truth labels, truthy == same person.

    Returns:
        tuple: (true positive rate, false positive rate, accuracy).
    """
    predicted_same = np.less(threshold, score)
    predicted_diff = np.logical_not(predicted_same)
    actually_diff = np.logical_not(actual_issame)

    tp = np.sum(np.logical_and(predicted_same, actual_issame))  # same classified as same - correct
    fp = np.sum(np.logical_and(predicted_same, actually_diff))  # diff classified as same - the more serious error
    tn = np.sum(np.logical_and(predicted_diff, actually_diff))  # diff classified as diff - correct
    fn = np.sum(np.logical_and(predicted_diff, actual_issame))  # same classified as diff - error

    tpr = float(tp) / float(tp + fn) if (tp + fn) != 0 else 0
    fpr = float(fp) / float(fp + tn) if (fp + tn) != 0 else 0
    acc = float(tp + tn) / score.size
    return tpr, fpr, acc


def evaluate_run(file_queue, out_queue, id, args, actual_issame, score):
    """Worker process: compute tpr/fpr/accuracy for threshold tasks.

    Pulls ``(idx, threshold)`` tasks from ``file_queue`` until a ``None``
    sentinel arrives or the queue stays empty for 2 seconds, evaluates the
    shared ``score``/``actual_issame`` tensors at that threshold, and pushes
    ``(idx, tpr, fpr, acc, False, id)`` onto ``out_queue``.  A final
    ``(None, None, None, None, True, id)`` tuple reports completion.

    Args:
        file_queue: input queue of (threshold index, threshold value).
        out_queue: output queue consumed by ``evaluate``.
        id (int): worker index, used to pick a GPU round-robin.
        args: parsed CLI args; uses ``gpu_count`` and ``gpu``.
        actual_issame (torch.Tensor): shared ground-truth labels.
        score (torch.Tensor): shared per-pair scores.
    """
    # Pin this worker to one GPU before torch initializes CUDA.
    os.environ['CUDA_VISIBLE_DEVICES'] = str(id % args.gpu_count)
    DEVICE = 'cuda:0'

    import torch
    from torch.backends import cudnn
    from queue import Empty

    cudnn.benchmark = True

    def _calculate_accuracy(threshold, score, actual_issame):
        # A pair is predicted "same" when its score strictly exceeds threshold.
        predict_issame = threshold < score
        not_actual_issame = torch.logical_not(actual_issame)
        not_predict_issame = torch.logical_not(predict_issame)
        tp = torch.sum(predict_issame & actual_issame)  # same classified as same - correct
        fp = torch.sum(predict_issame & not_actual_issame)  # diff classified as same - the more serious error
        tn = torch.sum(not_predict_issame & not_actual_issame)  # diff classified as diff - correct
        fn = torch.sum(not_predict_issame & actual_issame)  # same classified as diff - error
        if args.gpu:
            tp, fp, tn, fn = tp.cpu(), fp.cpu(), tn.cpu(), fn.cpu()
        tpr = 0.0 if (tp + fn == 0) else float(tp) / float(tp + fn)
        fpr = 0.0 if (fp + tn == 0) else float(fp) / float(fp + tn)
        acc = float(tp + tn) / float(score.size(0))
        return tpr, fpr, acc

    # Move the shared tensors to the GPU ONCE, outside the task loop
    # (the original re-uploaded them on every threshold iteration).
    if args.gpu:
        actual_issame = actual_issame.cuda(device=DEVICE)
        score = score.cuda(device=DEVICE)
    score.requires_grad = False

    try:
        while True:
            try:
                file_info = file_queue.get(timeout=2)
            except Empty:
                # Producer finished without a sentinel; exit quietly instead of
                # the original broad `except` that also hid real errors.
                break
            if file_info is None:
                break
            idx, thr = file_info
            tpr, fpr, acc = _calculate_accuracy(thr, score, actual_issame)
            # Back-pressure on the collector queue.
            while out_queue.qsize() > 100:
                time.sleep(0.01)
            out_queue.put((idx, tpr, fpr, acc, False, id))
    except Exception as e:
        print(e)

    out_queue.put((None, None, None, None, True, id))


def file_run(file_queue, args, thresholds):
    """Producer: feed (index, threshold) tasks into the queue with throttling.

    Keeps at most ``proccess_count * 5`` pending tasks so the queue never
    grows unboundedly while workers are busy.
    """
    max_pending = args.proccess_count * 5
    for task in enumerate(thresholds):
        while file_queue.qsize() > max_pending:
            time.sleep(0.01)
        file_queue.put(task)


def evaluate(args, pair_file_list, score_list, actual_issame_list, output_dir, threshold=None, use_median=False, show=False, imagepath='./'):
    """Sweep decision thresholds over the scored pairs and write reports.

    Spawns ``args.proccess_count`` ``evaluate_run`` worker processes fed by a
    ``file_run`` producer to compute tpr/fpr/accuracy at each threshold in
    [0, 1) step 0.01, then selects a best threshold (or uses the given one)
    and writes ``evaluate.txt``, ``same_face_error.txt`` and
    ``diff_face_error.txt`` into ``output_dir``.

    Args:
        args: parsed CLI args; uses ``proccess_count`` (and, in the workers,
            ``gpu``/``gpu_count``).
        pair_file_list (list): (file1, file2) per scored pair.
        score_list (list): similarity score per pair.
        actual_issame_list (list): truthy when the pair is the same person.
        output_dir (str): directory the report files are written into.
        threshold (float | None): fixed threshold to report at; when None the
            threshold with the highest accuracy is searched.
        use_median (bool): among accuracy-tied thresholds pick the median one
            instead of the last.
        show (bool): interactively display each mis-classified pair with cv2.
        imagepath (str): root directory of the face images (used by ``show``).
    """
    output_path = os.path.join(output_dir, 'evaluate.txt')
    thresholds = np.arange(0, 1, 0.01)
    # thresholds = np.arange(0, 1, 0.001)
    actual_issame = np.array(actual_issame_list)
    score = np.array(score_list)

    num = thresholds.size
    accs = np.zeros((num))
    tprs = np.zeros((num))
    fprs = np.zeros((num))

    out_file = open(output_path, 'w')

    # for idx, thr in enumerate(thresholds):
    #     tprs[idx], fprs[idx], accs[idx] = calculate_accuracy(thr, score, actual_issame)
    #     info_str = 'thr: {:.3f}, acc: {:.6f}, tpr: {:.6f}, fpr: {:.6f}'.format(thr, accs[idx], tprs[idx], fprs[idx])
    #     out_file.write(info_str + '\n')
    #     print(info_str)

    # manager = multiprocessing.Manager()
    # file_queue = manager.Queue()
    file_queue = multiprocessing.Queue()
    out_queue = multiprocessing.Queue()

    # Convert to torch tensors in shared memory so every worker process sees
    # the labels/scores without pickling a copy through the queue.
    actual_issame = torch.from_numpy(actual_issame)
    score = torch.from_numpy(score)
    actual_issame.share_memory_()
    score.share_memory_()

    proccess_count = args.proccess_count

    workers = []
    for i in range(proccess_count):
        workers.append(multiprocessing.Process(target=evaluate_run, args=(file_queue, out_queue, i, args, actual_issame, score)))

    # Producer feeding (idx, threshold) tasks to the workers.
    file_worker = multiprocessing.Process(target=file_run, args=(file_queue, args, thresholds.tolist()))
    file_worker.start()

    for i in range(proccess_count):
        workers[i].start()

    try:
        # Collect per-threshold results until every worker has reported its
        # finish sentinel (finish=True tuple).
        finish_worker_count = 0
        while True:
            file_info = out_queue.get(block=True)
            if file_info is None:
                break
            idx, tpr, fpr, acc, finish, id = file_info
            if finish:
                print('Evaluate Proc{} finish'.format(id, ))
                finish_worker_count += 1
                if proccess_count <= finish_worker_count:
                    break
                continue

            tprs[idx], fprs[idx], accs[idx] = tpr, fpr, acc
            info_str = 'Proc{} - {} - thr: {:.3f}, acc: {:.6f}, tpr: {:.6f}, fpr: {:.6f}'.format(id, idx, thresholds[idx], accs[idx], tprs[idx], fprs[idx])
            print(info_str)
    except Exception as e:
        print(e)

    file_worker.join()

    for i in range(proccess_count):
        workers[i].join()

    # Dump the full threshold sweep to evaluate.txt.
    for idx, thr in enumerate(thresholds):
        info_str = 'thr: {:.3f}, acc: {:.6f}, tpr: {:.6f}, fpr: {:.6f}'.format(thr, accs[idx], tprs[idx], fprs[idx])
        out_file.write(info_str + '\n')

    best_threshold_index = np.argmax(accs)
    info_str = 'acc: {}, tpr: {}, fpr: {}, threshold: {}'.format(accs[best_threshold_index], tprs[best_threshold_index], fprs[best_threshold_index], thresholds[best_threshold_index])
    out_file.write(info_str + '\n')
    print(info_str)

    if threshold is None:
        # Several thresholds can tie at the maximum accuracy; pick either the
        # median or the last of the tied group.
        max_acc = accs[best_threshold_index]
        thr_list = [thresholds[i] for i, acc in enumerate(accs) if acc == max_acc]
        # print(thr_list)
        if use_median:
            best_threshold = thr_list[len(thr_list) // 2]
            best_threshold_index = int(np.where(thresholds == best_threshold)[0])
        else:
            # best_threshold = float(thresholds[best_threshold_index])
            best_threshold = thr_list[-1]
            best_threshold_index = int(np.where(thresholds == best_threshold)[0])

        info_str = 'acc: {}, tpr: {}, fpr: {}, threshold: {}'.format(accs[best_threshold_index], tprs[best_threshold_index], fprs[best_threshold_index], thresholds[best_threshold_index])
        out_file.write(info_str + '\n')
        print(info_str)
    else:
        # Report at the caller-supplied threshold; match it against the sweep
        # grid with 3-decimal rounding since the grid step is 0.01.
        best_threshold = float(threshold)
        for index, thr in enumerate(thresholds):
            if round(float(thr), 3) == round(best_threshold, 3):
                best_threshold_index = index
                break
        info_str = 'acc: {}, tpr: {}, fpr: {}, threshold: {}'.format(accs[best_threshold_index], tprs[best_threshold_index], fprs[best_threshold_index], thresholds[best_threshold_index])
        out_file.write(info_str + '\n')
        print(info_str)

    # Record every mis-classified pair: same-person pairs scoring below the
    # threshold, and different-person pairs scoring above it.
    # NOTE(review): `score` below shadows the tensor of the same name above;
    # harmless here since the tensor is no longer used, but worth renaming.
    match_error_count = 0
    notmatch_error_count = 0
    same_err_file = open(os.path.join(output_dir, 'same_face_error.txt'), 'w')
    diff_err_file = open(os.path.join(output_dir, 'diff_face_error.txt'), 'w')
    for i, score in enumerate(score_list):
        pair_files = pair_file_list[i]
        if actual_issame_list[i]:
            if score < best_threshold:
                match_error_count += 1
                info_str = '{},{}\n'.format(pair_files[0], pair_files[1])
                same_err_file.write(info_str)
        else:
            if score > best_threshold:
                notmatch_error_count += 1
                info_str = '{},{}\n'.format(pair_files[0], pair_files[1])
                diff_err_file.write(info_str)
    same_err_file.close()
    diff_err_file.close()
    info_str = 'Same Face Error Count: {}, Diff Face Error Count: {}'.format(match_error_count, notmatch_error_count)
    out_file.write(info_str + '\n')
    print(info_str)
    out_file.close()

    if show:
        # Interactive review of every error pair; q/Q/Esc stops the loop.
        import cv2
        for i, score in enumerate(score_list):
            is_error = False
            if actual_issame_list[i]:
                if score < best_threshold:
                    is_error = True
                    same_str = 'SameFace Error!'
            else:
                if score > best_threshold:
                    is_error = True
                    same_str = 'DiffFace Error!'
            if is_error:
                file1, file2 = pair_file_list[i]
                print(file1, file2, score, best_threshold, same_str)
                image1 = cv2.imread(os.path.join(imagepath, file1))
                image2 = cv2.imread(os.path.join(imagepath, file2))
                cv2.imshow('image1', image1)
                cv2.imshow('image2', image2)
                k = cv2.waitKey()
                if k == ord('q') or k == ord('Q') or k == 27:
                    break
                else:
                    continue
        cv2.destroyAllWindows()


def out_run(out_queue, args, total_set_count, output_dir):
    """Collector process: gather scored pairs from the workers, then evaluate.

    Drains ``out_queue`` until every scoring worker has sent its finish
    sentinel, writing ``label,score,file1,file2`` lines to
    ``face_pair_scores.txt`` and accumulating the pairs in memory for the
    final ``evaluate`` call.

    Args:
        out_queue: queue of (face_pairs, scores, pair_type, finished, id).
        args: parsed CLI args; uses ``proccess_count``, ``threshold``,
            ``use_median``, ``show`` and ``image_dir``.
        total_set_count (int): expected number of batches (progress display).
        output_dir (str): directory for the score file and reports.
    """
    set_count = 0
    actual_issame_list = []
    score_list = []
    pair_file_list = []
    out_score_file_path = os.path.join(output_dir, 'face_pair_scores.txt')
    try:
        # Context manager guarantees the score file is closed (and flushed)
        # even if the collection loop raises.
        with open(out_score_file_path, 'w') as out_score_file:
            finish_worker_count = 0
            while True:
                file_info = out_queue.get(block=True)
                if file_info is None:
                    break
                face_pairs, scores, pair_type, finish, id = file_info
                if finish:
                    print('Proc{} finish'.format(id))
                    finish_worker_count += 1
                    # Stop once every scoring worker has reported completion.
                    if args.proccess_count <= finish_worker_count:
                        break
                    continue
                set_count += 1
                for face_pair, score in zip(face_pairs, scores):
                    pair_file_list.append(face_pair)
                    score_list.append(score)
                    actual_issame_list.append(pair_type == PairType.SAME)
                    out_info = '{},{},{},{}\n'.format(int(pair_type == PairType.SAME), score, face_pair[0], face_pair[1])
                    out_score_file.write(out_info)
                print('{:06f}, Proc{}, Count: {}/{}'.format(time.time(), id, set_count, total_set_count))
    except Exception as e:
        print(e)

    # BUG FIX: honour the parsed --use_median flag instead of hard-coding False.
    evaluate(args, pair_file_list, score_list, actual_issame_list, output_dir, threshold=args.threshold, use_median=args.use_median, show=args.show, imagepath=args.image_dir)


def eval_score_mp(args, file_features):
    """Score all face pairs with a pool of worker processes.

    Reads ``args.face_pair_list`` — first line is tab-separated
    ``sets_count pairs_count``; then, for each set, ``pairs_count``
    same-person lines (dir, file1, file2) followed by ``pairs_count``
    different-person lines (dir1, file1, dir2, file2).  Each half-set is
    batched into shared-memory tensors and queued to ``eval_score_run``
    workers, while an ``out_run`` collector gathers the scores and runs the
    final evaluation.

    Args:
        args: parsed CLI args; uses ``output_dir``, ``proccess_count``,
            ``face_pair_list`` (and, in the workers, ``gpu``/``gpu_count``).
        file_features (dict): face file name -> (1, feat_dim) numpy feature.
    """

    output_dir = args.output_dir

    # manager = multiprocessing.Manager()
    # file_queue = manager.Queue()
    file_queue = multiprocessing.Queue()
    out_queue = multiprocessing.Queue()

    workers = []
    for i in range(args.proccess_count):
        workers.append(multiprocessing.Process(target=eval_score_run, args=(file_queue, out_queue, i, args)))

    for i in range(args.proccess_count):
        workers[i].start()

    print('Start Time: {:06f}'.format(time.time()))
    # Throttle the producer so the feature queue never holds more than a few
    # batches per worker (each batch carries whole tensors).
    wait_count_thr = args.proccess_count * 2
    with open(args.face_pair_list, 'r') as file:
        line = file.readline()
        lines = line.strip().split('\t')
        sets_count = int(lines[0])
        pairs_count = int(lines[1])

        # Each set yields two batches (same + diff), hence sets_count * 2.
        out_worker = multiprocessing.Process(target=out_run, args=(out_queue, args, sets_count*2, output_dir))
        out_worker.start()

        for set_idx in range(sets_count):
            while file_queue.qsize() > wait_count_thr:
                time.sleep(0.01)
            # Same Face Pairs: lines are "dir<TAB>file1<TAB>file2".
            same_face_pairs = []
            same_face_feat1 = []
            same_face_feat2 = []
            for pair_idx in range(pairs_count):
                line = file.readline()
                lines = line.strip().split('\t')
                face_file1 = os.path.join(lines[0], lines[1])
                face_file2 = os.path.join(lines[0], lines[2])
                feat1 = file_features.get(face_file1, None)
                feat2 = file_features.get(face_file2, None)
                # Skip pairs whose features were not loaded.
                if (feat1 is None) or (feat2 is None):
                    continue
                same_face_feat1.append(feat1)
                same_face_feat2.append(feat2)
                same_face_pairs.append((face_file1, face_file2))

            # NOTE(review): np.concatenate raises on an empty list — a set
            # with no usable pairs would crash here; confirm inputs are clean.
            same_feats1 = np.concatenate(same_face_feat1, axis=0)
            same_feats2 = np.concatenate(same_face_feat2, axis=0)
            same_feats1 = torch.from_numpy(same_feats1)
            same_feats2 = torch.from_numpy(same_feats2)
            # Shared memory: workers read the tensors without a pickled copy.
            same_feats1.share_memory_()
            same_feats2.share_memory_()
            file_queue.put([same_face_pairs, same_feats1, same_feats2, PairType.SAME])

            # Diff Face Pairs: lines are "dir1<TAB>file1<TAB>dir2<TAB>file2".
            diff_face_pairs = []
            diff_face_feat1 = []
            diff_face_feat2 = []
            for pair_idx in range(pairs_count):
                line = file.readline()
                lines = line.strip().split('\t')
                face_file1 = os.path.join(lines[0], lines[1])
                face_file2 = os.path.join(lines[2], lines[3])
                feat1 = file_features.get(face_file1, None)
                feat2 = file_features.get(face_file2, None)
                if (feat1 is None) or (feat2 is None):
                    continue
                diff_face_feat1.append(feat1)
                diff_face_feat2.append(feat2)
                diff_face_pairs.append((face_file1, face_file2))

            diff_feats1 = np.concatenate(diff_face_feat1, axis=0)
            diff_feats2 = np.concatenate(diff_face_feat2, axis=0)
            diff_feats1 = torch.from_numpy(diff_feats1)
            diff_feats2 = torch.from_numpy(diff_feats2)
            diff_feats1.share_memory_()
            diff_feats2.share_memory_()
            file_queue.put([diff_face_pairs, diff_feats1, diff_feats2, PairType.DIFF])

        # No sentinel is sent to file_queue; workers exit via their 5-second
        # get() timeout once the input is exhausted.
        out_worker.join()

    for i in range(args.proccess_count):
        workers[i].join()


def read_features(file_list, feature_file, feat_dim, feat_head_offset):
    """Load per-file face features from a packed binary feature file.

    The i-th line of ``file_list`` names the face image whose feature is the
    i-th record of ``feature_file``.  Each record is ``feat_head_offset``
    header bytes (skipped) followed by ``feat_dim`` float32 values; with
    ``feat_head_offset < 1`` the records are raw back-to-back float32
    vectors.

    Args:
        file_list (str): path to the text list; the first whitespace-separated
            token of each line is the face file name.
        feature_file (str): path to the packed binary feature file.
        feat_dim (int): number of float32 values per feature.
        feat_head_offset (int): per-record header size in bytes to skip.

    Returns:
        dict: face file name -> (1, feat_dim) float32 numpy array.
    """
    file_features = {}
    file_count = 0
    record_size = feat_head_offset + feat_dim * 4  # 4 bytes per float32
    # Context managers close both files even on a parse error (the original
    # leaked the feature-file handle when an exception escaped), and
    # iterating the list file directly avoids materializing readlines().
    with open(feature_file, 'rb') as feat_file, open(file_list, 'r') as file:
        for line in file:
            face_file_name = line.strip().split()[0]
            if feat_head_offset < 1:
                feature = np.fromfile(feat_file, dtype=np.float32, count=feat_dim).reshape(1, feat_dim)
            else:
                record = feat_file.read(record_size)
                feature = np.frombuffer(record[feat_head_offset:], dtype=np.float32).reshape(-1, feat_dim)
            file_features[face_file_name] = feature
            file_count += 1
            if file_count % 10000 == 0:
                print('Read Feature Count: {}'.format(file_count))
        print('Read Feature Count: {}'.format(file_count))

    return file_features


def read_face_pair_scores(file_path):
    """Re-load a previously written ``face_pair_scores.txt``.

    Each line has the form ``issame,score,file1,file2`` as written by
    ``out_run``.

    Args:
        file_path (str): path to the score file.

    Returns:
        tuple: (pair_file_list, score_list, actual_issame_list), where
        ``actual_issame_list`` holds 0/1 ints and ``pair_file_list`` holds
        (file1, file2) tuples.
    """
    pair_file_list, score_list, actual_issame_list = [], [], []

    with open(file_path, 'r') as file:
        for line in file:
            # BUG FIX: strip the trailing newline — previously the second
            # file name of every pair came back with a '\n' appended.
            scoreinfo = line.strip().split(',')
            if len(scoreinfo) < 4:
                continue  # skip blank or malformed lines
            actual_issame_list.append(int(scoreinfo[0]))
            score_list.append(float(scoreinfo[1]))
            pair_file_list.append((scoreinfo[2], scoreinfo[3]))

    return pair_file_list, score_list, actual_issame_list


def main():
    """CLI entry point: parse args, load features, score pairs and evaluate."""
    parser = argparse.ArgumentParser(description="Face Rec Eval")
    parser.add_argument(
        "--image_dir", default="/rootfs/media/kasim/Data/data/ErisedVideoFaceRec/images", help="path to face image dir", type=str
    )
    parser.add_argument(
        "--face_pair_list", default="/rootfs/media/kasim/Data/data/ErisedVideoFaceRec/FacePair.txt", help="path to face image pair list", type=str
    )
    parser.add_argument(
        "--file_list", default="/rootfs/media/kasim/Data/data/ErisedVideoFaceRec/BoxFeature.txt", help="path to face image file list", type=str
    )
    parser.add_argument(
        "--feature_file", default="/rootfs/media/kasim/Data/data/ErisedVideoFaceRec/Feature.dat", help="path to face image feature file", type=str
    )
    parser.add_argument(
        "--feature_dim", default=256, help="feature dim", type=int
    )
    parser.add_argument(
        "--skip_feature_head_offset", default=8, help="skip feature head offset", type=int
    )
    parser.add_argument(
        "--output_dir", default="/rootfs/media/kasim/Data/data/ErisedVideoFaceRec", help="path to eval result out path", type=str
    )
    parser.add_argument('--proccess_count', type=int, default=16, help='detect proccess count')
    parser.add_argument('--gpu_count', type=int, default=2, help='detect gpu count')
    parser.add_argument("--show", action='store_true', help="show error")
    parser.add_argument("--gpu", action='store_true', help="use gpu")
    parser.add_argument('--threshold', type=float, default=None, help='score threshold')  # evaluate at this fixed threshold; usually omitted so the best one is searched
    parser.add_argument("--only_eval", action='store_true', help="only evaluate")  # reuse an existing face_pair_scores.txt instead of re-scoring
    parser.add_argument("--use_median", action='store_true', help="use median threshold")  # several thresholds may tie on accuracy; pick the median one
    args = parser.parse_args()
    output_dir = args.output_dir
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
        os.system('chmod a+wr {}'.format(output_dir))

    if args.only_eval:
        pair_file_list, score_list, actual_issame_list = read_face_pair_scores(os.path.join(output_dir, 'face_pair_scores.txt'))
        # BUG FIX: pass the parsed --use_median flag (it was hard-coded False,
        # making the option a silent no-op on the only_eval path).
        evaluate(args, pair_file_list, score_list, actual_issame_list, output_dir, threshold=args.threshold, use_median=args.use_median, show=args.show, imagepath=args.image_dir)
        print('finish!')
        return

    file_features = read_features(args.file_list, args.feature_file, args.feature_dim, args.skip_feature_head_offset)

    eval_score_mp(args, file_features)

    print('finish!')


# Script entry point: run the full scoring + evaluation pipeline.
if __name__ == '__main__':
    main()
