# coding=utf-8
"""Functions for building the face recognition network.
"""
# MIT License
# 
# Copyright (c) 2016 David Sandberg
# 
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# 
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# 
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
from subprocess import Popen, PIPE
import tensorflow as tf
from tensorflow.python.framework import ops
import numpy as np
from scipy import misc
from sklearn.model_selection import KFold
from scipy import interpolate
from tensorflow.python.training import training
import random
import re
import csv
from tensorflow.python.platform import gfile


# 第一部分： 定义了几种损失函数（具体定义参考相应的文章），包括：
#     triplet_loss -- 欧式距离型三元组损失
#     triplet_loss_angular --- 角度距离型三元组损失
#     soft_margin_loss --- 软边沿损失
#    simple_margin_loss --- 简单边沿损失（欧式距离）
#     simple_margin_loss_angular --- 简单边沿损失（角度距离）
#    decov_loss --- 为克服过拟合而引入的一种去相关的表达及对应的损失
#     center_loss --- 中心损失
# 注意计算不同的损失函数时，因定义的不同，需要的输入参数可能有所区别
# 善于运用tensorflow 的数学计算函数来实现完整损失的计算，如 tf.square， tf.reduce_sum等
def triplet_loss(anchor, positive, negative, alpha):  
    """Compute the FaceNet triplet loss with squared Euclidean distances.

    Args:
      anchor: embeddings of the anchor images.
      positive: embeddings of images of the same identity as the anchor.
      negative: embeddings of images of a different identity.
      alpha: margin enforced between positive and negative distances.

    Returns:
      Scalar float tensor: mean hinge loss over the batch.
    """
    with tf.variable_scope('triplet_loss'):
        # Squared Euclidean distance, one value per triplet in the batch.
        pos_dist = tf.reduce_sum(tf.square(anchor - positive), 1)
        neg_dist = tf.reduce_sum(tf.square(anchor - negative), 1)
        # Hinge: only triplets that violate the margin contribute.
        per_triplet = tf.maximum(pos_dist - neg_dist + alpha, 0.0)
        loss = tf.reduce_mean(per_triplet, 0)
    return loss


def triplet_loss_angular(anchor, positive, negative, alpha=0.06):
    """Triplet loss variant using inner-product (angular) similarity.

    Args:
      anchor: embeddings of the anchor images.
      positive: embeddings of the positive images.
      negative: embeddings of the negative images.
      alpha: similarity margin between negative and positive pairs.

    Returns:
      Scalar float tensor: mean hinge loss over the batch.
    """
    with tf.variable_scope('triplet_loss_angular'):
        # Inner products (cosine similarity for L2-normalised embeddings).
        pos_sim = tf.reduce_sum(anchor * positive, 1)
        neg_sim = tf.reduce_sum(anchor * negative, 1)
        # Penalise when the negative is more similar than the positive.
        per_triplet = tf.maximum(neg_sim - pos_sim + alpha, 0.0)
        loss = tf.reduce_mean(per_triplet, 0)
    return loss


def soft_margin_loss(anchor, positive, negative, belta=0.001):
    """Soft-margin triplet loss, per 'In Defense of the Triplet Loss for
    Person Re-Identification'.

    Args:
      anchor, positive, negative: the triplet embeddings.
      belta: weight of the auxiliary term that pulls positive pairs closer.

    Returns:
      Scalar float tensor combining the softplus triplet term and the
      auxiliary positive-distance term.
    """
    with tf.variable_scope('triplet_loss_softmargin'):
        pos_dist = tf.sqrt(tf.reduce_sum(tf.square(anchor - positive), 1))
        neg_dist = tf.sqrt(tf.reduce_sum(tf.square(anchor - negative), 1))
        # Smooth (softplus) replacement for the hard hinge: log(1 + e^(d+ - d-)).
        basic_loss = tf.reduce_mean(tf.log(1 + tf.exp(pos_dist - neg_dist)), 0)
        # Auxiliary term penalising large positive-pair distances.
        auxiliary_loss = tf.reduce_mean(tf.log(1 + tf.exp(pos_dist)), 0)
        tf.summary.scalar('basic_loss', basic_loss)
        tf.summary.scalar('auxiliary_loss', auxiliary_loss)
        loss = basic_loss + belta * auxiliary_loss
    return loss 


def simple_margin_loss(anchor, positive, negative, alpha):
    '''Margin-based loss from 'Sampling Matters in Deep Embedding Learning'
    (Euclidean-distance form).

    Args:
        alpha: the margin around the boundary belta (this is NOT the
            triplet-loss alpha); needs tuning by experiment.
    Note:
        belta, the distance boundary, is fixed at 1.1 here.
    '''
    belta = 1.1
    with tf.variable_scope('triplet_loss'):
        pos_dist = tf.sqrt(tf.reduce_sum(tf.square(anchor - positive), 1))
        neg_dist = tf.sqrt(tf.reduce_sum(tf.square(anchor - negative), 1))
        # Positives are penalised beyond (belta - alpha); negatives inside
        # (belta + alpha).
        pos_term = tf.maximum(alpha + pos_dist - belta, 0.0)
        neg_term = tf.maximum(alpha - neg_dist + belta, 0.0)
        loss = tf.reduce_mean(pos_term + neg_term)
    return loss


def simple_margin_loss_angular(anchor, positive, negative, alpha=0.05):
    '''Margin-based loss from 'Sampling Matters in Deep Embedding Learning',
    using inner-product (angular) similarity instead of Euclidean distance.

    Args:
        alpha: the margin around the boundary belta (this is NOT the
            triplet-loss alpha); needs tuning by experiment.
    Note:
        belta, the similarity boundary, is fixed at 0.28 here.
    '''
    belta = 0.28
    with tf.variable_scope('triplet_loss_angular'):
        pos_sim = tf.reduce_sum(anchor * positive, 1)
        neg_sim = tf.reduce_sum(anchor * negative, 1)
        # For similarities the inequalities flip: negatives are penalised
        # above (belta - alpha), positives below (belta + alpha).
        neg_term = tf.maximum(alpha + neg_sim - belta, 0.0)
        pos_term = tf.maximum(alpha - pos_sim + belta, 0.0)
        loss = tf.reduce_mean(neg_term + pos_term)
    return loss


def decov_loss(xs):
    """DeCov loss as described in https://arxiv.org/pdf/1511.06068.pdf
    'Reducing Overfitting In Deep Networks by Decorrelating Representation'.

    Penalises the off-diagonal entries of the batch covariance of the
    (flattened) activations, encouraging decorrelated features.
    """
    flat = tf.reshape(xs, [int(xs.get_shape()[0]), -1])
    centered = flat - tf.reduce_mean(flat, 0, True)
    z = tf.expand_dims(centered, 2)
    # Batch-averaged covariance matrix of the flattened activations.
    corr = tf.reduce_mean(tf.matmul(z, tf.transpose(z, perm=[0, 2, 1])), 0)
    # 0.5 * (||C||_F^2 - ||diag(C)||^2): off-diagonal energy only.
    loss = 0.5 * (tf.reduce_sum(tf.square(corr))
                  - tf.reduce_sum(tf.square(tf.diag_part(corr))))
    return loss 


def center_loss(features, label, alfa, nrof_classes):
    """Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition"
       (http://ydwen.github.io/papers/WenECCV16.pdf)

    Args:
      features: 2-D tensor of embeddings, shape (batch, nrof_features).
      label: class index for each example; flattened to 1-D below.
      alfa: center update rate; each step a center moves toward its batch
        features by (1 - alfa) * (center - feature).
      nrof_classes: total number of classes.

    Returns:
      (loss, centers): mean squared distance of each feature to its
      (pre-update) class center, and the updated centers tensor.
      NOTE(review): the scatter_sub update only executes if the returned
      `centers` tensor is actually evaluated by the caller — confirm.
    """
    nrof_features = features.get_shape()[1]
    # Centers are stored in a non-trainable variable: they are updated
    # manually via scatter_sub, not by the optimizer.
    centers = tf.get_variable('centers', [nrof_classes, nrof_features], dtype=tf.float32,
        initializer=tf.constant_initializer(0), trainable=False)
    label = tf.reshape(label, [-1])
    centers_batch = tf.gather(centers, label)
    # Moving-average-style update for the centers of classes in this batch.
    diff = (1 - alfa) * (centers_batch - features)
    centers = tf.scatter_sub(centers, label, diff)
    # The loss is computed against the centers gathered BEFORE the update.
    loss = tf.reduce_mean(tf.square(features - centers_batch))
    # loss_2 = tf.reduce_mean(tf.reduce_sum(tf.square(features - centers_batch),1))
    return loss, centers


# 第二部分： 主要定义了图像数据的读入以及各种数据增强（augmentation）的技巧
# 获取图像的文件路径以及对应标签
def get_image_paths_and_labels(dataset):
    """Flatten a dataset of per-class objects into parallel flat lists.

    Args:
       dataset: sequence of objects exposing an `image_paths` list
         (e.g. ImageClass instances).

    Returns:
       (image_paths_flat, labels_flat): all paths concatenated, and for
       each path the index of the class it came from.
    """
    image_paths_flat = []
    labels_flat = []
    for label, cls in enumerate(dataset):
        image_paths_flat.extend(cls.image_paths)
        labels_flat.extend([label] * len(cls.image_paths))
    return image_paths_flat, labels_flat


def shuffle_examples(image_paths, labels):
    """Shuffle paths and labels together, keeping each (path, label) pair aligned.

    Returns:
      Two tuples (paths, labels) in a common random order.
    """
    paired = list(zip(image_paths, labels))
    random.shuffle(paired)
    shuffled_paths, shuffled_labels = zip(*paired)
    return shuffled_paths, shuffled_labels


def read_images_from_disk(input_queue):
    """Read one image file and its label from a slice-input queue.

    Args:
      input_queue: [filename, label] tensor pair, as produced by
        tf.train.slice_input_producer.

    Returns:
      (image, label): the decoded 3-channel image tensor and the label.
    """
    filename, label = input_queue[0], input_queue[1]
    contents = tf.read_file(filename)
    image = tf.image.decode_png(contents, channels=3)
    return image, label


# 随机旋转图片
def random_rotate_image(image):
    """Rotate the image by a random angle drawn uniformly from [-10, 10] degrees."""
    # NOTE(review): scipy.misc.imrotate was removed in SciPy >= 1.3 — confirm
    # the pinned scipy version supports it.
    return misc.imrotate(image, np.random.uniform(-10.0, 10.0), 'bicubic')


# 读入并增强图片
def read_and_augment_data(image_list, label_list, image_size, batch_size, max_nrof_epochs, 
        random_crop, random_flip, random_rotate, nrof_preprocess_threads, shuffle=True):
    """Build a queue-based input pipeline with optional augmentation.

    1. Preprocesses each image with optional rotation, random crop (or
       center crop/pad) and horizontal flip.
    2. Fixes the static shape and applies per-image standardization.
    3. Batches the results using several parallel preprocessing pipelines.

    Args:
      image_list: list of image file paths.
      label_list: list of integer labels, parallel to image_list.
      image_size: output height/width in pixels.
      batch_size: number of images per output batch.
      max_nrof_epochs: number of epochs the input queue will serve.
      random_crop: if true, take a random crop; otherwise center crop/pad.
      random_flip: if true, randomly mirror images left-right.
      random_rotate: if true, randomly rotate images (numpy py_func).
      nrof_preprocess_threads: number of parallel preprocessing pipelines.
      shuffle: whether the input queue shuffles examples.

    Returns:
      (image_batch, label_batch); the final batch may be smaller.
    """
    images = ops.convert_to_tensor(image_list, dtype=tf.string)
    labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
    
    # Makes an input queue serving (filename, label) slices.
    input_queue = tf.train.slice_input_producer([images, labels],
        num_epochs=max_nrof_epochs, shuffle=shuffle)

    images_and_labels = []
    # Build one preprocessing pipeline per thread; batch_join merges them.
    for _ in range(nrof_preprocess_threads):
        image, label = read_images_from_disk(input_queue)  # read image + label from the queue
        # Apply the augmentations requested via the flags.
        if random_rotate:
            # Rotation runs in numpy via py_func and yields uint8 pixels.
            image = tf.py_func(random_rotate_image, [image], tf.uint8)
        if random_crop:
            image = tf.random_crop(image, [image_size, image_size, 3])
        else:
            image = tf.image.resize_image_with_crop_or_pad(image, image_size, image_size)
        if random_flip:
            image = tf.image.random_flip_left_right(image)
        # pylint: disable=no-member
        image.set_shape((image_size, image_size, 3))
        image = tf.image.per_image_standardization(image)
        images_and_labels.append([image, label])

    # Merge all pipelines into batches of images and labels.
    image_batch, label_batch = tf.train.batch_join(
        images_and_labels, batch_size=batch_size,
        capacity=4 * nrof_preprocess_threads * batch_size,
        allow_smaller_final_batch=True)
  
    return image_batch, label_batch


# 第三部分： 定义summary 函数来记录损失（包括total loss总损失 以及 各种损失的滑动平均版本（moving average）
def _add_loss_summaries(total_loss):
    """Attach moving-average tracking and scalar summaries to all losses.

    Every tensor in the 'losses' collection, plus total_loss, gets two
    summaries: the raw value (suffixed ' (raw)') and its exponential
    moving average (under the original name).

    Args:
      total_loss: Total loss from loss().
    Returns:
      loss_averages_op: op that updates the loss moving averages.
    """
    averager = tf.train.ExponentialMovingAverage(0.9, name='avg')
    tracked = tf.get_collection('losses') + [total_loss]
    loss_averages_op = averager.apply(tracked)

    for loss in tracked:
        # The raw loss keeps the ' (raw)' suffix; the smoothed version
        # takes over the original op name.
        tf.summary.scalar(loss.op.name + ' (raw)', loss)
        tf.summary.scalar(loss.op.name, averager.average(loss))

    return loss_averages_op


# 第四部分：定义train函数
# 输入参数： total_loss -- 总损失， global_step -- 全局步数（在这一步要执行什么操作）
# optimizer -- 优化器（如adam, momentum等） learning_rate --- 学习率
# moving_average_decay -- 滑动平均衰减，用于计算滑动平均
# update_gradient_vars --- 可更新梯度的变量
# log_histograms= -- 是否记录可训练变量的直方图的bool变量
def train(total_loss, global_step, optimizer, learning_rate, moving_average_decay, update_gradient_vars, log_histograms=True):
    """Create the training op: minimize total_loss and track variable averages.

    Args:
      total_loss: the total loss to minimize.
      global_step: global step variable, incremented on each update.
      optimizer: one of 'ADAGRAD', 'ADADELTA', 'ADAM', 'RMSPROP', 'MOM'.
      learning_rate: learning rate tensor or value.
      moving_average_decay: decay for the trainable-variable moving averages.
      update_gradient_vars: variables whose gradients are computed/applied.
      log_histograms: whether to add histogram summaries for trainable
        variables and their gradients.

    Returns:
      train_op: op that applies gradients and updates the moving averages.

    Raises:
      ValueError: if optimizer is not one of the supported names.
    """
    # Generate moving averages of all losses and associated summaries.
    # loss_averages_op = _add_loss_summaries(total_loss)

    # Select the optimizer.
    # with tf.control_dependencies([loss_averages_op]):
    if optimizer == 'ADAGRAD':
        opt = tf.train.AdagradOptimizer(learning_rate)
    elif optimizer == 'ADADELTA':
        opt = tf.train.AdadeltaOptimizer(learning_rate, rho=0.9, epsilon=1e-6)
    elif optimizer == 'ADAM':
        opt = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=0.1)
    elif optimizer == 'RMSPROP':
        opt = tf.train.RMSPropOptimizer(learning_rate, decay=0.9, momentum=0.9, epsilon=1.0)
    elif optimizer == 'MOM':
        opt = tf.train.MomentumOptimizer(learning_rate, 0.9, use_nesterov=True)
    else:
        raise ValueError('Invalid optimization algorithm')

    # Compute gradients of the loss w.r.t. the updatable variables.
    grads = opt.compute_gradients(total_loss, update_gradient_vars)
        
    # Apply the gradient update.
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
  
    # Add histograms for trainable variables.
    if log_histograms:
        for var in tf.trainable_variables():
            tf.summary.histogram(var.op.name, var)
   
    # Add histograms for gradients.
    if log_histograms:
        for grad, var in grads:
            if grad is not None:
                tf.summary.histogram(var.op.name + '/gradients', grad)
  
    # Track the moving averages of all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(
        moving_average_decay, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
  
    # The returned no-op depends on both the gradient application and the
    # moving-average update, so running it performs both.
    with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
        train_op = tf.no_op(name='train')
  
    return train_op


# 第五部分：定义了若干图像处理的操作（prewhiten, crop,flip，to_rgb),然后导入图像数据进行相关预处理
# prewhiten --- 预白化,减去均值除以标准差
def prewhiten(x):
    """Normalise an image array to zero mean and (adjusted) unit std.

    The std is floored at 1/sqrt(x.size), which avoids division by zero
    and keeps near-constant images from being amplified.
    """
    std_adj = np.maximum(np.std(x), 1.0 / np.sqrt(x.size))
    return (x - np.mean(x)) * (1 / std_adj)


# crop --- 按给定的大小截取原图中的某块区域
def crop(image, random_crop, image_size):
    """Cut an image_size x image_size patch out of the image.

    With random_crop the patch centre is jittered by up to the available
    slack; otherwise the central patch is taken.  Images whose width is
    already <= image_size are returned unchanged (no padding).
    """
    if image.shape[1] > image_size:
        centre = int(image.shape[1] // 2)
        half = int(image_size // 2)
        if random_crop:
            slack = centre - half
            h = np.random.randint(-slack, slack + 1)
            v = np.random.randint(-slack, slack + 1)
        else:
            h, v = 0, 0
        image = image[(centre - half + v):(centre + half + v),
                      (centre - half + h):(centre + half + h), :]
    return image


# flip --  图像的翻转
def flip(image, random_flip):
    """Mirror the image left-right with probability 0.5 when random_flip is set."""
    if random_flip and np.random.choice([True, False]):
        return np.fliplr(image)
    return image


# 转化为RGB图像
def to_rgb(img):
    """Replicate a grayscale (h, w) image into a 3-channel uint8 array."""
    rows, cols = img.shape
    rgb = np.empty((rows, cols, 3), dtype=np.uint8)
    for channel in range(3):
        rgb[:, :, channel] = img
    return rgb


# 导入数据，并利用上述定义的各种操作进行预处理
def load_data(image_paths, do_random_crop, do_random_flip, image_size, do_prewhiten=True):
    """Load images from disk into an (n, image_size, image_size, 3) array.

    Grayscale inputs are expanded to RGB; each image is then optionally
    prewhitened, cropped and flipped according to the flags.
    """
    images = np.zeros((len(image_paths), image_size, image_size, 3))
    for idx, path in enumerate(image_paths):
        img = misc.imread(path)
        if img.ndim == 2:
            img = to_rgb(img)
        if do_prewhiten:
            img = prewhiten(img)
        images[idx] = flip(crop(img, do_random_crop, image_size), do_random_flip)
    return images


# belief data 也就是对原始的image data作了resize操作
def load_belief_data(image_paths, do_random_crop, do_random_flip, image_size, do_prewhiten=True):
    """Like load_data, but resizes each image to image_size+22 per side first
    and then takes the fixed central image_size crop.

    Note: do_random_crop is accepted for signature parity with load_data but
    is not used — the crop here is always the fixed [22:22+image_size] window.
    """
    images = np.zeros((len(image_paths), image_size, image_size, 3))
    for idx, path in enumerate(image_paths):
        # Up-front resize: the only difference from load_data.
        img = misc.imresize(misc.imread(path), [image_size + 22, image_size + 22, 3])
        # NOTE(review): this ndim check runs AFTER the 3-channel resize, so
        # the to_rgb branch looks unreachable — confirm the intended order.
        if img.ndim == 2:
            img = to_rgb(img)
        if do_prewhiten:
            img = prewhiten(img)
        img = img[22:22 + image_size, 22:22 + image_size, :]
        images[idx] = flip(img, do_random_flip)
    return images   


# 第六部分 获取各种批数据，包括 label_batch, image_data_batch, triplet_batch
# 输入参数：
# data： 原始的数据集（如label, image)
# batch_size ： 批的大小
# batch_index ： 批的下标，即第几批
def get_label_batch(label_data, batch_size, batch_index):
    """Return the batch_index-th batch of labels, wrapping around the end.

    Args:
      label_data: 1-D array of labels.
      batch_size: number of labels per batch.
      batch_index: index of the requested batch.

    Returns:
      int64 array of exactly batch_size labels.
    """
    nrof_examples = np.size(label_data, 0)
    j = batch_index*batch_size % nrof_examples
    if j+batch_size <= nrof_examples:
        batch = label_data[j:j+batch_size]
    else:
        # Wrap around: take the tail, then enough labels from the start.
        # Bug fix: the wrapped slice must hold batch_size-(nrof_examples-j)
        # elements (was nrof_examples-j), and 1-D labels must be
        # concatenated, not vstack-ed into a 2-D array.
        x1 = label_data[j:nrof_examples]
        x2 = label_data[0:j+batch_size-nrof_examples]
        batch = np.concatenate([x1, x2])
    batch_int = batch.astype(np.int64)
    return batch_int


def get_batch(image_data, batch_size, batch_index):
    """Return the batch_index-th batch of images, wrapping around the end.

    Args:
      image_data: 4-D array of images (n, h, w, c).
      batch_size: number of images per batch.
      batch_index: index of the requested batch.

    Returns:
      float32 array of exactly batch_size images.
    """
    nrof_examples = np.size(image_data, 0)
    j = batch_index*batch_size % nrof_examples
    if j+batch_size <= nrof_examples:
        batch = image_data[j:j+batch_size, :, :, :]
    else:
        x1 = image_data[j:nrof_examples, :, :, :]
        # Bug fix: the wrapped slice must contain batch_size-(nrof_examples-j)
        # images (was nrof_examples-j) so every batch has batch_size elements.
        x2 = image_data[0:j+batch_size-nrof_examples, :, :, :]
        batch = np.vstack([x1, x2])
    batch_float = batch.astype(np.float32)
    return batch_float


# triplet 代表三元组，即 ax --- anchor image, px --- positive image, nx --- negative image
def get_triplet_batch(triplets, batch_index, batch_size):
    """Build one batch from triplet data (anchors, positives, negatives).

    One third of the batch comes from each component, stacked in the
    order [anchors; positives; negatives].
    """
    third = int(batch_size / 3)
    parts = [get_batch(component, third, batch_index) for component in triplets]
    return np.vstack(parts)


# 从文件中读取学习率learning_rate,主要是配合学习率随epoch衰减时使用
def get_learning_rate_from_file(filename, epoch):
    """Look up the learning rate scheduled for the given epoch.

    The schedule file contains lines of the form 'epoch: lr'; '#' starts
    a comment.  The rate of the last entry whose epoch is <= the
    requested epoch applies.

    Args:
      filename: path to the schedule file.
      epoch: the epoch to look up.

    Returns:
      The scheduled learning rate, or None if no entry has epoch <= the
      requested epoch.
    """
    learning_rate = None
    with open(filename, 'r') as f:
        for line in f:
            line = line.split('#', 1)[0]
            if line.strip():  # skip blank/comment-only lines
                par = line.strip().split(':')
                e = int(par[0])
                lr = float(par[1])
                if e <= epoch:
                    learning_rate = lr
                else:
                    return learning_rate
    # Bug fix: previously the function fell off the end and returned None
    # whenever the requested epoch was beyond the last scheduled entry.
    return learning_rate


# 第七部分  --- 定义图像类 ImageClass
# 类的属性 --- name（名字）， image_path（指向它的文字路径）
class ImageClass():
    """All image paths belonging to one identity (class)."""

    def __init__(self, name, image_paths):
        self.name = name                # name of the class (person)
        self.image_paths = image_paths  # paths of this class's images

    def __str__(self):
        return '%s, %d images' % (self.name, len(self.image_paths))

    def __len__(self):
        # Number of images in this class.
        return len(self.image_paths)


# 文件夹层次/路径依赖关系 ---: root path/ class//img 1 2 3...
# 获取数据集
def get_dataset(paths):
    """Build a list of ImageClass objects from colon-separated root paths.

    Each immediate subdirectory of a root directory is one class and its
    files are that class's images (layout: root/class_name/images).
    """
    dataset = []
    for path in paths.split(':'):
        path_exp = os.path.expanduser(path)
        # One subdirectory per class, visited in sorted (deterministic) order.
        for class_name in sorted(os.listdir(path_exp)):
            facedir = os.path.join(path_exp, class_name)
            if os.path.isdir(facedir):
                image_paths = [os.path.join(facedir, img)
                               for img in os.listdir(facedir)]
                dataset.append(ImageClass(class_name, image_paths))
    return dataset


# 把数据集分为训练集train_set和测试集test_set
def split_dataset(dataset, split_ratio, mode):
    """Split a dataset into a train set and a test set.

    Args:
      dataset: list of ImageClass objects.
      split_ratio: fraction assigned to the training side.
      mode: 'SPLIT_CLASSES' (whole classes go to one side),
            'SPLIT_IMAGES' (each class's images are split between sides), or
            'SPLIT_CSV' (membership from data/peopleDevTrain.csv and
            data/peopleDevTest.csv, as in the LFW protocol).

    Returns:
      (train_set, test_set).

    Raises:
      ValueError: for an unknown mode.
    """
    if mode == 'SPLIT_CLASSES':
        # Whole classes are assigned to either the train or the test side.
        nrof_classes = len(dataset)
        class_indices = np.arange(nrof_classes)
        np.random.shuffle(class_indices)
        split = int(round(nrof_classes*split_ratio))
        train_set = [dataset[i] for i in class_indices[0:split]]
        # Bug fix: use [split:] — [split:-1] silently dropped the last class.
        test_set = [dataset[i] for i in class_indices[split:]]
    elif mode == 'SPLIT_IMAGES':
        # Each class contributes images to both sides.
        train_set = []
        test_set = []
        min_nrof_images = 2
        for cls in dataset:
            paths = cls.image_paths
            np.random.shuffle(paths)
            split = int(round(len(paths)*split_ratio))
            if split < min_nrof_images:
                continue  # Not enough images for test set. Skip class...
            train_set.append(ImageClass(cls.name, paths[0:split]))
            # Bug fix: [split:] instead of [split:-1], which lost one image.
            test_set.append(ImageClass(cls.name, paths[split:]))
    elif mode == 'SPLIT_CSV':
        # Membership dictated by the LFW peopleDevTrain/peopleDevTest files.
        train_set = []
        test_set = []
        people_train = []
        people_test = []
        with open("data/peopleDevTrain.csv", 'r') as csvfiletrain:
            reader = csv.reader(csvfiletrain)
            for row in reader:
                people_train.append(row[0])
        with open("data/peopleDevTest.csv", 'r') as csvfiletest:
            reader = csv.reader(csvfiletest)
            for row in reader:
                people_test.append(row[0])
        for cls in dataset:
            if cls.name in people_train:
                train_set.append(cls)
            if cls.name in people_test:
                test_set.append(cls)
    else:
        raise ValueError('Invalid train/test split mode "%s"' % mode)
    return train_set, test_set


# 第八部分 导入tensorflow模型 ,包括两部分
# 1. metagraph file 保留完整的tensorflow graph,如变量variable， 运算operation， 集合collection
# 2.checkpoint file  包含各种模型参数，如weight, bias, gradients等
def load_model(model):
    """Load a TensorFlow model into the default graph/session.

    Accepts either a frozen-graph protobuf file, or a model directory
    containing a metagraph (.meta) file plus a checkpoint.

    Args:
      model: path to a frozen-graph file or to a model directory.
    """
    # A plain file means a frozen graph; a directory means metagraph +
    # checkpoint, located via get_model_filenames.
    model_exp = os.path.expanduser(model)
    if os.path.isfile(model_exp):
        print('Model filename: %s' % model_exp)
        with gfile.FastGFile(model_exp, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')
    else:
        print('Model directory: %s' % model_exp)
        meta_file, ckpt_file = get_model_filenames(model_exp)
        
        print('Metagraph file: %s' % meta_file)
        print('Checkpoint file: %s' % ckpt_file)
        # Import the graph structure from the metagraph, then restore the
        # trained parameters into the session current at call time.
        saver = tf.train.import_meta_graph(os.path.join(model_exp, meta_file))  # loading the model
        saver.restore(tf.get_default_session(), os.path.join(model_exp, ckpt_file))  # restore the model parameters


# 给定模型目录，获取模型文件名即meta_file(metagraph file) 和 ckpt_file(checkpoint_file)
def get_model_filenames(model_dir):
    """Find the metagraph and the newest checkpoint file in a model directory.

    Args:
      model_dir: directory expected to hold exactly one .meta file and one
        or more 'model-*.ckpt-<step>' checkpoint files.

    Returns:
      (meta_file, ckpt_file): base names of the metagraph file and of the
      checkpoint with the highest step number.

    Raises:
      ValueError: if there is no meta file, more than one meta file, or no
        checkpoint file.
    """
    files = os.listdir(model_dir)
    meta_files = [s for s in files if s.endswith('.meta')]
    if len(meta_files) == 0:
        raise ValueError('No meta file found in the model directory (%s)' % model_dir)
    elif len(meta_files) > 1:
        raise ValueError('There should not be more than one meta file in the model directory (%s)' % model_dir)
    meta_file = meta_files[0]
    # Pick the checkpoint with the highest step number.
    max_step = -1
    ckpt_file = None
    for f in files:
        step_str = re.match(r'(^model-[\w\- ]+.ckpt-(\d+))', f)
        if step_str is not None and len(step_str.groups()) >= 2:
            step = int(step_str.groups()[1])
            if step > max_step:
                max_step = step
                ckpt_file = step_str.groups()[0]
    # Bug fix: ckpt_file used to be unbound (UnboundLocalError) when no
    # checkpoint matched; fail with a clear error instead.  Also removed an
    # unused reassignment of meta_files.
    if ckpt_file is None:
        raise ValueError('No checkpoint file found in the model directory (%s)' % model_dir)
    return meta_file, ckpt_file


# PART 9 --- evaluation metrics (ROC / accuracy / validation rate).
# Shared inputs of the functions below:
#   thresholds --- distance thresholds; each yields one point on the ROC curve
#   embeddings1 / embeddings2 --- embedding representations of the two people
#   actual_issame --- boolean array: whether each pair is the same person
#   nrof_folds --- number of folds for k-fold cross validation
# Outputs: tpr (true positive rate), fpr (false positive rate), accuracy.


def calculate_roc(thresholds, embeddings1, embeddings2, actual_issame, nrof_folds=10):
    """Compute the ROC curve and accuracy with k-fold cross validation.

    For each fold, the threshold maximising accuracy on the training folds
    is selected and accuracy is measured on the held-out pairs; tpr/fpr
    are evaluated at every threshold and averaged over folds.

    Args:
      thresholds: distance thresholds to sweep.
      embeddings1, embeddings2: embeddings of the two images of each pair.
      actual_issame: boolean array, ground truth per pair.
      nrof_folds: number of cross-validation folds.

    Returns:
      (tpr, fpr, accuracy): per-threshold mean tpr/fpr and per-fold accuracy.
    """
    assert(embeddings1.shape[0] == embeddings2.shape[0])
    assert(embeddings1.shape[1] == embeddings2.shape[1])
    nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
    nrof_thresholds = len(thresholds)
    k_fold = KFold(n_splits=nrof_folds, shuffle=False)

    tprs = np.zeros((nrof_folds, nrof_thresholds))
    fprs = np.zeros((nrof_folds, nrof_thresholds))
    accuracy = np.zeros((nrof_folds))

    # Squared Euclidean distance between the pair embeddings.
    diff = np.subtract(embeddings1, embeddings2)
    dist = np.sum(np.square(diff), 1)
    # dist = (embeddings1 * embeddings2).sum(axis=1)/2.
    indices = np.arange(nrof_pairs)

    for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):

        # Find the best threshold for the fold on the training pairs.
        acc_train = np.zeros((nrof_thresholds))
        for threshold_idx, threshold in enumerate(thresholds):
            _, _, acc_train[threshold_idx] = calculate_accuracy(threshold, dist[train_set], actual_issame[train_set])
        best_threshold_index = np.argmax(acc_train)
        # Evaluate tpr/fpr at every threshold on the held-out fold.
        for threshold_idx, threshold in enumerate(thresholds):
            tprs[fold_idx, threshold_idx], fprs[fold_idx, threshold_idx], _ = calculate_accuracy(threshold, dist[test_set], actual_issame[test_set])
        _, _, accuracy[fold_idx] = calculate_accuracy(thresholds[best_threshold_index], dist[test_set], actual_issame[test_set])

    # Fixes: removed leftover debug prints (one printed shape[0] while
    # labelled shape[1]), removed the unused best_thres list, and moved the
    # fold-averaging out of the loop where it ran redundantly per fold.
    tpr = np.mean(tprs, 0)
    fpr = np.mean(fprs, 0)
    return tpr, fpr, accuracy


# 以算法预测出来的label和真实的label为输入，通过逻辑的与或非操作，计算true positive(tp), false positive(fp),
# true negative(tn) and false negative(fn) 的数量， 进而用它们计算tpr, fpr， acc等评价指标
def calculate_accuracy(threshold, dist, actual_issame):
    """Compute tpr, fpr and accuracy of the 'same person' decision at one threshold.

    A pair is predicted 'same' when its distance is below the threshold.

    Args:
      threshold: decision threshold on the distance.
      dist: array of pair distances.
      actual_issame: boolean array of ground-truth labels.

    Returns:
      (tpr, fpr, acc): true positive rate, false positive rate, accuracy.
    """
    predict_issame = np.less(dist, threshold)
    tp = np.sum(np.logical_and(predict_issame, actual_issame))
    fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
    tn = np.sum(np.logical_and(np.logical_not(predict_issame), np.logical_not(actual_issame)))
    fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))

    # Guard against empty positive / negative sets.
    tpr = 0 if (tp+fn == 0) else float(tp) / float(tp+fn)
    fpr = 0 if (fp+tn == 0) else float(fp) / float(fp+tn)
    acc = float(tp+tn)/dist.size
    return tpr, fpr, acc
# (Fix: an unreachable, misplaced docstring describing calculate_val used to
# be parsed as dead code inside this function; it has been removed.)


# 也是一种评价指标，其计算方式类似上面计算 tpr, fpr, acc的方式
def calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10):
    """Compute the validation rate at a target false-accept rate via k-fold CV.

    For each fold, the threshold that yields FAR == far_target is found by
    interpolation on the training folds, then VAL and FAR are measured on
    the held-out fold.

    VAL (validation / true-accept rate): of all genuine 'same' pairs, the
    fraction accepted at the chosen threshold.  (acc additionally requires
    'different' pairs to be rejected; VAL does not.)
    FAR (false-accept rate): of all 'different' pairs, the fraction wrongly
    accepted — equivalent to fpr.

    Args:
      thresholds: candidate distance thresholds.
      embeddings1, embeddings2: embeddings of the two images of each pair.
      actual_issame: boolean ground-truth array per pair.
      far_target: desired false-accept rate.
      nrof_folds: number of cross-validation folds.

    Returns:
      (val_mean, val_std, far_mean) across the folds.
    """
    assert(embeddings1.shape[0] == embeddings2.shape[0])
    assert(embeddings1.shape[1] == embeddings2.shape[1])
    nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
    nrof_thresholds = len(thresholds)
    k_fold = KFold(n_splits=nrof_folds, shuffle=False)

    val = np.zeros(nrof_folds)
    far = np.zeros(nrof_folds)

    diff = np.subtract(embeddings1, embeddings2)
    dist = np.sum(np.square(diff), 1)
    # dist = (embeddings1 * embeddings2).sum(axis=1)/2.
    indices = np.arange(nrof_pairs)

    for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):

        # Find the threshold that gives FAR = far_target on the train folds.
        far_train = np.zeros(nrof_thresholds)
        for threshold_idx, threshold in enumerate(thresholds):
            _, far_train[threshold_idx] = calculate_val_far(threshold, dist[train_set], actual_issame[train_set])
        if np.max(far_train) >= far_target:
            f = interpolate.interp1d(far_train, thresholds, kind='slinear')
            threshold = f(far_target)
        else:
            # far_target unreachable at any threshold: accept nothing.
            threshold = 0.0

        val[fold_idx], far[fold_idx] = calculate_val_far(threshold, dist[test_set], actual_issame[test_set])

    val_mean = np.mean(val)
    far_mean = np.mean(far)
    val_std = np.std(val)
    return val_mean, val_std, far_mean
# (Fix: an unreachable docstring about val/far was parsed as dead code after
# the return; its content now lives in the function docstring above.)


def calculate_val_far(threshold, dist, actual_issame):
    """Compute VAL and FAR of the 'same' decision at one distance threshold.

    Returns:
      (val, far): fraction of genuine pairs accepted, and fraction of
      impostor pairs wrongly accepted; 0 when the respective set is empty.
    """
    accepted = np.less(dist, threshold)
    true_accept = np.sum(np.logical_and(accepted, actual_issame))
    false_accept = np.sum(np.logical_and(accepted, np.logical_not(actual_issame)))
    n_same = np.sum(actual_issame)
    n_diff = np.sum(np.logical_not(actual_issame))
    val = float(true_accept) / float(n_same) if float(n_same) != 0 else 0
    far = float(false_accept) / float(n_diff) if float(n_diff) != 0 else 0
    return val, far


# 第十部分
# 记录修改的信息
def store_revision_info(src_path, output_dir, arg_string):
    """Record the git revision, local diff and command line for reproducibility.

    Writes <output_dir>/revision_info.txt containing the arguments, the
    current git HEAD hash and the uncommitted diff of the source tree.

    Args:
      src_path: directory of the source tree (used as git's cwd).
      output_dir: directory that receives revision_info.txt.
      arg_string: command-line arguments to record.
    """
    # Get git hash
    gitproc = Popen(['git', 'rev-parse', 'HEAD'], stdout=PIPE, cwd=src_path)
    (stdout, _) = gitproc.communicate()
    # Bug fix: under Python 3 communicate() returns bytes; decode so the
    # report does not contain b'...' literals.
    git_hash = stdout.strip()
    if isinstance(git_hash, bytes):
        git_hash = git_hash.decode('utf-8', 'replace')

    # Get local changes
    gitproc = Popen(['git', 'diff', 'HEAD'], stdout=PIPE, cwd=src_path)
    (stdout, _) = gitproc.communicate()
    git_diff = stdout.strip()
    if isinstance(git_diff, bytes):
        git_diff = git_diff.decode('utf-8', 'replace')

    # Store a text file in the log directory
    rev_info_filename = os.path.join(output_dir, 'revision_info.txt')
    with open(rev_info_filename, "w") as text_file:
        text_file.write('arguments: %s\n--------------------\n' % arg_string)
        text_file.write('git hash: %s\n--------------------\n' % git_hash)
        text_file.write('%s' % git_diff)


# 列举文件中的所有变量
def list_variables(filename):
    """Return the sorted names of all variables stored in a checkpoint file."""
    variable_map = training.NewCheckpointReader(filename).get_variable_to_shape_map()
    return sorted(variable_map.keys())
#----- PART 10 : record the revising information and list the variable in a specific file
