# -*- coding: utf-8 -*-
"""
训练中，阶段性测试数据反馈
csv格式：Probs，Xcoor，Ycoor，DataName
"""   

from __future__ import absolute_import
from __future__ import division
# from __future__ import print_function

import math
import tensorflow as tf
import time
from datetime import datetime

from tensorflow.python.platform import gfile
from prostate_input import inputPipeLine
import prostate_network
from tensorflow.python.platform import tf_logging as logging
import time
import numpy as np
import tensorflow.contrib.metrics as metrics
import os
'''
slim = tf.contrib.slim

tf.app.flags.DEFINE_integer(
    'batch_size', 100, 'The number of samples in each batch.')

tf.app.flags.DEFINE_integer(
    'max_num_batches', None,
    'Max number of batches to evaluate by default use all.')

tf.app.flags.DEFINE_string(
    'checkpoint_dir', '/tmp/prostate-models-ramsley/resnet50',
    'The directory where the model was written to or an absolute path to a '
    'checkpoint file.')

tf.app.flags.DEFINE_string(
    'model_name', 'resnet50', 'The name of the architecture to evaluate.')

tf.app.flags.DEFINE_float(
    'moving_average_decay', None,
    'The decay to use for the moving average.'
    'If left as None, then moving averages are not used.')

tf.app.flags.DEFINE_string(
    'is_train_data', 'validation',
    'Dataset split to evaluate, e.g. "train" or "validation".')
tf.app.flags.DEFINE_string(
    'eval_log_dir', '/home/ramsley/workspace/prostate/prostate_eval_log',
    'Directory where evaluation logs and summaries are written.')
tf.app.flags.DEFINE_integer(
    'num_examples', 10000, 'Total number of examples to evaluate.')
tf.app.flags.DEFINE_boolean(
    'run_once', True, 'Whether to run the evaluation only once.')
# Used by test() to pace repeated evaluations when run_once is False;
# it was referenced there but never declared before.
tf.app.flags.DEFINE_integer(
    'eval_interval_secs', 600,
    'Seconds to wait between evaluation passes when run_once is False.')
tf.app.flags.DEFINE_string(
    'PRE_csvDIR', '/DataSet/prostate/testingData/predictions/csv',
    'Directory for prediction CSV output.')
tf.app.flags.DEFINE_string(
    'dataset_dir',
    '/home/ramsley/DataSet/prostate/testingData/patch-based-classification/tf-records',
    'The directory where the dataset files are stored.')  # dataset read path
# BUG FIX: fast_mode holds a boolean but was declared with DEFINE_string.
tf.app.flags.DEFINE_boolean(
    'fast_mode', True, 'Whether the input pipeline runs in fast mode.')

FLAGS = tf.app.flags.FLAGS

def model_select(model, image_batch, is_training=False, reuse = None):
    """Build the inference graph for the requested architecture.

    Args:
        model: architecture name; one of 'vgg16', 'resnet152', 'resnet50',
            'inception_resnet_v2', 'inception_v3'.
        image_batch: batched image tensor fed to the network.
        is_training: whether to build the graph in training mode.
        reuse: variable-scope reuse flag (not passed to the vgg16 builder,
            which does not accept it).

    Returns:
        Whatever the selected *_inference builder returns
        (presumably (logits, endpoints) -- matches how test() unpacks it).

    Raises:
        ValueError: if `model` names an unknown architecture (previously an
            unknown name silently returned None).
    """
    if model == 'vgg16':
        # vgg16_inference takes no `reuse` argument, so it is special-cased.
        return prostate_network.vgg16_inference(image_batch, is_training=is_training)
    builders = {
        'resnet152': prostate_network.resnet152_inference,
        'resnet50': prostate_network.resnet50_inference,
        'inception_resnet_v2': prostate_network.inception_resnet_v2_inference,
        'inception_v3': prostate_network.inception_v3_inference,
    }
    if model not in builders:
        raise ValueError('Unknown model name: %r' % model)
    return builders[model](image_batch, is_training=is_training, reuse=reuse)

def test_once(saver, summary_writer, summary_op, top_k_op, confusion_matrix_op, logits, raw_info):
    """Run a single evaluation pass over the test set.

    Restores the latest checkpoint from FLAGS.checkpoint_dir, streams
    batches through the graph via queue runners, accumulates a 2x2
    confusion matrix and prints accuracy, sensitivity and specificity.

    Args:
        saver: tf.train.Saver used to restore model variables.
        summary_writer: tf.summary.FileWriter (currently unused; kept for
            interface compatibility).
        summary_op: merged summary op (currently unused).
        top_k_op: tf.nn.in_top_k op -- bool per example (label in top-1).
        confusion_matrix_op: 2x2 confusion matrix op; rows are ground
            truth, columns are predictions.
        logits: network output tensor.
        raw_info: list whose first element is the label batch tensor.
    """
    with tf.Session() as sess:
        tf.logging.info('Evaluating model from %s' % FLAGS.checkpoint_dir)
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            # Checkpoint paths look like ".../model.ckpt-12345"; the suffix
            # after the final '-' is the global step.
            global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
            print('Successfully loaded model from %s at step=%s.'
                  % (ckpt.model_checkpoint_path, global_step))
        else:
            print('No checkpoint file found')
            return

        # Start the queue runners feeding the input pipeline.
        coord = tf.train.Coordinator()
        try:
            threads = []
            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
                                                 start=True))
            if FLAGS.max_num_batches:
                num_iter = FLAGS.max_num_batches
            else:
                # Number of batches needed to cover all examples once.
                num_iter = math.ceil(FLAGS.num_examples / float(FLAGS.batch_size))
            step = 0
            print('%s: start evaluation on TestSet.' % (datetime.now()))
            TP = TN = FP = FN = 0
            TOTAL_NEG = TOTAL_POS = 0
            m_iter = t_iter = 0
            interval = num_iter / 20  # log progress roughly every 5%
            while step < num_iter and not coord.should_stop():
                if t_iter > interval:
                    t_iter = 0
                    tf.logging.info('Evaluation finish--%.2f %%' % (m_iter * 100 / num_iter))
                confusion_matrix, top_k, predictions, label_ref = sess.run(
                    [confusion_matrix_op, top_k_op, logits, raw_info[0]])
                # Accumulate the 2x2 confusion matrix:
                # rows = ground truth, columns = predictions.
                TP += confusion_matrix[1][1]
                TN += confusion_matrix[0][0]
                FP += confusion_matrix[0][1]
                FN += confusion_matrix[1][0]
                TOTAL_POS = TP + FN
                TOTAL_NEG = TN + FP
                step += 1
                t_iter += 1
                m_iter += 1

            print("Positive Number = %d, Negative Number = %d" % (TOTAL_POS, TOTAL_NEG))
            # Guard against empty counts to avoid ZeroDivisionError
            # (e.g. num_iter == 0 or a class absent from the sample).
            total = TOTAL_POS + TOTAL_NEG
            if total:
                # Overall patch-level accuracy for this evaluation pass.
                print("True Patch Detected Accuracy: %f" % (float(TP + TN) / float(total)))
            if TOTAL_POS:
                # Sensitivity: fraction of positive (tumour) patches detected.
                print("Sensitivity(TPR): %f" % (float(TP) / TOTAL_POS))
            if TOTAL_NEG:
                # Specificity: fraction of negative (normal) patches detected.
                print("Specificity(TNR): %f" % (float(TN) / TOTAL_NEG))
        except Exception as e:
            coord.request_stop(e)

        coord.request_stop()
        # Give queue threads 10 seconds to shut down cleanly.
        coord.join(threads, stop_grace_period_secs=10)

def test():
    """Build the evaluation graph and run the evaluation loop.

    Constructs the input pipeline and network, prepares variable restoring
    (optionally from moving averages), then calls test_once() repeatedly
    until FLAGS.run_once stops the loop.

    Raises:
        ValueError: if FLAGS.dataset_dir is not set.
    """
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')

    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default():
        global_step = slim.create_global_step()
        # Read evaluation batches from the configured split. Previously the
        # split name, batch size and fast_mode were hard-coded ('validation',
        # 100, True), silently ignoring the corresponding flags; the flag
        # defaults match those values, so default behavior is unchanged.
        image_batch, label_batch, _, _, _ = inputPipeLine(
            FLAGS.is_train_data,
            batchSize=FLAGS.batch_size,
            fast_mode=FLAGS.fast_mode,
            Data_Dir=FLAGS.dataset_dir,
            numEpochs=None)
        logits, _ = model_select(FLAGS.model_name, image_batch)

        # Restore moving averages of the variables instead of the raw
        # variables when moving_average_decay is set; variables_to_restore()
        # maps variable names to their shadow moving-average values.
        if FLAGS.moving_average_decay:
            variable_averages = tf.train.ExponentialMovingAverage(
                FLAGS.moving_average_decay, global_step)
            variables_to_restore = variable_averages.variables_to_restore(
                slim.get_model_variables())
            variables_to_restore[global_step.op.name] = global_step
        else:
            variables_to_restore = slim.get_variables_to_restore()

        raw_info = [label_batch]
        # Confusion matrix: rows = ground truth, columns = predictions.
        confusion_matrix_op = metrics.confusion_matrix(
            raw_info[0], tf.argmax(logits, axis=1))
        # Bool per example: is the ground-truth label the top-1 logit?
        top_k_op = tf.nn.in_top_k(logits, raw_info[0], 1)

        saver = tf.train.Saver(variables_to_restore)

        summary_op = tf.summary.merge_all()

        graph_def = tf.get_default_graph().as_graph_def()
        summary_writer = tf.summary.FileWriter(FLAGS.eval_log_dir,
                                               graph=graph_def)

        while True:
            test_once(saver, summary_writer, summary_op, top_k_op,
                      confusion_matrix_op, logits, raw_info)
            if FLAGS.run_once:
                break
            # Sleep between evaluation passes.
            time.sleep(FLAGS.eval_interval_secs)

def main(argv=None):  # pylint: disable=unused-argument
    """Entry point: start from a clean eval-log directory, then evaluate."""
    log_dir = FLAGS.eval_log_dir
    if gfile.Exists(log_dir):
        gfile.DeleteRecursively(log_dir)
    gfile.MakeDirs(log_dir)
    # Run the evaluation loop.
    test()

# def main(_):
#   if not FLAGS.dataset_dir:
#     raise ValueError('You must supply the dataset directory with --dataset_dir')

#   tf.logging.set_verbosity(tf.logging.INFO)
#   with tf.Graph().as_default():
#     tf_global_step = slim.get_or_create_global_step()


#     images, labels,_,_,_=inputPipeLine('test', batchSize = 100, fast_mode = True, Data_Dir = FLAGS.dataset_dir, numEpochs = None)
#     logits, _ = model_select('resnet50', images)

#     if FLAGS.moving_average_decay:
#       variable_averages = tf.train.ExponentialMovingAverage(
#           FLAGS.moving_average_decay, tf_global_step)
#       variables_to_restore = variable_averages.variables_to_restore(
#           slim.get_model_variables())
#       variables_to_restore[tf_global_step.op.name] = tf_global_step
#     else:
#       variables_to_restore = slim.get_variables_to_restore()

#     # print (variables_to_restore)
#     # pdb.set_trace()
#     predictions = tf.argmax(logits, 1)
#     labels = tf.squeeze(labels)

#     # Define the metrics:
#     names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
#         'Accuracy': slim.metrics.streaming_accuracy(predictions, labels),
#         'Auc': slim.metrics.streaming_auc(predictions, labels),
#         'TP': slim.metrics.streaming_true_positives(predictions, labels),
#         'FP': slim.metrics.streaming_false_positives(predictions, labels),
#         'TN': slim.metrics.streaming_true_negatives(predictions, labels),
#         'FN': slim.metrics.streaming_false_negatives(predictions, labels)
#     })

#     # Print the summaries to screen.
#     for name, value in names_to_values.items():
#       summary_name = 'eval/%s' % name
#       op = tf.summary.scalar(summary_name, value, collections=[])
#       op = tf.Print(op, [value], summary_name)
#       tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)

#     # TODO(sguada) use num_epochs=1
#     if FLAGS.max_num_batches:
#       num_batches = FLAGS.max_num_batches
#     else:
#       # This ensures that we make a single pass over all of the data.
#       num_batches = math.ceil(10000 / float(FLAGS.batch_size))

#     if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
#       checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
#     else:
#       checkpoint_path = FLAGS.checkpoint_path

#     tf.logging.info('Evaluating %s' % checkpoint_path)

#     slim.evaluation.evaluate_once(
#         master=FLAGS.master,
#         checkpoint_path=checkpoint_path,
#         logdir=FLAGS.eval_dir,
#         num_evals=num_batches,
#         eval_op=list(names_to_updates.values()),
#         variables_to_restore=variables_to_restore)


if __name__ == '__main__':
  tf.app.run()  # parses command-line flags, then calls main()

'''

