import math

from config import Input_shape, channels, model_type, MODEL_PATH, num_class
from yolo_v3 import yolo_v3

from loss_function import compute_loss
from checkData import show_data, INPUT_SIZE
from utils.yolo_utils import save_np_data, read_anchors, Data_Gen

from PIL import ImageFile

from yolo_v3_tiny import yolo_v3_tiny

# Allow PIL to open image files that are truncated instead of raising an error.
ImageFile.LOAD_TRUNCATED_IMAGES = True

import numpy as np
import tensorflow as tf
import os

# Restrict CUDA to GPUs 0 and 1 for this process.
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"

# Fixed seed so any numpy-based randomness is reproducible across runs.
np.random.seed(101)




def Valid(model_dir, model_name, np_pkg_dir, data_root, case_file, result_file, error_threshold):
    """Run a trained YOLO model over a validation set and record hard cases.

    Restores the checkpoint, evaluates the loss sample-by-sample, and copies
    the annotation line of every sample whose loss exceeds ``error_threshold``
    (or is NaN) into ``result_file`` so it can be inspected with ``show_data``.

    Args:
        model_dir: checkpoint sub-directory under MODEL_PATH to restore from.
        model_name: checkpoint file name inside ``model_dir`` (e.g. 'model.ckpt-15').
        np_pkg_dir: directory under ``data_root`` holding the pre-packed numpy batches.
        data_root: root directory of the validation data set.
        case_file: annotation file whose (non-blank, non-comment) lines correspond
            in order to the samples in the numpy packages.
        result_file: output file (under ``data_root``) receiving bad-case lines.
        error_threshold: loss value above which a sample is recorded.
    """
    annotation_valid = os.path.join(data_root, case_file)
    annotation_result = os.path.join(data_root, result_file)
    if model_type == 'N':
        anchors_paths = os.path.join(data_root, 'normal_anchor.txt')
    else:
        anchors_paths = os.path.join(data_root, 'tiny_anchor.txt')

    anchors = read_anchors(anchors_paths)
    graph = tf.Graph()
    with graph.as_default():
        # STEP 1: Input placeholders.
        X = tf.placeholder(tf.float32, shape=[None, Input_shape, Input_shape, channels], name='Input')  # for image_data
        phase = tf.placeholder(tf.bool, shape=[], name='Phase')
        with tf.name_scope("Target"):
            # Use integer division: under Python 3, `/` yields floats and
            # TensorFlow shape dimensions must be integers (or None).
            Y1 = tf.placeholder(tf.float32, shape=[None, Input_shape // 32, Input_shape // 32, 3, 8 + num_class], name='target_S1')
            Y2 = tf.placeholder(tf.float32, shape=[None, Input_shape // 16, Input_shape // 16, 3, 8 + num_class], name='target_S2')
            Y3 = tf.placeholder(tf.float32, shape=[None, Input_shape // 8, Input_shape // 8, 3, 8 + num_class], name='target_S3')
        # Reshape images for TensorBoard visualization (single-channel slices).
        x_reshape = tf.reshape(X, [-1, Input_shape, Input_shape, 1])
        tf.summary.image("input", x_reshape)

        model = yolo_v3 if model_type == 'N' else yolo_v3_tiny
        scale_pred = model(X, phase)
        y_true = [Y1, Y2, Y3]

        with tf.name_scope("Loss_and_Detect"):
            # Per-sample loss used to flag hard cases below.
            loss, wrong_order, class_accuracy, loss_list, log_data = compute_loss(
                scale_pred, y_true, anchors, num_class, is_training=phase, print_loss=False)

        # Restore trainable variables plus batch-norm moving statistics.
        g_list = tf.global_variables()
        bn_moving_vars = [g for g in g_list if 'moving_avg' in g.name or 'moving_var' in g.name]
        var_list = tf.trainable_variables() + bn_moving_vars
        loader = tf.train.Saver(var_list=var_list)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config, graph=graph) as sess:
            np_path_valid = os.path.join(data_root, np_pkg_dir)
            data_gen = Data_Gen(np_path_valid)
            load_checkpoints_dir = os.path.join(MODEL_PATH, model_dir)
            var_file = os.path.join(load_checkpoints_dir, model_name)
            loader.restore(sess, var_file)  # restore the latest variables from the checkpoint

            with open(annotation_valid) as f:
                GG = f.readlines()

            file_index = 0
            rcd_num = 0
            is_end = [False, '']  # Data_Gen sets is_end[0] True once all packages are consumed
            accumulate_num = 0
            # `with` guarantees the result file is closed even if evaluation fails.
            with open(annotation_result, 'w') as bad_data:
                while not is_end[0]:
                    x_train, box_data_train, image_shape_train, y_train = data_gen.load_pkg(is_end)
                    number_image_train = np.shape(x_train)[0]
                    print("number_image_train", number_image_train)

                    for idx in range(number_image_train):
                        feed_in = {X: x_train[idx:idx + 1] / 255.,
                                   phase: False,
                                   Y1: y_train[0][idx:idx + 1],
                                   Y2: y_train[1][idx:idx + 1],
                                   }
                        if model_type == 'N':
                            # Only the full model produces a third scale target.
                            feed_in[Y3] = y_train[2][idx:idx + 1]

                        loss_train = sess.run(loss, feed_dict=feed_in)
                        # Advance to the annotation line matching this sample,
                        # skipping blank and commented-out lines.
                        line = GG[file_index]
                        while line == '\n' or line.startswith('#'):
                            file_index += 1
                            line = GG[file_index]
                        file_index += 1

                        if loss_train > error_threshold or math.isnan(loss_train):
                            rcd_num += 1
                            filename = line.split('.png')[0] + '.png'
                            bad_data.write(line)
                            print(
                                "(start: %s)\tdetect_loss: %s\tcase =%s"
                                % (idx + accumulate_num, loss_train, filename))

                    accumulate_num += number_image_train

            print('record data num: %d' % rcd_num)
            # Visualize the recorded bad cases.
            show_data(data_root, result_file, INPUT_SIZE, anchor_num=None)
            

if __name__ == '__main__':
    # Validation run configuration; edit these values before launching.
    valid_args = dict(
        model_dir='20190529-2102/rutine',
        model_name='model.ckpt-15',
        np_pkg_dir='valid_np_pkg',
        data_root='F:/ProjectData/yolo3/newData/AIData3/1aitest',
        # case_file must correspond to the np_pkg data, or the output records are invalid
        case_file='train.txt',
        result_file='valid_result.txt',
        error_threshold=200,
    )
    Valid(**valid_args)



