from functools import wraps
from tensorflow.keras.layers import Conv2D, Add, ZeroPadding2D, UpSampling2D, Concatenate, MaxPooling2D, Lambda, add
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.regularizers import l2
import sys
sys.path.append("./")
#from utils.utils import compose
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.utils  import plot_model
from tensorflow.keras.applications.resnet50 import ResNet50
from model import build_model
import tensorflow.keras.backend as K
from tensorflow.keras.applications import MobileNetV2
from dataloader import DataGen
import os
import cv2


def Decector(output):
    """Extract the top-100 heatmap peaks from a network output tensor.

    ``output`` is indexed as ``[..., :-4]`` for the heatmap, so the last
    four channels are assumed to carry box regressions (wh + offset) --
    TODO confirm against the model head.

    Returns:
        (scores, flat_indices, class_indices), each shaped [batch, 100]:
        the 100 strongest heatmap activations per batch element, their
        flat indices into the h*w*c map, and the class channel each
        peak came from.
    """
    # Drop the 4 trailing regression channels; the rest is the class heatmap.
    heatmap = output[..., :-4]
    shape = tf.shape(heatmap)
    n_batch, n_classes = shape[0], shape[3]
    # Row-major flatten: flat index = ((row * w) + col) * c + channel.
    flat = tf.reshape(heatmap, [n_batch, -1])
    top_socres, top_inds = tf.nn.top_k(flat, 100, name='top100')
    # flat index mod channel count -> which class map the peak belongs to
    cls_re = tf.cast(tf.mod(top_inds, n_classes), tf.int32)
    return top_socres, top_inds, cls_re





def get_batch_boxes(lbl_ph,top_socres_true,top_inds_true,cls_re_true):
    '''
    Decode top-k heatmap peak indices into normalized [y1,x1,y2,x2] boxes.

    top_inds_true :?,100   flat indices into the h*w*c heatmap (from Decector)
    cls_re_true:?,100      class index of each peak (unused here)
    top_socres_true:?,100  peak scores (unused here)

    lbl_ph is assumed to be laid out [batch,128,128,10+2+2] with wh in
    channels -4:-2 and centre offsets in the last 2 -- TODO confirm.
    The hard-coded 10 must match the class count used by Decector's mod.
    '''
    #1. Locate each peak's centre: flat index // num_classes (10) gives the
    #   spatial index on a single-channel 128x128 map.
    ids = tf.cast(tf.divide(top_inds_true,10),dtype=tf.int32) # position on single-channel map
    w_pos = tf.expand_dims(tf.mod(ids,128),axis=-1) # column index
    h_pos =  tf.expand_dims(tf.cast(tf.divide(ids,128),tf.int32),axis=-1)# row index
    ct_pos = tf.cast(tf.concat([h_pos,w_pos],axis=-1) ,dtype=tf.float32)
    # 2. Gather each peak's predicted wh and centre offset from the label map.
    offset_ = tf.reshape(lbl_ph[...,-2:],[-1,128*128,2])
    wh_ = tf.reshape(lbl_ph[...,-4:-2],[-1,128*128,2])
    offset_ = tf.batch_gather(offset_,ids)
    wh_ = tf.batch_gather(wh_,ids)
    #3. Box corner computation.
    ct_pos = ct_pos + offset_ # refine centre with the sub-pixel offset
    y1x1= (ct_pos-wh_/2*128.)/128. # top-left corner, normalized to [0,1]
    y2x2= (ct_pos+wh_/2*128.)/128. # bottom-right corner, normalized to [0,1]
    boxes_per_batach = tf.concat([y1x1,y2x2],axis=-1)
    return boxes_per_batach


def get_nms_result(boxes_per_bath_true,cls_re_true,top_socres_true,batch_size=8,num_classes=10):
    """Run per-class non-max suppression on each batch element.

    Args:
        boxes_per_bath_true: [batch, 100, 4] candidate boxes (y1,x1,y2,x2).
        cls_re_true: [batch, 100] class index of each candidate.
        top_socres_true: [batch, 100] score of each candidate.
        batch_size: number of batch elements to process (was hard-coded 8).
        num_classes: number of classes to NMS separately (was hard-coded 10).

    Returns:
        A list of ``batch_size`` tensors, each [?, 5] as [y1,x1,y2,x2,class].
    """
    def _get_nms_result(boxes, clss, socres):
        # NMS each class independently, then stack the survivors.
        boxes_final = []
        for i in range(num_classes):
            indices = tf.reshape(tf.where(tf.equal(clss,i)),[-1,])
            scores_for_clss = tf.gather(socres,indices)
            boxes_for_clss = tf.gather(boxes,indices)
            # Standard IoU-0.5 NMS; keeps at most all 100 candidates.
            keep = tf.image.non_max_suppression(boxes_for_clss,scores_for_clss,max_output_size=100,iou_threshold=0.5)
            boxes_for_clss_final = tf.gather(boxes_for_clss,keep)
            # Attach the class id as a fifth column (broadcast via *0+1 trick).
            cls_re = tf.expand_dims((boxes_for_clss_final[...,0]*0+1.)*i,axis=-1)
            result = tf.concat([boxes_for_clss_final,cls_re],axis=-1) #[y1,x1,y2,x2,c]
            boxes_final.append(result)
        return tf.concat(boxes_final,axis=0)

    results = []
    for i in range(batch_size):
        results.append(_get_nms_result(boxes_per_bath_true[i],cls_re_true[i],top_socres_true[i]))
    return results


def get_boxes(lbl_ph,batch_size,image_tenor,threshold):
    """Decode a label/prediction tensor into images with boxes drawn on them.

    Args:
        lbl_ph: [batch,128,128,14] heatmap+wh+offset tensor (label or output).
        batch_size: number of images in the batch.
        image_tenor: [batch, H, W, 3] images to draw on.
        threshold: minimum peak score for a box to survive.

    Returns:
        [batch, H, W, 3] images with bounding boxes drawn.

    Fix: the per-image loop previously iterated ``range(8)`` and ignored
    the ``batch_size`` parameter entirely.
    """
    NMS = True
    top_socres_true,top_inds_true,cls_re_true = Decector(lbl_ph)
    boxes_per_bath_ = get_batch_boxes(lbl_ph,top_socres_true,top_inds_true,cls_re_true)
    # Zero out boxes whose peak score is below the threshold.
    top_socres_mask_true = tf.greater(top_socres_true,threshold)
    top_socres_mask_true = tf.cast(tf.expand_dims(top_socres_mask_true,axis=-1),tf.float32)
    boxes_per_bath_true = boxes_per_bath_ * top_socres_mask_true         # thresholded, no NMS yet

    if NMS:
        # NOTE(review): get_nms_result defaults to 8 batch elements; keep
        # batch_size consistent with it when changing the batch.
        boxes_nms_result = get_nms_result(boxes_per_bath_true,cls_re_true,top_socres_true)
        image_with_boxes_true = []
        for i in range(batch_size):  # was range(8): honor the batch_size argument
            img = tf.expand_dims(image_tenor[i],axis=0)
            boxes = tf.expand_dims(boxes_nms_result[i],axis=0)[...,:4]
            img = tf.image.draw_bounding_boxes(img,boxes,name=None)
            image_with_boxes_true.append(img)
        image_with_boxes_true = tf.concat(image_with_boxes_true,axis=0,name=None)
    else:
        image_with_boxes_true = tf.image.draw_bounding_boxes(image_tenor,boxes_per_bath_true,name=None)
    return image_with_boxes_true


if __name__ == "__main__":
    model = build_model()
    batch_size = 8
    # 1. Build/load the model (losses are produced by internal loss layers,
    #    hence the None placeholders; only outputs 1-3 are weighted).
    model.compile(optimizer=tf.keras.optimizers.Adam(lr=1e-3), loss=[None,None,None,None,None],loss_weights=[0,1.,1.,1.,0],metrics=[])
    model.load_weights('./log_path/model-208000.h5')
    sess = tf.keras.backend.get_session()

    # 2. TensorBoard summaries: scalar placeholders for validation losses
    #    plus image summaries of ground-truth and predicted boxes.
    loss_val_hm_ph = K.placeholder(dtype="float32")
    loss_val_wh_ph = K.placeholder(dtype="float32")
    loss_val_reg_ph = K.placeholder(dtype="float32")
    loss_val_total_ph = K.placeholder(dtype="float32")

    lbl_ph = K.placeholder(dtype="float32")
    train_writer = tf.summary.FileWriter('log_path/train',sess.graph)
    val_writer=tf.summary.FileWriter('log_path/val')

    with tf.name_scope("imageSrc"):
        # Ground-truth boxes drawn from the label tensor (high threshold:
        # GT heatmap peaks are ~1.0).
        image_boxes_true = get_boxes(lbl_ph,batch_size,model.input[0],0.999)
        tf.summary.image("image_with_boxes_true", image_boxes_true)
        # Max over class channels gives a single-channel GT heatmap image.
        image_mask_true = tf.expand_dims(tf.reduce_max(lbl_ph[...,:-4],-1),-1)
        tf.summary.image("image_mask_true", image_mask_true)

    with tf.name_scope("imageResult"):
        # Predicted boxes drawn from the model's heatmap output.
        image_boxes_pred = get_boxes(model.output[0],batch_size,model.input[0],0.7)
        tf.summary.image("image_with_boxes_pred", image_boxes_pred)
        image_mask_pred = tf.expand_dims(tf.reduce_max(model.output[0][...,:-4],-1),-1)
        tf.summary.image("image_mask_pred", image_mask_pred)

    with tf.name_scope("losses"):
        tf.summary.scalar("hm_loss",K.sum(model.get_layer('hm_loss').output))
        tf.summary.scalar("wh_loss",K.sum(model.get_layer('wh_loss').output))
        tf.summary.scalar("reg_loss",K.sum(model.get_layer('reg_loss').output))
        tf.summary.scalar("total_loss",K.sum(model.get_layer('total_loss').output))
    merge = tf.summary.merge_all()

    with tf.name_scope("val_losses"):
        tf.summary.scalar("loss_val_hm_loss",loss_val_hm_ph)
        tf.summary.scalar("loss_val_wh_loss",loss_val_wh_ph)
        tf.summary.scalar("loss_val_reg_loss",loss_val_reg_ph)
        tf.summary.scalar("loss_val_total_loss",loss_val_total_ph)

    # Merge only the validation-loss summaries so they can be written
    # without feeding the training placeholders.
    valloss_summary = tf.summary.merge([tf.get_collection(tf.GraphKeys.SUMMARIES,'val_losses/loss_val_hm_loss'),
                                        tf.get_collection(tf.GraphKeys.SUMMARIES,'val_losses/loss_val_wh_loss'),
                                        tf.get_collection(tf.GraphKeys.SUMMARIES,'val_losses/loss_val_reg_loss'),
                                        tf.get_collection(tf.GraphKeys.SUMMARIES,'val_losses/loss_val_total_loss')])

    # 3. Training loop (resumes from the loaded checkpoint's step).
    global_step = 208000
    for img,lbl in  DataGen(batch_size):
        model.train_on_batch([img,lbl])
        output = model.predict([img,lbl])
        global_step+=1

        if global_step%30==0:
            m = sess.run(merge,feed_dict={model.input[0]:img,model.input[1]:lbl,lbl_ph:lbl})
            train_writer.add_summary(m,global_step=global_step)
            train_writer.flush()

        if (global_step % 1000 == 0):
            model.save_weights(os.path.join('log_path',"model-%d.h5" % global_step))

        if (global_step% 5000==0):  # every 5000 steps, validate on 128 batches
            loss_val ,n= 0,0
            try:
                for X_test,Y_test in  DataGen(batch_size,img_zip='./data/mchar_val.zip',label_json='./data/mchar_val.json'):
                    if n==128:
                        break
                    # Fix: evaluate the VALIDATION batch, not the training
                    # batch (previously predicted on img/lbl).
                    loss_val_re = model.predict([X_test,Y_test])[1:]
                    loss_val+=np.asarray(loss_val_re)
                    n+=1
                loss_val = np.reshape(loss_val,[-1,])/n
                m_val = sess.run(valloss_summary,feed_dict={loss_val_hm_ph:loss_val[0],loss_val_wh_ph:loss_val[1],loss_val_reg_ph:loss_val[2],loss_val_total_ph:loss_val[3]})
                val_writer.add_summary(m_val,global_step=global_step)
                val_writer.flush()
            except Exception as e:
                # Best-effort validation: don't kill training, but do report
                # the failure instead of silently swallowing it.
                print('validation failed at step {}: {}'.format(global_step, e))
        print('step{}:\t  hm_loss {:.4f}\t wh_loss {:.4f}\t reg_loss {:.4f}'.format(global_step,sum(output[1]),sum(output[2]),sum(output[3])))