
# coding: utf-8

# In[1]:


import tensorflow as tf
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Conv2D, MaxPooling2D, PReLU, Dense, Flatten
from tensorflow.keras.initializers import Constant
import numpy as np
# Online-hard-example-mining ratio: only the hardest 70% of valid samples
# contribute to the classification loss (used by cls_ohem below).
num_keep_radio=0.7


class P_Net(Model):
    """Proposal network (stage 1 of an MTCNN-style cascade).

    Fully convolutional: for every spatial location it predicts a 2-way
    face/non-face softmax, 4 bounding-box regression offsets and 10
    landmark coordinates through three parallel 1x1 conv heads.
    """

    def __init__(self, **kwargs):
        super(P_Net, self).__init__(**kwargs)
        # 3x3 conv, 10 filters, channel-shared PReLU (alpha initialised to 0.25).
        self.conv1 = Conv2D(10, 3, activation=PReLU(Constant(value=0.25), shared_axes=[1, 2]),
                            kernel_regularizer=tf.keras.regularizers.l2(0.0005), name="conv1")

        self.pool1 = MaxPooling2D(pool_size=(2, 2), strides=2, padding='same', name='pool1')

        self.conv2 = Conv2D(16, 3, activation=PReLU(Constant(value=0.25), shared_axes=[1, 2]),
                            kernel_regularizer=tf.keras.regularizers.l2(0.0005), name="conv2")

        self.conv3 = Conv2D(32, 3, activation=PReLU(Constant(value=0.25), shared_axes=[1, 2]),
                            kernel_regularizer=tf.keras.regularizers.l2(0.0005), name="conv3")

        # Face / non-face probability head (softmax over 2 classes).
        self.conv4_1 = Conv2D(2, 1, activation=tf.nn.softmax, kernel_regularizer=tf.keras.regularizers.l2(0.0005),
                              name="conv4_1")

        # Bounding-box regression head (4 offsets per location).
        self.bbox_pred = Conv2D(4, 1, activation=None, kernel_regularizer=tf.keras.regularizers.l2(0.0005),
                                name="conv4_2")

        # Landmark regression head (5 points -> 10 coordinates).
        self.landmark_pred = Conv2D(10, 1, activation=None, kernel_regularizer=tf.keras.regularizers.l2(0.0005),
                                    name="conv4_3")

    def call(self, inputs, label=None, bbox_target=None, landmark_target=None, training=None):
        """Forward pass.

        When ``training`` is truthy, ``label``/``bbox_target``/``landmark_target``
        must be supplied and the return is
        ``(cls_loss, bbox_loss, landmark_loss, L2_loss, accuracy)``.
        Otherwise returns the raw per-location maps
        ``(cls_prob, bbox_pred, landmark_pred)`` with axis 0 squeezed
        (inference path assumes batch size 1 — TODO confirm callers).
        """
        x = self.conv1(inputs)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        conv4_1 = self.conv4_1(x)
        bbox_pred = self.bbox_pred(x)
        landmark_pred = self.landmark_pred(x)
        if training:
            # Squeezing axes [1, 2] assumes the conv output is 1x1 spatially,
            # i.e. training crops are the stage's canonical input size
            # (presumably 12x12 — verify against the data pipeline).
            cls_prob = tf.squeeze(conv4_1, [1, 2], name='cls_prob')  # [batch,2]
            cls_loss = cls_ohem(cls_prob, label)

            bbox_pred = tf.squeeze(bbox_pred, [1, 2], name='bbox_pred')  # [bacth,4]
            bbox_loss = bbox_ohem(bbox_pred, bbox_target, label)

            landmark_pred = tf.squeeze(landmark_pred, [1, 2], name='landmark_pred')  # [batch,10]
            landmark_loss = landmark_ohem(landmark_pred, landmark_target, label)

            accuracy = cal_accuracy(cls_prob, label)

            # Sum of per-layer L2 kernel regularization terms collected by Keras.
            L2_loss = sum(self.losses)

            return cls_loss, bbox_loss, landmark_loss, L2_loss, accuracy
        else:
            cls_pro_test = tf.squeeze(conv4_1, axis=0)
            bbox_pred_test = tf.squeeze(bbox_pred, axis=0)
            landmark_pred_test = tf.squeeze(landmark_pred, axis=0)
            return cls_pro_test, bbox_pred_test, landmark_pred_test


class R_Net(Model):
    """Refine network (stage 2): re-scores and refines stage-1 candidates.

    Conv stack flattened into a 128-unit dense layer feeding three heads:
    2-way face softmax, 4 bbox offsets and 10 landmark coordinates.
    """

    def __init__(self, **kwargs):
        super(R_Net, self).__init__(**kwargs)
        self.conv1 = Conv2D(28, 3, activation=PReLU(Constant(value=0.25), shared_axes=[1, 2]),
                            kernel_regularizer=tf.keras.regularizers.l2(0.0005), name="conv1")

        self.pool1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='same', name='pool1')

        self.conv2 = Conv2D(48, 3, activation=PReLU(Constant(value=0.25), shared_axes=[1, 2]),
                            kernel_regularizer=tf.keras.regularizers.l2(0.0005), name="conv2")

        # NOTE(review): pool2 uses the default 'valid' padding while pool1 is
        # 'same' — presumably intentional to reach the expected feature size.
        self.pool2 = tf.keras.layers.MaxPooling2D(pool_size=(3, 3), strides=2, name='pool2')

        self.conv3 = tf.keras.layers.Conv2D(64, 2, activation=PReLU(Constant(value=0.25), shared_axes=[1, 2]),
                            kernel_regularizer=tf.keras.regularizers.l2(0.0005), name="conv3")

        self.fc_flatten = Flatten()

        # Dense bottleneck; no explicit activation (linear).
        self.fc1 = Dense(128, name="fc1")

        # Output heads.
        self.cls_prob = Dense(2, activation=tf.nn.softmax, name="cls_fc")
        self.bbox_pred = Dense(4, activation=None, name="bbox_fc")
        self.landmark_pred = Dense(10, activation=None, name="landmark_fc")

    @tf.function
    def call(self, inputs, label=None, bbox_target=None, landmark_target=None, training=None):
        """Forward pass (graph-compiled via tf.function).

        Training mode returns
        ``(cls_loss, bbox_loss, landmark_loss, L2_loss, accuracy)``;
        inference mode returns ``(cls_prob, bbox_pred, landmark_pred)``.
        """
        x = self.conv1(inputs)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.pool2(x)
        x = self.conv3(x)
        x = self.fc_flatten(x)
        x = self.fc1(x)
        cls_prob = self.cls_prob(x)
        bbox_pred = self.bbox_pred(x)
        landmark_pred = self.landmark_pred(x)
        if training:
            cls_loss = cls_ohem(cls_prob, label)

            bbox_loss = bbox_ohem(bbox_pred, bbox_target, label)

            landmark_loss = landmark_ohem(landmark_pred, landmark_target, label)

            accuracy = cal_accuracy(cls_prob, label)
            # Sum of per-layer L2 kernel regularization terms.
            L2_loss = sum(self.losses)

            return cls_loss,bbox_loss,landmark_loss,L2_loss,accuracy
        else:
            return cls_prob,bbox_pred,landmark_pred


class O_Net(Model):
    """Output network (stage 3): final scoring, box and landmark regression.

    Deepest of the three stages: four conv blocks flattened into a
    256-unit dense layer feeding the same three heads as R_Net.
    """

    def __init__(self, **kwargs):
        super(O_Net, self).__init__(**kwargs)
        self.conv1 = Conv2D(32, 3, activation=PReLU(Constant(value=0.25), shared_axes=[1, 2]),
                            kernel_regularizer=tf.keras.regularizers.l2(0.0005), name="conv1")

        self.pool1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='same', name='pool1')

        self.conv2 = Conv2D(64, 3, activation=PReLU(Constant(value=0.25), shared_axes=[1, 2]),
                            kernel_regularizer=tf.keras.regularizers.l2(0.0005), name="conv2")

        # 'valid' padding here (default), unlike pool1/pool3 — presumably
        # intentional to hit the expected feature-map size.
        self.pool2 = MaxPooling2D(pool_size=(3, 3), strides=2, name='pool2')

        self.conv3 = Conv2D(64, 2, activation=PReLU(Constant(value=0.25), shared_axes=[1, 2]),
                            kernel_regularizer=tf.keras.regularizers.l2(0.0005), name="conv3")

        self.pool3 = MaxPooling2D(pool_size=(2, 2), strides=2, padding='same', name='pool3')

        self.conv4 = Conv2D(128, 2, activation=PReLU(Constant(value=0.25), shared_axes=[1, 2]),
                            kernel_regularizer=tf.keras.regularizers.l2(0.0005), name="conv4")

        self.fc_flatten = Flatten()

        # Dense bottleneck; no explicit activation (linear).
        self.fc1 = Dense(256, name="fc1")

        # Output heads.
        self.cls_prob = Dense(2, activation=tf.nn.softmax, name="cls_fc")
        self.bbox_pred = Dense(4, activation=None, name="bbox_fc")
        self.landmark_pred = Dense(10, activation=None, name="landmark_fc")

    @tf.function
    def call(self, inputs, label=None, bbox_target=None, landmark_target=None, training=None):
        """Forward pass (graph-compiled via tf.function).

        Training mode returns
        ``(cls_loss, bbox_loss, landmark_loss, L2_loss, accuracy)``;
        inference mode returns ``(cls_prob, bbox_pred, landmark_pred)``.
        """
        x = self.conv1(inputs)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.pool2(x)
        x = self.conv3(x)
        x = self.pool3(x)
        x = self.conv4(x)
        x = self.fc_flatten(x)
        x = self.fc1(x)
        cls_prob = self.cls_prob(x)
        bbox_pred = self.bbox_pred(x)
        landmark_pred = self.landmark_pred(x)
        if training:
            cls_loss = cls_ohem(cls_prob, label)

            bbox_loss = bbox_ohem(bbox_pred, bbox_target, label)

            landmark_loss = landmark_ohem(landmark_pred, landmark_target, label)

            accuracy = cal_accuracy(cls_prob, label)
            # Sum of per-layer L2 kernel regularization terms.
            L2_loss = sum(self.losses)

            return cls_loss,bbox_loss,landmark_loss,L2_loss,accuracy
        else:
            return cls_prob,bbox_pred,landmark_pred


def cls_ohem(cls_prob, label):
    """Classification cross-entropy with online hard example mining.

    Args:
        cls_prob: [batch, 2] softmax probabilities (non-face, face).
        label: [batch] float labels; 1 = positive, 0 = negative, negative
            values (part / landmark samples) are excluded from this loss.

    Returns:
        Scalar mean cross-entropy over the hardest ``num_keep_radio``
        fraction of the valid (label >= 0) samples.
    """
    zeros = tf.zeros_like(label)
    # Map invalid labels (< 0) to class 0 so the gather below stays in
    # range; those samples are masked out of the loss afterwards.
    label_filter_invalid = tf.where(tf.less(label, 0), zeros, label)

    # Flatten to [batch*2, 1]: example i's probability for class c sits at
    # row i*2 + c.
    num_cls_prob = tf.size(cls_prob)
    cls_prob_reshape = tf.reshape(cls_prob, [num_cls_prob, -1])
    label_int = tf.cast(label_filter_invalid, tf.int32)

    # BUGFIX: use the dynamic batch size. The original read the *static*
    # shape (cls_prob.get_shape()[0]), which is None when this is traced
    # inside a @tf.function with an unknown batch dimension and then
    # crashes in tf.cast / tf.range.
    num_row = tf.shape(cls_prob)[0]

    row = tf.range(num_row) * 2
    indices_ = row + label_int

    # Probability the model assigned to each example's ground-truth class.
    label_prob = tf.squeeze(tf.gather(cls_prob_reshape, indices_))
    loss = -tf.math.log(label_prob + 1e-10)

    zeros = tf.zeros_like(label_prob, dtype=tf.float32)
    ones = tf.ones_like(label_prob, dtype=tf.float32)
    # 1.0 for samples with label >= 0, 0.0 for part/landmark samples.
    valid_inds = tf.where(label < zeros, zeros, ones)
    num_valid = tf.reduce_sum(valid_inds)

    # Keep only the hardest num_keep_radio (70%) of the valid samples.
    keep_num = tf.cast(num_valid * num_keep_radio, dtype=tf.int32)

    loss = loss * valid_inds
    loss, _ = tf.nn.top_k(loss, k=keep_num)
    return tf.reduce_mean(loss)



def bbox_ohem(bbox_pred, bbox_target, label):
    """Bounding-box regression loss restricted to valid samples.

    Only samples whose label is 1 (positive) or -1 (part face) carry a
    bbox target; all such samples are kept (no hard-example ratio here).

    Args:
        bbox_pred: [batch, 4] predicted box offsets.
        bbox_target: [batch, 4] ground-truth box offsets.
        label: [batch] float sample labels.

    Returns:
        Scalar mean of the per-sample summed squared errors over the
        valid samples.
    """
    # 1.0 where |label| == 1 (sample has a bbox target), else 0.0.
    mask = tf.where(tf.equal(tf.abs(label), 1),
                    tf.ones_like(label, dtype=tf.float32),
                    tf.zeros_like(label, dtype=tf.float32))

    # Per-sample sum of squared coordinate errors.
    per_sample = tf.reduce_sum(tf.square(bbox_pred - bbox_target), axis=1)

    # Select exactly the valid samples via top_k on the masked errors.
    n_keep = tf.cast(tf.reduce_sum(mask), dtype=tf.int32)
    selected, _ = tf.nn.top_k(per_sample * mask, k=n_keep)
    return tf.reduce_mean(selected)



def landmark_ohem(landmark_pred, landmark_target, label):
    """Landmark regression loss restricted to landmark samples.

    Only samples labelled -2 carry landmark targets; all of them are kept
    (no hard-example ratio here).

    Args:
        landmark_pred: [batch, 10] predicted landmark coordinates.
        landmark_target: [batch, 10] ground-truth landmark coordinates.
        label: [batch] float sample labels.

    Returns:
        Scalar mean of the per-sample summed squared errors over the
        landmark samples.
    """
    # 1.0 where label == -2 (landmark sample), else 0.0.
    mask = tf.where(tf.equal(label, -2),
                    tf.ones_like(label, dtype=tf.float32),
                    tf.zeros_like(label, dtype=tf.float32))

    # Per-sample sum of squared coordinate errors.
    per_sample = tf.reduce_sum(tf.square(landmark_pred - landmark_target), axis=1)

    # Select exactly the landmark samples via top_k on the masked errors.
    n_keep = tf.cast(tf.reduce_sum(mask), dtype=tf.int32)
    selected, _ = tf.nn.top_k(per_sample * mask, k=n_keep)
    return tf.reduce_mean(selected)




def cal_accuracy(cls_prob, label):
    """Classification accuracy over samples whose label is non-negative.

    Args:
        cls_prob: [batch, 2] softmax probabilities.
        label: [batch] float labels; negative values are excluded.

    Returns:
        Scalar fraction of valid samples whose argmax prediction matches
        the label.
    """
    predictions = tf.argmax(cls_prob, axis=1)
    labels = tf.cast(label, tf.int64)

    # Indices of the valid (label >= 0) samples.
    valid_idx = tf.squeeze(tf.where(tf.greater_equal(labels, 0)))

    # Compare prediction and label on the valid subset only.
    matches = tf.equal(tf.gather(labels, valid_idx),
                       tf.gather(predictions, valid_idx))
    return tf.reduce_mean(tf.cast(matches, tf.float32))

