# coding: utf-8

# In[1]:


import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Conv2D, MaxPooling2D, PReLU, Flatten, Dense
from tensorflow import keras
from tensorflow.keras.initializers import Constant
import numpy as np
from tqdm import tqdm

num_keep_radio = 0.7


class P_Net(Model):
    """MTCNN stage-1 Proposal Network (PNet).

    Fully convolutional, so after training on 12x12 crops it can be run on
    images of arbitrary size. Per spatial location it emits:
      * 2-channel face / non-face softmax probabilities,
      * 4-channel bounding-box regression offsets,
      * 10-channel landmark offsets (5 (x, y) points).
    All conv kernels carry an L2(0.0005) weight regularizer, collected
    into ``self.losses`` and summed as the L2 term during training.
    """

    def __init__(self, **kwargs):
        super(P_Net, self).__init__(**kwargs)
        # NOTE(review): PReLU *layer* instances are passed as `activation=`.
        # Keras will call them like any callable, but confirm their alpha
        # weights appear in trainable_variables (the commented-out probe
        # below this class suggests the author was checking exactly that).
        self.conv1 = Conv2D(10, 3, activation=PReLU(Constant(value=0.25), shared_axes=[1, 2]),
                            kernel_regularizer=tf.keras.regularizers.l2(0.0005), name="conv1")

        self.pool1 = MaxPooling2D(pool_size=(2, 2), strides=2, padding='same', name='pool1')

        self.conv2 = Conv2D(16, 3, activation=PReLU(Constant(value=0.25), shared_axes=[1, 2]),
                            kernel_regularizer=tf.keras.regularizers.l2(0.0005), name="conv2")

        self.conv3 = Conv2D(32, 3, activation=tf.keras.layers.PReLU(Constant(value=0.25), shared_axes=[1, 2]),
                            kernel_regularizer=tf.keras.regularizers.l2(0.0005), name="conv3")

        # 1x1 conv head: face / non-face probabilities (softmax over 2 channels).
        self.conv4_1 = Conv2D(2, 1, activation=tf.nn.softmax, kernel_regularizer=tf.keras.regularizers.l2(0.0005),
                              name="conv4_1")

        # 1x1 conv head: bounding-box regression offsets (no activation).
        self.bbox_pred = Conv2D(4, 1, activation=None, kernel_regularizer=tf.keras.regularizers.l2(0.0005),
                                name="conv4_2")

        # 1x1 conv head: 5-point landmark regression offsets (no activation).
        self.landmark_pred = Conv2D(10, 1, activation=None, kernel_regularizer=tf.keras.regularizers.l2(0.0005),
                                    name="conv4_3")

    @tf.function
    def precall(self, inputs):
        """Run the shared backbone and the three heads.

        Returns the raw spatial maps (conv4_1, bbox_pred, landmark_pred);
        graph-compiled via tf.function.
        """
        x = self.conv1(inputs)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        conv4_1 = self.conv4_1(x)
        bbox_pred = self.bbox_pred(x)
        landmark_pred = self.landmark_pred(x)
        return conv4_1, bbox_pred, landmark_pred

    def call(self, inputs, label=None, bbox_target=None, landmark_target=None, training=None):
        """Forward pass.

        Training (label/bbox_target/landmark_target required): squeezes the
        1x1 spatial dims and returns
        (cls_loss, bbox_loss, landmark_loss, L2_loss, accuracy), using the
        OHEM loss helpers defined later in this module.
        Inference: returns the spatial maps with axis 0 squeezed —
        assumes a single-image batch at inference (TODO confirm callers).
        """
        conv4_1, bbox_pred, landmark_pred = self.precall(inputs)
        if training:
            cls_prob = tf.squeeze(conv4_1, [1, 2], name='cls_prob')  # [batch, 2]
            cls_loss = cls_ohem(cls_prob, label)

            bbox_pred = tf.squeeze(bbox_pred, [1, 2], name='bbox_pred')  # [batch, 4]
            bbox_loss = bbox_ohem(bbox_pred, bbox_target, label)

            landmark_pred = tf.squeeze(landmark_pred, [1, 2], name='landmark_pred')  # [batch, 10]
            landmark_loss = landmark_ohem(landmark_pred, landmark_target, label)

            accuracy = cal_accuracy(cls_prob, label)

            # Sum of the per-layer L2 regularization losses.
            L2_loss = sum(self.losses)

            return cls_loss, bbox_loss, landmark_loss, L2_loss, accuracy
        else:
            cls_pro_test = tf.squeeze(conv4_1, axis=0)
            bbox_pred_test = tf.squeeze(bbox_pred, axis=0)
            landmark_pred_test = tf.squeeze(landmark_pred, axis=0)
            return cls_pro_test, bbox_pred_test, landmark_pred_test


# model = P_Net()
# model.build(input_shape=(None, 12, 12, 3))
# model.summary()
#
# for varible in model.trainable_variables:
#     if varible.name == "conv1/p_re_lu/alpha:0":
#         print(varible.shape)


# data = np.random.normal(size=[32, 640, 480, 3])
#
# for i in tqdm(range(10000)):
#     final = model(tf.cast(data, tf.float32))

# data = np.random.normal(size=[32, 640, 480, 3])
# final = model(tf.cast(data, tf.float32))
#
# for varible in model.trainable_variables:
#     print(varible.name)
#     if varible.name == "conv1/p_re_lu/alpha:0":
#         print(varible.shape)
#
#
# data = np.random.normal(size=[32, 480, 480, 3])
# final = model(tf.cast(data, tf.float32))
#
# for varible in model.trainable_variables:
#     print(varible.name)
#     if varible.name == "conv1/p_re_lu/alpha:0":
#         print(varible.shape)


class R_Net(Model):
    """MTCNN stage-2 Refine Network (RNet).

    Consumes 24x24 candidate crops proposed by PNet. Three conv stages
    feed a 128-unit dense embedding, followed by three dense heads:
    face/non-face probabilities (2), bbox offsets (4), landmarks (10).
    """

    def __init__(self, **kwargs):
        super(R_Net, self).__init__(**kwargs)
        # NOTE(review): PReLU layer instances passed as `activation=` —
        # confirm their alpha weights are tracked as trainable variables.
        self.conv1 = Conv2D(28, 3, activation=PReLU(Constant(value=0.25), shared_axes=[1, 2]),
                            kernel_regularizer=tf.keras.regularizers.l2(0.0005), name="conv1")

        self.pool1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='same', name='pool1')

        self.conv2 = Conv2D(48, 3, activation=PReLU(Constant(value=0.25), shared_axes=[1, 2]),
                            kernel_regularizer=tf.keras.regularizers.l2(0.0005), name="conv2")

        # Unlike pool1, pool2 uses the default 'valid' padding.
        self.pool2 = MaxPooling2D(pool_size=(3, 3), strides=2, name='pool2')

        self.conv3 = Conv2D(64, 2, activation=PReLU(Constant(value=0.25), shared_axes=[1, 2]),
                            kernel_regularizer=tf.keras.regularizers.l2(0.0005), name="conv3")

        self.fc_flatten = Flatten()

        # 128-d embedding shared by the three output heads.
        self.fc1 = Dense(128, kernel_regularizer=tf.keras.regularizers.l2(0.0005), name="fc1")

        self.cls_prob = Dense(2, activation=tf.nn.softmax, kernel_regularizer=tf.keras.regularizers.l2(0.0005), name="cls_fc")
        self.bbox_pred = Dense(4, activation=None, kernel_regularizer=tf.keras.regularizers.l2(0.0005), name="bbox_fc")
        self.landmark_pred = Dense(10, activation=None, kernel_regularizer=tf.keras.regularizers.l2(0.0005), name="landmark_fc")

    def call(self, inputs, label=None, bbox_target=None, landmark_target=None, training=None):
        """Forward pass.

        Training: returns (cls_loss, bbox_loss, landmark_loss, L2_loss,
        accuracy) via the OHEM loss helpers defined later in this module.
        Inference: returns (cls_prob, bbox_pred, landmark_pred) batches.
        """
        x = self.conv1(inputs)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.pool2(x)
        x = self.conv3(x)
        x = self.fc_flatten(x)
        x = self.fc1(x)
        cls_prob = self.cls_prob(x)
        bbox_pred = self.bbox_pred(x)
        landmark_pred = self.landmark_pred(x)
        if training:
            cls_loss = cls_ohem(cls_prob, label)

            bbox_loss = bbox_ohem(bbox_pred, bbox_target, label)

            landmark_loss = landmark_ohem(landmark_pred, landmark_target, label)

            accuracy = cal_accuracy(cls_prob, label)
            # Sum of the per-layer L2 regularization losses.
            L2_loss = sum(self.losses)

            return cls_loss,bbox_loss,landmark_loss,L2_loss,accuracy
        else:
            return cls_prob,bbox_pred,landmark_pred

# --- Multi-threaded inference stress-test (script section) ---
from threading import Thread,Lock
# Global lock serialising access to the shared model across worker threads.
my_lock=Lock()

# Build PNet and restore the newest checkpoint from disk.
# NOTE(review): hard-coded absolute Windows path — parameterize before reuse.
model = P_Net()
ckpt = tf.train.Checkpoint(model=model)
ckpt_manager = tf.train.CheckpointManager(ckpt, "D:/project_python/MTCNN/MTCNN-tensorflow_v2/model_v2/PNet", max_to_keep=5)
# If no checkpoint exists, latest_checkpoint is None — presumably restore()
# then does nothing and the model keeps fresh weights; verify.
ckpt.restore(ckpt_manager.latest_checkpoint)
# model.build(input_shape=(None, 24, 24, 3))
# data = np.random.normal(size=[32, 24, 24, 3])
# while True:
#     final = model(tf.cast(data, tf.float32))
# print(final)


def fetch_content(d):
    """Worker body for the threading stress-test below.

    Runs 10 serialized PNet inferences on freshly generated random inputs,
    forcing each output tensor to materialise eagerly.

    Parameters:
        d: numpy array passed by the spawning loop; only its shape is
           printed — the actual inference inputs are generated inside
           the loop.
    """
    print(d.shape)
    for _ in range(10):
        # Random-height input: H in [1024, 1600), W fixed at 1024.
        data = np.random.normal(size=[1, np.random.randint(1024, 1600), np.random.randint(1024, 1025), 3])
        # Use `with` so the lock is released even if inference raises;
        # the original acquire()/release() pair would leave the lock held
        # on an exception and deadlock every other worker thread.
        with my_lock:
            t1, t2, t3 = model(tf.cast(data, tf.float32))
            # .numpy() forces evaluation of each output tensor.
            t1.numpy()
            t2.numpy()
            t3.numpy()

# Spawn batches of `size` worker threads forever; each batch is joined
# before the next one starts, so at most `size` workers run at once.
# NOTE(review): this loop never terminates — stress-test scaffolding only.
threads = []
while True:
    size = 10
    for i in range(size):
        # Each worker gets its own random array; only its shape is used.
        t = Thread(target=fetch_content, args=[np.random.normal(size=[1, np.random.randint(1024, 1600), np.random.randint(1024, 1025), 3])])
        t.start()
        threads.append(t)

    # Always true here (size threads were just appended); kept as a guard.
    if len(threads) >= size:
        for t in threads:
            t.join()
        threads=[]

# model.summary()
#
# for i in tqdm(range(10000)):
#     final = model(tf.cast(data, tf.float32))

# data = np.random.normal(size=[32, 640, 480, 3])
# final = model(tf.cast(data, tf.float32))


class O_Net(Model):
    """MTCNN stage-3 Output Network (ONet).

    Consumes 48x48 candidate crops refined by RNet. Four conv stages feed
    a 256-unit dense embedding, followed by three dense heads:
    face/non-face probabilities (2), bbox offsets (4), landmarks (10).
    """

    def __init__(self, **kwargs):
        super(O_Net, self).__init__(**kwargs)
        # NOTE(review): PReLU layer instances passed as `activation=` —
        # confirm their alpha weights are tracked as trainable variables.
        self.conv1 = Conv2D(32, 3, activation=PReLU(Constant(value=0.25), shared_axes=[1, 2]),
                            kernel_regularizer=tf.keras.regularizers.l2(0.0005), name="conv1")

        self.pool1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='same', name='pool1')

        self.conv2 = Conv2D(64, 3, activation=PReLU(Constant(value=0.25), shared_axes=[1, 2]),
                            kernel_regularizer=tf.keras.regularizers.l2(0.0005), name="conv2")

        # Default 'valid' padding here, unlike pool1/pool3.
        self.pool2 = MaxPooling2D(pool_size=(3, 3), strides=2, name='pool2')

        self.conv3 = Conv2D(64, 2, activation=PReLU(Constant(value=0.25), shared_axes=[1, 2]),
                            kernel_regularizer=tf.keras.regularizers.l2(0.0005), name="conv3")

        self.pool3 = MaxPooling2D(pool_size=(2, 2), strides=2, padding='same', name='pool3')

        self.conv4 = Conv2D(128, 2, activation=PReLU(Constant(value=0.25), shared_axes=[1, 2]),
                            kernel_regularizer=tf.keras.regularizers.l2(0.0005), name="conv4")

        self.fc_flatten = Flatten()

        # 256-d embedding shared by the three output heads.
        self.fc1 = Dense(256, kernel_regularizer=tf.keras.regularizers.l2(0.0005), name="fc1")

        self.cls_prob = Dense(2, activation=tf.nn.softmax, kernel_regularizer=tf.keras.regularizers.l2(0.0005), name="cls_fc")
        self.bbox_pred = Dense(4, activation=None, kernel_regularizer=tf.keras.regularizers.l2(0.0005), name="bbox_fc")
        self.landmark_pred = Dense(10, activation=None, kernel_regularizer=tf.keras.regularizers.l2(0.0005), name="landmark_fc")

    def call(self, inputs, label=None, bbox_target=None, landmark_target=None, training=None):
        """Forward pass.

        Training: returns (cls_loss, bbox_loss, landmark_loss, L2_loss,
        accuracy) via the OHEM loss helpers defined later in this module.
        Inference: returns (cls_prob, bbox_pred, landmark_pred) batches.
        """
        x = self.conv1(inputs)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.pool2(x)
        x = self.conv3(x)
        x = self.pool3(x)
        x = self.conv4(x)
        x = self.fc_flatten(x)
        x = self.fc1(x)
        cls_prob = self.cls_prob(x)
        bbox_pred = self.bbox_pred(x)
        landmark_pred = self.landmark_pred(x)
        if training:
            cls_loss = cls_ohem(cls_prob, label)

            bbox_loss = bbox_ohem(bbox_pred, bbox_target, label)

            landmark_loss = landmark_ohem(landmark_pred, landmark_target, label)

            accuracy = cal_accuracy(cls_prob, label)
            # Sum of the per-layer L2 regularization losses.
            L2_loss = sum(self.losses)

            return cls_loss,bbox_loss,landmark_loss,L2_loss,accuracy
        else:
            return cls_prob,bbox_pred,landmark_pred


@tf.function
def _parse_function(example_proto, image_size):
    """Decode one serialized tf.Example into (image, label, roi, landmark).

    The image is decoded from raw uint8 bytes, reshaped to
    [image_size, image_size, 3] and scaled to roughly [-1, 1).
    Colour jitter is always applied; pos (label == 1) and landmark
    (label == -2) samples are additionally flipped left-right with
    probability 1/2, mirroring landmark x coordinates accordingly.
    """
    def image_color_distort(inputs):
        # Random photometric jitter: contrast, brightness, hue, saturation.
        inputs = tf.image.random_contrast(inputs, lower=0.5, upper=1.5)
        inputs = tf.image.random_brightness(inputs, max_delta=0.2)
        inputs = tf.image.random_hue(inputs, max_delta=0.2)
        inputs = tf.image.random_saturation(inputs, lower=0.5, upper=1.5)
        return inputs

    def random_flip_images(image, label, landmark):
        # NOTE(review): python `or` between tensor conditions inside a
        # tf.function — AutoGraph converts `if` on tensors but not `or`,
        # so this may fail (or retrace per value) in graph mode; verify.
        if tf.equal(label, 1) or tf.equal(label, -2):
            if tf.equal(tf.random.uniform([], maxval=2, dtype=tf.int32), 1):
                image = tf.image.flip_left_right(image)
                if tf.equal(label, -2):
                    # Mirror x coordinates (assumes landmarks normalized
                    # to [0, 1] — TODO confirm against the data writer).
                    landmark = tf.reshape(landmark, [-1, 2])
                    landmark = tf.concat(
                        [tf.expand_dims(1 - landmark[:, 0], axis=1), tf.expand_dims(landmark[:, 1], axis=1)],
                        axis=-1)
                    # Swap point pairs (0,1) and (3,4), keep point 2 —
                    # presumably eyes / mouth corners with nose fixed.
                    landmark = tf.gather(landmark, axis=0, indices=[1, 0, 2, 4, 3])
                    landmark = tf.reshape(landmark, [-1])

        return image, landmark

    # Schema of one serialized example: raw image bytes, scalar int label,
    # 4 roi floats, 10 landmark floats.
    feature_description = {
        'image/encoded': tf.io.FixedLenFeature([], tf.string),
        'image/label': tf.io.FixedLenFeature([], tf.int64),
        'image/roi': tf.io.FixedLenFeature([4], tf.float32),
        'image/landmark': tf.io.FixedLenFeature([10], tf.float32),
    }

    image_features = tf.io.parse_single_example(example_proto, feature_description)
    # 'image/encoded' holds raw pixel bytes (not JPEG) — decode_raw, not decode_image.
    image = tf.io.decode_raw(image_features['image/encoded'], tf.uint8)
    image = tf.reshape(image, [image_size, image_size, 3])
    # Normalize uint8 [0, 255] to approximately [-1, 1).
    image = (tf.cast(image, tf.float32) - 127.5) / 128
    label = tf.cast(image_features['image/label'], tf.float32)
    roi = tf.cast(image_features['image/roi'], tf.float32)
    landmark = tf.cast(image_features['image/landmark'], tf.float32)

    image = image_color_distort(image)
    image, landmark = random_flip_images(image, label, landmark)

    return image, label, roi, landmark


# Single shuffled PNet training tfrecord (12x12 samples).
filename = ["../data/12/tfrecord/train_PNet_landmark.tfrecord_shuffle"]
# Per-category RNet (24x24) tfrecords, meant for read_multi_tfrecords.
# NOTE(review): read_multi_tfrecords unpacks (pos, part, neg, landmark),
# but this list is ordered [neg, part, pos, neg] — the neg file appears
# twice and no landmark file is listed; looks like a copy-paste slip.
filenames = ["../data/24/tfrecord/neg_landmark.tfrecord_shuffle",
             "../data/24/tfrecord/part_landmark.tfrecord_shuffle",
             "../data/24/tfrecord/pos_landmark.tfrecord_shuffle",
             "../data/24/tfrecord/neg_landmark.tfrecord_shuffle"]
raw_dataset = tf.data.TFRecordDataset(filename)


# Create a description of the features.


def read_single_tfrecord(tfrecord_file, batch_size, net):
    """Build an infinitely repeating, batched dataset from one tfrecord file.

    Parameters:
        tfrecord_file: path (or list of paths) to the tfrecord file(s).
        batch_size: number of samples per batch.
        net: 'PNet' / 'RNet' / 'ONet' — selects the image side length
             (12 / 24 / 48) used to reshape the decoded raw bytes.

    Returns:
        tf.data.Dataset yielding (image, label, roi, landmark) batches,
        repeated forever and prefetched.

    Raises:
        ValueError: if `net` is not one of the three known names
        (the original code fell through to an opaque NameError).
    """
    image_sizes = {'PNet': 12, 'RNet': 24, 'ONet': 48}
    if net not in image_sizes:
        raise ValueError("net must be one of 'PNet', 'RNet', 'ONet', got %r" % (net,))
    image_size = image_sizes[net]

    datasets = tf.data.TFRecordDataset(tfrecord_file).map(
        lambda x: _parse_function(x, image_size)).repeat().batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)

    return datasets


# datasets = read_single_tfrecord(filename, 384, "PNet")
#
# for data in datasets:
#     print(data[0])
# exit(0)


def read_multi_tfrecords(tfrecord_files, batch_sizes, net):
    """Zip the four per-category tfrecord streams into one dataset.

    Parameters:
        tfrecord_files: (pos, part, neg, landmark) tfrecord paths.
        batch_sizes: matching (pos, part, neg, landmark) batch sizes.
        net: 'PNet' / 'RNet' / 'ONet', forwarded to read_single_tfrecord.

    Returns:
        tf.data.Dataset yielding one (pos, part, neg, landmark) tuple of
        batches per step.
    """
    pos_path, part_path, neg_path, lm_path = tfrecord_files
    pos_bs, part_bs, neg_bs, lm_bs = batch_sizes

    return tf.data.Dataset.zip((
        read_single_tfrecord(pos_path, pos_bs, net),
        read_single_tfrecord(part_path, part_bs, net),
        read_single_tfrecord(neg_path, neg_bs, net),
        read_single_tfrecord(lm_path, lm_bs, net),
    ))


# datasets = read_multi_tfrecords(filenames, [66, 77, 88, 99], "RNet")
#
# for data in datasets.take(1):
#     images = tf.concat([d[0] for d in data], 0)
#     labels = tf.concat([d[1] for d in data], 0)
#     rois = tf.concat([d[2] for d in data], 0)
#     landmarks = tf.concat([d[3] for d in data], 0)
#     for l in landmarks:
#         print(l)


# image_size = 12
#
# parsed_dataset = raw_dataset.map(lambda x: _parse_function(x, image_size)).repeat().batch(384).prefetch(tf.data.experimental.AUTOTUNE)
# print(parsed_dataset)
#
# for parsed_dataset in parsed_dataset.take(300000):
#     print(1)
# tset = parsed_dataset[1]
# if parsed_dataset[1].numpy() == 1:
#     print(parsed_dataset[1], parsed_dataset[3])
#     print("\n\n\n")


def cls_ohem(cls_prob, label):
    """Face-classification loss with online hard example mining.

    Only pos (label == 1) and neg (label == 0) samples contribute; part
    (-1) and landmark (-2) samples are masked out. Of the valid samples,
    only the hardest `num_keep_radio` fraction (largest cross-entropy)
    is averaged.

    Parameters:
        cls_prob: [batch, 2] softmax probabilities (non-face, face).
        label: [batch] float labels in {1, 0, -1, -2}.

    Returns:
        Scalar mean loss over the kept hard examples.
    """
    # Map negative labels (part/landmark) to class 0 so gather indices stay valid.
    clean_label = tf.where(tf.less(label, 0), tf.zeros_like(label), label)
    label_idx = tf.cast(clean_label, tf.int32)

    # Flatten probabilities so sample i occupies rows 2i (non-face), 2i+1 (face).
    flat_prob = tf.reshape(cls_prob, [tf.size(cls_prob), -1])
    batch = tf.cast(cls_prob.get_shape()[0], dtype=tf.int32)
    gather_idx = tf.range(batch) * 2 + label_idx

    # Cross-entropy of each sample's true class (epsilon guards log(0)).
    true_prob = tf.squeeze(tf.gather(flat_prob, gather_idx))
    loss = -tf.math.log(true_prob + 1e-10)

    # Mask: 1 for pos/neg samples, 0 for part/landmark samples.
    zeros = tf.zeros_like(true_prob, dtype=tf.float32)
    ones = tf.ones_like(true_prob, dtype=tf.float32)
    valid_mask = tf.where(label < zeros, zeros, ones)

    # Keep only the hardest num_keep_radio fraction of valid losses.
    keep_num = tf.cast(tf.reduce_sum(valid_mask) * num_keep_radio, dtype=tf.int32)
    hard_loss, _ = tf.nn.top_k(loss * valid_mask, k=keep_num)
    return tf.reduce_mean(hard_loss)


# In[4]:


def bbox_ohem(bbox_pred, bbox_target, label):
    """Bounding-box regression loss over pos (1) and part (-1) samples.

    Parameters:
        bbox_pred: [batch, 4] predicted box offsets.
        bbox_target: [batch, 4] ground-truth box offsets.
        label: [batch] float labels; only |label| == 1 samples count.

    Returns:
        Scalar mean summed-squared-error over the valid samples.
    """
    # 1 where |label| == 1 (pos or part), else 0.
    mask = tf.where(tf.equal(tf.abs(label), 1),
                    tf.ones_like(label, dtype=tf.float32),
                    tf.zeros_like(label, dtype=tf.float32))
    # Per-sample squared error summed over the 4 box coordinates.
    per_sample = tf.reduce_sum(tf.square(bbox_pred - bbox_target), axis=1)
    # top_k with k == number of valid samples keeps exactly those
    # (invalid entries were zeroed by the mask).
    k = tf.cast(tf.reduce_sum(mask), dtype=tf.int32)
    kept, _ = tf.nn.top_k(per_sample * mask, k=k)
    return tf.reduce_mean(kept)


# In[5]:


def landmark_ohem(landmark_pred, landmark_target, label):
    """Landmark regression loss over landmark samples (label == -2).

    Parameters:
        landmark_pred: [batch, 10] predicted landmark coordinates.
        landmark_target: [batch, 10] ground-truth landmark coordinates.
        label: [batch] float labels; only label == -2 samples count.

    Returns:
        Scalar mean summed-squared-error over the valid samples.
    """
    # 1 where label == -2 (landmark sample), else 0.
    mask = tf.where(tf.equal(label, -2),
                    tf.ones_like(label, dtype=tf.float32),
                    tf.zeros_like(label, dtype=tf.float32))
    # Per-sample squared error summed over the 10 coordinates.
    per_sample = tf.reduce_sum(tf.square(landmark_pred - landmark_target), axis=1)
    # top_k with k == number of valid samples keeps exactly those
    # (invalid entries were zeroed by the mask).
    k = tf.cast(tf.reduce_sum(mask), dtype=tf.int32)
    kept, _ = tf.nn.top_k(per_sample * mask, k=k)
    return tf.reduce_mean(kept)


# In[6]:


def cal_accuracy(cls_prob, label):
    """Classification accuracy restricted to pos/neg samples (label >= 0).

    Parameters:
        cls_prob: [batch, 2] class probabilities (argmax 0 = non-face,
                  1 = face).
        label: [batch] labels; part (-1) / landmark (-2) are excluded.

    Returns:
        Scalar accuracy over the kept samples.
    """
    predictions = tf.argmax(cls_prob, axis=1)
    truth = tf.cast(label, tf.int64)
    # Indices of pos/neg samples only.
    keep = tf.squeeze(tf.where(tf.greater_equal(truth, 0)))
    # Fraction of kept samples where prediction matches the label.
    matches = tf.equal(tf.gather(truth, keep), tf.gather(predictions, keep))
    return tf.reduce_mean(tf.cast(matches, tf.float32))
