
# coding: utf-8

# In[1]:


import os
import sys
from datetime import datetime
import numpy as np
import tensorflow as tf
import config as FLAGS
import random
import cv2
from tqdm import tqdm, trange
from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops

# In[ ]:

def train(net_factory, prefix, end_epoch, base_dir, display, base_lr):
    """Train one MTCNN sub-network (PNet / RNet / ONet).

    Args:
        net_factory: zero-arg callable returning the Keras model. Calling the
            model must return (cls_loss, bbox_loss, landmark_loss, L2_loss, accuracy).
        prefix: directory where checkpoints are saved/restored.
        end_epoch: epoch index to train up to (resumes from the checkpoint counter).
        base_dir: data directory whose basename is the input size (12/24/48);
            the size selects which network is being trained.
        display: interval (in steps) for stderr logging and TensorBoard summaries.
        base_lr: initial learning rate for the piecewise-constant schedule.

    Raises:
        ValueError: if ``base_dir`` does not end in 12, 24 or 48.
    """
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
    tf.config.experimental.set_memory_growth(physical_devices[0], True)

    # The network and its loss weights are selected by the image size encoded
    # in the directory name.
    size = int(os.path.split(base_dir)[-1])
    if size == 12:
        net = 'PNet'
        radio_cls_loss, radio_bbox_loss, radio_landmark_loss = 1.0, 0.5, 0.5
    elif size == 24:
        net = 'RNet'
        radio_cls_loss, radio_bbox_loss, radio_landmark_loss = 1.0, 0.5, 0.5
    elif size == 48:
        net = 'ONet'
        # ONet weights landmark regression higher than the other two nets.
        radio_cls_loss, radio_bbox_loss, radio_landmark_loss = 1.0, 0.5, 1
    else:
        # Previously an unmatched size fell through and the loss ratios were
        # left undefined, causing a NameError deep inside the training loop.
        raise ValueError("Unsupported input size %d; expected 12, 24 or 48" % size)

    if net == 'PNet':
        # Count the total number of samples; context manager closes the file
        # (the original leaked the handle).
        label_file = os.path.join(base_dir, 'train_pnet_landmark.txt')
        with open(label_file, 'r') as f:
            num = len(f.readlines())
        dataset_dir = os.path.join(base_dir, 'tfrecord/train_PNet_landmark.tfrecord_shuffle')
        # Read training data from a single shuffled tfrecord.
        dataset_batch = read_single_tfrecord(dataset_dir, FLAGS.batch_size, net)
    else:
        # Count samples across the four label files (pos / part / neg / landmark),
        # closing each file as we go.
        num = 0
        for pattern in ('pos_%d.txt', 'part_%d.txt', 'neg_%d.txt', 'landmark_%d_aug.txt'):
            with open(os.path.join(base_dir, pattern % size), 'r') as f:
                num += len(f.readlines())

        pos_dir = os.path.join(base_dir, 'tfrecord/pos_landmark.tfrecord_shuffle')
        part_dir = os.path.join(base_dir, 'tfrecord/part_landmark.tfrecord_shuffle')
        neg_dir = os.path.join(base_dir, 'tfrecord/neg_landmark.tfrecord_shuffle')
        landmark_dir = os.path.join(base_dir, 'tfrecord/landmark_landmark.tfrecord_shuffle')
        dataset_dirs = [pos_dir, part_dir, neg_dir, landmark_dir]
        # Fixed per-source fractions so every batch keeps the same class mix.
        pos_radio, part_radio, landmark_radio, neg_radio = 1.0 / 6, 1.0 / 6, 1.0 / 6, 3.0 / 6
        batch_sizes = [int(np.ceil(FLAGS.batch_size * r))
                       for r in (pos_radio, part_radio, neg_radio, landmark_radio)]
        assert all(b != 0 for b in batch_sizes), "Batch Size 有误 "
        dataset_batch = read_multi_tfrecords(dataset_dirs, batch_sizes, net)

    epoch_step = int(num / FLAGS.batch_size + 1)

    model = net_factory()
    lr = learning_rate_fn(base_lr, num)
    # NOTE(review): this membership test is always False (net is one of
    # 'PNet'/'RNet'/'ONet'), so Adam is always used. Kept as-is to preserve
    # behavior; the empty strings look like lost net names — confirm intent.
    if net in ["", ""]:
        optimizer = tf.keras.optimizers.SGD(learning_rate=lr, momentum=0.9)
    else:
        optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
    datasets_it = iter(dataset_batch)

    # Checkpointing: restore the latest checkpoint (no-op when none exists).
    ckpt = tf.train.Checkpoint(model=model, optimizer=optimizer)
    ckpt_manager = tf.train.CheckpointManager(ckpt, prefix, max_to_keep=5)
    ckpt.restore(ckpt_manager.latest_checkpoint)

    # Running means for per-epoch reporting.
    cls_loss_meter = metrics.Mean()
    bbox_loss_meter = metrics.Mean()
    landmark_loss_meter = metrics.Mean()
    L2_loss_meter = metrics.Mean()
    accuracy_meter = metrics.Mean()
    total_loss_meter = metrics.Mean()

    logs_dir = "../graph/%s" % (net)
    summary_writer = tf.summary.create_file_writer(logs_dir)

    # save_counter equals the number of checkpoints already saved, i.e. the
    # number of completed epochs — training resumes from there.
    for epoch in range(ckpt.save_counter.numpy(), end_epoch):
        cls_loss_meter.reset_states()
        bbox_loss_meter.reset_states()
        landmark_loss_meter.reset_states()
        L2_loss_meter.reset_states()
        accuracy_meter.reset_states()
        total_loss_meter.reset_states()
        for i in tqdm(range(epoch_step), desc='epoch  %d' % (epoch+1), ncols=75):
            data = next(datasets_it)
            if net == 'PNet':
                image_batch_array, label_batch_array, bbox_batch_array, landmark_batch_array = data
            else:
                # Multi-source batch: concatenate the pos/part/neg/landmark
                # sub-batches along the batch dimension.
                image_batch_array = tf.concat([d[0] for d in data], 0)
                label_batch_array = tf.concat([d[1] for d in data], 0)
                bbox_batch_array = tf.concat([d[2] for d in data], 0)
                landmark_batch_array = tf.concat([d[3] for d in data], 0)

            with tf.GradientTape() as tape:
                cls_loss, bbox_loss, landmark_loss, L2_loss, accuracy = model(image_batch_array, label_batch_array,
                                                                     bbox_batch_array,
                                                                     landmark_batch_array, training=True)

                # Weighted sum of the task losses plus weight decay.
                total_loss = radio_cls_loss * cls_loss + radio_bbox_loss * bbox_loss + radio_landmark_loss * landmark_loss + L2_loss

            cls_loss_meter.update_state(cls_loss)
            bbox_loss_meter.update_state(bbox_loss)
            landmark_loss_meter.update_state(landmark_loss)
            L2_loss_meter.update_state(L2_loss)
            accuracy_meter.update_state(accuracy)
            total_loss_meter.update_state(total_loss)

            grads = tape.gradient(total_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            if (i + 1) % display == 0:
                sys.stderr.write("\rStep:%5d/%d, accuracy: %3f, cls loss: %4f, bbox loss: %4f, Landmark loss :%4f, L2 loss: %4f, Total Loss: %4f ,lr:%f \n" % (
                        i + 1, epoch_step, accuracy_meter.result().numpy(), cls_loss_meter.result().numpy(),
                        bbox_loss_meter.result().numpy(), landmark_loss_meter.result().numpy(),
                        L2_loss_meter.result().numpy(), total_loss_meter.result().numpy(),
                        lr.cur_lr))

                with summary_writer.as_default():
                    tf.summary.scalar("cls_loss", cls_loss_meter.result().numpy(), step=epoch+1)
                    tf.summary.scalar("bbox_loss", bbox_loss_meter.result().numpy(), step=epoch+1)
                    tf.summary.scalar("landmark_loss", landmark_loss_meter.result().numpy(), step=epoch+1)
                    tf.summary.scalar("cls_accuracy", accuracy_meter.result().numpy(), step=epoch+1)
                    tf.summary.scalar("L2_loss", L2_loss_meter.result().numpy(), step=epoch+1)
                    # Weighted combination of all task losses and L2.
                    tf.summary.scalar("total_loss",
                                      total_loss_meter.result().numpy(), step=epoch+1)
                    summary_writer.flush()

        ckpt_save_path = ckpt_manager.save()
        print('\rSaving checkpoint for epoch {} at {}'.format(epoch + 1,
                                                            ckpt_save_path))

    print("完成！！！")


class CustomPiecewiseConstantDecay(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Piecewise-constant learning-rate schedule.

    Behaves like ``tf.keras.optimizers.schedules.PiecewiseConstantDecay`` but
    additionally stores the most recently computed rate in ``self.cur_lr`` so
    callers (e.g. the training log) can read the current learning rate.
    """

    def __init__(self, boundaries, values, name=None):
        """
        Args:
            boundaries: monotonically increasing step boundaries.
            values: learning rates; ``values[i]`` is used for steps in
                ``(boundaries[i-1], boundaries[i]]``. Must have exactly one
                more element than ``boundaries``.
            name: optional op name scope.

        Raises:
            ValueError: if ``len(boundaries) != len(values) - 1``.
        """
        super(CustomPiecewiseConstantDecay, self).__init__()

        if len(boundaries) != len(values) - 1:
            raise ValueError(
                "The length of boundaries should be 1 less than the length of values")

        self.boundaries = boundaries
        self.values = values
        self.name = name
        # Seed with the initial rate so cur_lr is readable before the first call.
        self.cur_lr = values[0]

    def __call__(self, step):
        """Return the learning rate tensor for ``step``."""
        with ops.name_scope_v2(self.name or "CustomPiecewiseConstant"):
            boundaries = ops.convert_n_to_tensor(self.boundaries)
            values = ops.convert_n_to_tensor(self.values)
            x_recomp = ops.convert_to_tensor(step)
            for i, b in enumerate(boundaries):
                if b.dtype.base_dtype != x_recomp.dtype.base_dtype:
                    # Cast the boundaries to have the same type as the step.
                    b = math_ops.cast(b, x_recomp.dtype.base_dtype)
                    boundaries[i] = b
            pred_fn_pairs = []
            pred_fn_pairs.append((x_recomp <= boundaries[0], lambda: values[0]))
            pred_fn_pairs.append((x_recomp > boundaries[-1], lambda: values[-1]))
            for low, high, v in zip(boundaries[:-1], boundaries[1:], values[1:-1]):
                # Bind v via a default argument to avoid late-binding closures.
                pred = (x_recomp > low) & (x_recomp <= high)
                pred_fn_pairs.append((pred, lambda v=v: v))

            # The default isn't needed here because the conditions are mutually
            # exclusive and exhaustive, but tf.case requires it.
            default = lambda: values[0]
            # Build the case op once (the original built it twice — once for
            # cur_lr and once for the return value — duplicating graph ops).
            result = control_flow_ops.case(pred_fn_pairs, default, exclusive=True)
            self.cur_lr = result
            return result

    def get_config(self):
        """Return the serialization config for this schedule."""
        return {
            "boundaries": self.boundaries,
            "values": self.values,
            "name": self.name
        }

def learning_rate_fn(base_lr, data_num):
    """Build the piecewise-constant LR schedule used for training.

    The rate starts at ``base_lr`` and is multiplied by 0.1 after each epoch
    listed in ``FLAGS.LR_EPOCH`` (epochs converted to global steps using
    ``data_num`` and ``FLAGS.batch_size``).
    """
    decay_factor = 0.1
    steps_per_epoch = data_num / FLAGS.batch_size
    step_boundaries = [int(e * steps_per_epoch) for e in FLAGS.LR_EPOCH]
    rates = [base_lr * decay_factor ** stage
             for stage in range(len(FLAGS.LR_EPOCH) + 1)]
    return CustomPiecewiseConstantDecay(step_boundaries, rates)


def read_single_tfrecord(tfrecord_file, batch_size, net):
    """Build a shuffled, repeating, batched dataset from one tfrecord file.

    Args:
        tfrecord_file: path to the tfrecord file.
        batch_size: samples per batch.
        net: one of 'PNet', 'RNet', 'ONet' — selects the decoded image size.

    Returns:
        An infinite ``tf.data.Dataset`` of (image, label, roi, landmark) batches.

    Raises:
        ValueError: if ``net`` is not a known network name (the original left
            ``image_size`` undefined and crashed with NameError instead).
    """
    if net == 'PNet':
        image_size = 12
    elif net == 'RNet':
        image_size = 24
    elif net == 'ONet':
        image_size = 48
    else:
        raise ValueError("Unknown net %r; expected 'PNet', 'RNet' or 'ONet'" % (net,))

    # Renamed from `datasets` to avoid shadowing the keras `datasets` import.
    dataset = tf.data.TFRecordDataset(tfrecord_file).map(
        lambda x: _parse_function(x, image_size)).shuffle(buffer_size=20000).repeat().batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)

    return dataset


def read_multi_tfrecords(tfrecord_files, batch_sizes, net):
    """Combine the four per-source tfrecord datasets into one zipped dataset.

    ``tfrecord_files`` and ``batch_sizes`` are parallel 4-element sequences in
    the order (pos, part, neg, landmark); zipping them keeps every training
    step's class mix fixed at the given per-source batch sizes.
    """
    per_source = [
        read_single_tfrecord(path, size, net)
        for path, size in zip(tfrecord_files, batch_sizes)
    ]
    return tf.data.Dataset.zip(tuple(per_source))


@tf.function
def _parse_function(example_proto, image_size):
    """Decode one serialized Example into (image, label, roi, landmark).

    The image is raw uint8 bytes reshaped to (image_size, image_size, 3),
    scaled to roughly [-1, 1), color-jittered, and randomly flipped (together
    with its landmarks) for augmentation.
    """
    schema = {
        'image/encoded': tf.io.FixedLenFeature([], tf.string),
        'image/label': tf.io.FixedLenFeature([], tf.int64),
        'image/roi': tf.io.FixedLenFeature([4], tf.float32),
        'image/landmark': tf.io.FixedLenFeature([10], tf.float32),
    }
    parsed = tf.io.parse_single_example(example_proto, schema)

    raw = tf.io.decode_raw(parsed['image/encoded'], tf.uint8)
    raw = tf.reshape(raw, [image_size, image_size, 3])
    # Center at 127.5 and scale by 1/128 -> approximately [-1, 1).
    image = (tf.cast(raw, tf.float32) - 127.5) / 128

    label = tf.cast(parsed['image/label'], tf.float32)
    roi = tf.cast(parsed['image/roi'], tf.float32)
    landmark = tf.cast(parsed['image/landmark'], tf.float32)

    image = image_color_distort(image)
    image, landmark = random_flip_images(image, label, landmark)

    return image, label, roi, landmark


@tf.function
def image_color_distort(inputs):
    """Apply random color jitter (contrast, brightness, hue, saturation)."""
    distorted = tf.image.random_contrast(inputs, lower=0.5, upper=1.5)
    distorted = tf.image.random_brightness(distorted, max_delta=0.2)
    distorted = tf.image.random_hue(distorted, max_delta=0.2)
    distorted = tf.image.random_saturation(distorted, lower=0.5, upper=1.5)
    return distorted


@tf.function
def random_flip_images(image, label, landmark):
    """Randomly mirror an image (and its landmarks) left-right.

    Only samples whose label is 1 or -2 are candidates; each candidate is
    flipped with probability 1/2. When a flipped sample carries landmarks
    (label == -2), the 10-element landmark vector is mirrored to match.

    NOTE(review): tensor-valued `if`/`or` here relies on AutoGraph conversion
    inside @tf.function — keep control flow exactly as written.
    """
    if tf.equal(label, 1) or tf.equal(label, -2):
        # Coin flip: uniform int in {0, 1}.
        if tf.equal(tf.random.uniform([], maxval=2, dtype=tf.int32), 1):
            image = tf.image.flip_left_right(image)
            if tf.equal(label, -2):
                # View the 10-vector as 5 (x, y) points; mirror x across the
                # image (coordinates presumably normalized to [0, 1] — confirm).
                landmark = tf.reshape(landmark, [-1, 2])
                landmark = tf.concat(
                    [tf.expand_dims(1 - landmark[:, 0], axis=1), tf.expand_dims(landmark[:, 1], axis=1)],
                    axis=-1)
                # Swap left/right eye and left/right mouth-corner point order.
                landmark = tf.gather(landmark, axis=0, indices=[1, 0, 2, 4, 3])
                landmark = tf.reshape(landmark, [-1])

    return image, landmark