import os

# Suppress TensorFlow's C++ logging (0=all, 1=no INFO, 2=no WARNING).
# This env var is read when the tensorflow module is first imported,
# so it MUST be set before `import tensorflow` to take effect.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

import numpy as np
import tensorflow as tf
from tensorflow import keras

from detection.datasets import coco, data_generator
from detection.models.detectors import faster_rcnn


def train():
    """Train Faster R-CNN for one epoch on the COCO 2017 train split.

    Builds the dataset/input pipeline, constructs the model, and runs a
    plain eager training loop with SGD, printing the loss per step.
    """
    # Pixel mean/std and scale follow the common Detectron-style
    # ImageNet preprocessing (mean-subtraction only, std of 1).
    train_dataset = coco.CocoDataSet('./data/coco2017', 'train',
                         flip_ratio=0.5,
                         pad_mode='fixed',
                         mean=(123.675, 116.28, 103.53),
                         std=(1., 1., 1.),
                         scale=(800, 1216))
    print('=====', train_dataset)

    # Wrap the dataset in a callable generator for tf.data.
    train_generator = data_generator.DataGenerator(train_dataset)
    print("+++++", train_generator)

    # Each element is (imgs, img_metas, gt_bboxes, gt_labels).
    tf_dataset = tf.data.Dataset.from_generator(train_generator,
                                                (tf.float32, tf.float32, tf.float32, tf.float32))
    print('------', tf_dataset)
    # Shuffle individual examples BEFORE batching, then prefetch last so the
    # input pipeline overlaps with training.  (The previous
    # batch->prefetch->shuffle order shuffled already-formed batches and
    # placed shuffle after prefetch, defeating the prefetch overlap.)
    tf_dataset = tf_dataset.shuffle(100).batch(1).prefetch(100)

    # Build the model and optimizer for training.
    num_classes = len(train_dataset.get_categories())
    print('-=-=-=-=', num_classes)
    model = faster_rcnn.FasterRCNN(num_classes=num_classes)
    optimizer = tf.keras.optimizers.SGD(1e-3, momentum=0.9, nesterov=True)

    for epoch in range(1):
        for (batch, inputs) in enumerate(tf_dataset):

            batch_imgs, batch_metas, batch_bboxes, batch_labels = inputs
            print(batch_imgs, batch_metas, batch_bboxes, batch_labels)
            with tf.GradientTape() as tape:

                # In training mode the model returns its four loss terms
                # (RPN classification/box, RCNN classification/box).
                rpn_class_loss, rpn_bbox_loss, rcnn_class_loss, rcnn_bbox_loss = \
                    model((batch_imgs, batch_metas, batch_bboxes, batch_labels), training=True)

                loss = rpn_class_loss + rpn_bbox_loss + rcnn_class_loss + rcnn_bbox_loss

            grads = tape.gradient(loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            print("迭代次数：%d, batch大小：%d, 损失大小：%f" % (epoch+1, batch+1, loss))


def test():
    """Run Faster R-CNN inference over the COCO 2017 val split.

    Builds the same input pipeline as training (without shuffling),
    constructs the model, and prints the detections for each image.
    """
    val_dataset = coco.CocoDataSet('./data/coco2017', 'val')

    # Build the evaluation input pipeline and the model.
    val_generator = data_generator.DataGenerator(val_dataset)
    tf_dataset = tf.data.Dataset.from_generator(val_generator,
                                                (tf.float32, tf.float32, tf.float32, tf.float32))
    # No shuffle for evaluation: results should be deterministic and cover
    # each image exactly once.  (The previous batch->prefetch->shuffle chain
    # also applied shuffle after prefetch, which is the wrong order anyway.)
    tf_dataset = tf_dataset.batch(1).prefetch(100)
    num_classes = len(val_dataset.get_categories())
    model = faster_rcnn.FasterRCNN(num_classes=num_classes)
    print(num_classes)

    # NOTE(review): no checkpoint is restored here, so detections come from
    # randomly initialized weights — presumably trained weights should be
    # loaded before evaluating; confirm against the training script.
    for (batch, inputs) in enumerate(tf_dataset):
        img, img_meta, _, _ = inputs
        print(img, img_meta)

        # In inference mode the model consumes (img, img_meta) only and
        # returns the per-image detection results.
        detections_list = model((img, img_meta), training=False)

        print(detections_list)


# Entry point: trains by default; swap the calls to run inference instead.
if __name__ == '__main__':
    train()
    # test()