import tensorflow.compat.v1 as tf

import dataloader
import det_model_fn
import hparams_config
import utils

# ---- Model configuration -----------------------------------------------------
model_name = 'efficientdet-d0'
hparams = 'use_bfloat16=false,num_classes=2'
config = hparams_config.get_detection_config(model_name)
config.override(hparams)
model_fn_instance = det_model_fn.get_model_fn(model_name)

# ---- Training / evaluation schedule ------------------------------------------
num_epochs = 50
iterations_per_loop = 100
num_examples_per_epoch = 81350
train_batch_size = 8
eval_batch_size = 8
eval_samples = 8617

# ---- Paths -------------------------------------------------------------------
model_dir = 'model/efficientdet-d0'
backbone_ckpt = ''
val_json_file = 'dataset/test.json'
validation_file_pattern = '/data_2/git/automl/efficientdet/tfrecord/test*'

# ---- Hardware / distribution (TPU disabled for this run) ---------------------
use_tpu = False
use_spatial_partition = False
num_cores_per_replica = None
input_partition_dims = None
num_cores = 8
num_shards = num_cores
eval_master = ''
mode = 'eval'


# Base parameter dict: the model config merged with run-time settings.
params = config.as_dict()
params.update(
    batch_size=eval_batch_size,
    model_name=model_name,
    num_epochs=num_epochs,
    iterations_per_loop=iterations_per_loop,
    model_dir=model_dir,
    num_shards=num_shards,
    num_examples_per_epoch=num_examples_per_epoch,
    use_tpu=use_tpu,
    backbone_ckpt=None,  # no backbone warm-start when evaluating
    val_json_file=val_json_file,
    mode=mode,
)
# config_proto = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)

# tpu_config = tf.estimator.tpu.TPUConfig(iterations_per_loop,
#                                         num_shards=num_shards,
#                                         num_cores_per_replica=num_cores_per_replica,
#                                         input_partition_dims=input_partition_dims,
#                                         per_host_input_for_training=tf.estimator.tpu.InputPipelineConfig.PER_HOST_V2)

# run_config = tf.estimator.tpu.RunConfig(cluster=None,
#                                         evaluation_master=eval_master,
#                                         model_dir=model_dir,
#                                         log_step_count_steps=iterations_per_loop,
#                                         session_config=config_proto,
#                                         tpu_config=tpu_config)

# Evaluation-specific overrides: turn off every training-only behavior.
_eval_overrides = {
    'use_tpu': use_tpu,
    'input_rand_hflip': False,   # no augmentation at eval time
    'backbone_ckpt': None,
    'is_training_bn': False,     # use stored BN statistics, not batch stats
    'use_bfloat16': False,
}
eval_params = dict(params, **_eval_overrides)

# eval_estimator = tf.estimator.tpu.TPUEstimator(model_fn=model_fn_instance,
#                                                use_tpu=False,
#                                                train_batch_size=train_batch_size,
#                                                eval_batch_size=eval_batch_size,
#                                                config=run_config,
#                                                params=eval_params)

import anchors
from object_detection import tf_example_decoder
from dataloader import DetectionInputProcessor
from dataloader import pad_to_fixed_size

_max_num_instances = 100


def _dataset_parser(value):
    """Parse one serialized tf.Example into (image, labels) for evaluation.

    Args:
        value: scalar string tensor holding a serialized tf.Example.

    Returns:
        A tuple `(image, (cls_targets, box_targets, num_positives, source_id,
        image_scale, boxes, is_crowds, areas, classes))` where the groundtruth
        tensors are padded to `_max_num_instances` rows so examples can be
        batched.
    """
    # Anchors depend only on static params, so rebuilding them per call is
    # deterministic (though it could be hoisted out of the parser).
    input_anchors = anchors.Anchors(params['min_level'], params['max_level'],
                                    params['num_scales'],
                                    params['aspect_ratios'],
                                    params['anchor_scale'],
                                    params['image_size'])
    anchor_labeler = anchors.AnchorLabeler(input_anchors, params['num_classes'])
    example_decoder = tf_example_decoder.TfExampleDecoder()
    with tf.name_scope('parser'):
        data = example_decoder.decode(value)
        source_id = data['source_id']
        image = data['image']
        boxes = data['groundtruth_boxes']
        # Cast/reshape classes exactly once (the original did this twice,
        # redundantly) and drop the leftover debug print(value).
        classes = tf.reshape(
            tf.cast(data['groundtruth_classes'], dtype=tf.float32), [-1, 1])
        areas = data['groundtruth_area']
        is_crowds = data['groundtruth_is_crowd']

        input_processor = DetectionInputProcessor(
            image, params['image_size'], boxes, classes)
        input_processor.normalize_image()
        input_processor.set_scale_factors_to_output_size()
        image = input_processor.resize_and_crop_image()
        boxes, classes = input_processor.resize_and_crop_boxes()

        # Assign anchors.
        (cls_targets, box_targets,
         num_positives) = anchor_labeler.label_anchors(boxes, classes)

        # Empty source ids become '-1' so string_to_number cannot fail.
        source_id = tf.where(tf.equal(source_id, tf.constant('')), '-1',
                             source_id)
        source_id = tf.string_to_number(source_id)

        # Pad groundtruth data for evaluation.
        image_scale = input_processor.image_scale_to_original
        boxes *= image_scale  # map boxes back to original image coordinates
        is_crowds = tf.cast(is_crowds, dtype=tf.float32)
        boxes = pad_to_fixed_size(boxes, -1, [_max_num_instances, 4])
        is_crowds = pad_to_fixed_size(is_crowds, 0, [_max_num_instances, 1])
        areas = pad_to_fixed_size(areas, -1, [_max_num_instances, 1])
        classes = pad_to_fixed_size(classes, -1, [_max_num_instances, 1])
        return image, (cls_targets, box_targets, num_positives, source_id,
                       image_scale, boxes, is_crowds, areas, classes)


def input_fn():
    """Build the evaluation tf.data pipeline.

    Returns:
        A dataset yielding `(image, labels)` batches of `eval_batch_size`
        parsed examples; incomplete final batches are dropped.
    """
    # TFRecordDataset takes concrete filenames, not a glob pattern — the
    # original passed the raw 'test*' pattern, which names no real file.
    filenames = tf.io.gfile.glob(validation_file_pattern)
    dataset = tf.data.TFRecordDataset(filenames)

    # Parse the fetched records to input tensors for model function.
    dataset = dataset.map(_dataset_parser, num_parallel_calls=64)
    dataset = dataset.batch(eval_batch_size, drop_remainder=True)
    # Prefetch after batching so whole batches are staged ahead of the model
    # (the original prefetched unbatched examples, which buys little overlap).
    dataset = dataset.prefetch(1)
    return dataset


# Smoke-check that the input pipeline builds before launching evaluation.
eval_dataset = input_fn()
print('========')
print(eval_dataset.take(1))

# Plain (non-TPU) Estimator evaluation over the validation set.
estimator = tf.estimator.Estimator(model_fn=model_fn_instance,
                                   model_dir=model_dir,
                                   params=eval_params)
num_eval_steps = eval_samples // eval_batch_size
eval_results = estimator.evaluate(input_fn=input_fn, steps=num_eval_steps)

# Archive the just-evaluated checkpoint keyed on its AP metric.
latest_ckpt = tf.train.latest_checkpoint(model_dir)
utils.archive_ckpt(eval_results, eval_results['AP'], latest_ckpt)
