import argparse
import os

import numpy as np
import skimage
import tensorflow.keras.backend as K
from keras.callbacks import LearningRateScheduler, ModelCheckpoint, \
    EarlyStopping
from skimage.io import imread
from sklearn.model_selection import train_test_split
from tensorflow.keras.losses import Huber
from tensorflow.keras.optimizers import Adam
from tensorflow.python.keras.callbacks import Callback

import layer_utils
from backbone import build_backbone
from boxes import get_nms
from data_generator import DataGenerator
from label_utils import build_label_dictionary
from model import build_ssd


def mask_offset(y_true, y_pred):
    """Mask offset targets and predictions by the anchor-positive flag.

    The first 4 channels of y_true are the ground-truth box offsets and
    channel 4 (stride 8) is the object mask, so anchors without an object
    contribute zero to the offset loss.
    """
    # ground-truth offsets and the per-anchor object mask
    gt_offsets = y_true[..., 0:4]
    object_mask = y_true[..., 4::8]

    pred_offsets = y_pred[..., 0:4]
    # in-place multiply, matching the original's *= semantics
    gt_offsets *= object_mask
    pred_offsets *= object_mask

    return gt_offsets, pred_offsets


def l1_loss(y_true, y_pred):
    """Mean absolute error over the masked offset channels."""
    masked_true, masked_pred = mask_offset(y_true, y_pred)
    absolute_error = K.abs(masked_pred - masked_true)
    return K.mean(absolute_error, axis=-1)


def smooth_l1_loss(y_true, y_pred):
    """Huber (smooth L1) loss over the masked offset channels."""
    masked_true, masked_pred = mask_offset(y_true, y_pred)
    huber = Huber()
    return huber(masked_true, masked_pred)


def focal_loss_categorical(y_true, y_pred):
    """Categorical focal loss with fixed gamma=2.0 and alpha=0.25.

    Down-weights well-classified anchors so training focuses on hard
    (misclassified) ones.
    """
    gamma = 2.0
    alpha = 0.25

    # renormalize predictions to a probability distribution
    y_pred /= K.sum(y_pred, axis=-1, keepdims=True)

    # clip away exact 0/1 so the log stays finite
    eps = K.epsilon()
    y_pred = K.clip(y_pred, eps, 1. - eps)

    # standard cross-entropy scaled by the focal modulating factor
    modulator = alpha * K.pow(1 - y_pred, gamma)
    weighted_ce = (-y_true * K.log(y_pred)) * modulator
    return K.sum(weighted_ce, axis=-1)


def print_log(param, verbose=0):
    """Print *param* only when the verbosity level is positive."""
    if verbose <= 0:
        return
    print(param)


def lr_scheduler(epoch):
    """Learning rate scheduler - called every epoch.

    Piecewise-constant decay of the base LR 1e-3: the factor halves (and
    then drops by 10x) at every 20-epoch boundary starting at epoch 60.
    """
    base_lr = 1e-3
    epoch_offset = 0
    # (epoch boundary, multiplier) from latest to earliest;
    # first boundary exceeded wins
    schedule = (
        (200, 1e-4),
        (180, 5e-4),
        (160, 1e-3),
        (140, 5e-3),
        (120, 1e-2),
        (100, 5e-2),
        (80, 1e-1),
        (60, 5e-1),
    )
    lr = base_lr
    for boundary, factor in schedule:
        if epoch > (boundary - epoch_offset):
            lr = base_lr * factor
            break
    print('Learning rate: ', lr)
    return lr


def iou_metric(gt_box, boxes):
    """F2-style detection score accumulated over IoU thresholds 0.3..0.75.

    For each threshold, a ground-truth box counts as a true positive when
    at least one predicted box overlaps it above the threshold (counted at
    most once per gt box). Returns 5*tp / (5*tp + 4*fn + fp).

    Args:
        gt_box: (G, 4) array of ground-truth boxes.
        boxes: (B, 4) array of predicted boxes.
    """
    thresholds = np.arange(0.3, 0.8, 0.05)
    gt_count = gt_box.shape[0]
    box_count = boxes.shape[0]

    tp = 0
    fp = 0
    fn = 0

    if box_count > 0:
        iou_value = layer_utils.iou(gt_box, boxes)
        for threshold in thresholds:
            # per-gt match count, clipped so each gt box counts once
            matched = np.sum(iou_value > threshold, axis=1)
            matched[matched > 1] = 1
            true_positive = np.sum(matched)

            tp += true_positive
            fp += box_count - true_positive
            fn += gt_count - true_positive
    else:
        fn = gt_count

    denominator = 5 * tp + 4 * fn + fp
    if denominator == 0:
        # no ground truth and no predictions: treat as a perfect match
        # (original raised ZeroDivisionError in this case)
        return 1.0
    return (5 * tp) / denominator


class AccuracyCallback(Callback):
    """Keras callback that evaluates the SSD wrapper after every epoch."""

    def __init__(self, net):
        super().__init__()
        # net is the SSD wrapper; its eval() returns the validation metric
        self.net = net

    def on_epoch_end(self, epoch, logs=None):
        score = self.net.eval()
        print('acc:{}'.format(score))


class SSD:
    """Builds, trains, and evaluates an SSD object detector.

    Wires together the backbone, SSD head, label dictionary, and data
    generator according to the parsed command-line `args`.
    """

    def __init__(self, args) -> None:
        self.args = args
        self.ssd = None
        self.train_generator = None
        self.valid_generator = None
        if args.restore_weights:
            # build with uninitialized backbone weights, then load checkpoint
            self.build_model(None)
            self.restore_weights()
        else:
            self.build_model()

    def build_model(self, weights='imagenet'):
        """Build the backbone and SSD head networks.

        Args:
            weights: forwarded to the backbone factory
                ('imagenet' pretrained, or None for random init).
        """
        self.build_dictionary()
        self.input_shape = (
            self.args.height, self.args.width, self.args.channels)

        self.backbone = self.args.backbone(
            self.input_shape, n_layers=self.args.layers, weights=weights)

        anchors, features, ssd = build_ssd(
            self.input_shape, self.backbone, n_layers=self.args.layers,
            n_classes=self.n_classes)

        self.n_anchors = anchors
        self.feature_shapes = features
        self.ssd = ssd

    def build_dictionary(self):
        """Load the label CSV and split keys into train/validation dicts."""
        path = os.path.join(self.args.data_path, self.args.train_labels)

        dictionary, classes = build_label_dictionary(path)
        self.n_classes = len(classes)

        keys = list(dictionary.items())
        if self.args.debug:
            # small subset for fast debug runs
            keys = keys[:100]
        train_dict, valid_dict = train_test_split(keys,
                                                  train_size=self.args.train_split)
        self.train_dict = dict(train_dict)
        self.valid_dict = dict(valid_dict)

    def build_generator(self):
        """Create the training data generator (shuffling enabled)."""
        self.train_generator = DataGenerator(
            self.args, self.args.batch_size, self.train_dict, self.n_classes,
            self.feature_shapes,
            self.n_anchors, True)

    def train(self):
        """Compile the model and run training with checkpoint/LR callbacks."""
        if self.train_generator is None:
            self.build_generator()

        optimizer = Adam(learning_rate=1e-3)

        # classification head uses focal loss, offset head uses smooth L1
        loss = [focal_loss_categorical, smooth_l1_loss]

        self.ssd.compile(optimizer=optimizer, loss=loss)

        save_dir = os.path.join(os.getcwd(), self.args.save_dir)
        model_name = self.backbone.name
        model_name += '-' + str(self.args.layers) + "layer"

        if self.args.threshold < 1.0:
            model_name += "-extra_anchors"

        model_name += "-"
        model_name += self.args.dataset
        model_name += '-{epoch:03d}.h5'

        log = "# of classes %d" % self.n_classes
        print_log(log, self.args.verbose)
        log = "Batch size: %d" % self.args.batch_size
        print_log(log, self.args.verbose)
        log = "Weights filename: %s" % model_name
        print_log(log, self.args.verbose)
        if not os.path.isdir(save_dir):
            os.makedirs(save_dir)
        filepath = os.path.join(save_dir, model_name)

        checkpoint = ModelCheckpoint(
            filepath=filepath, verbose=1, save_weights_only=True)
        scheduler = LearningRateScheduler(lr_scheduler)

        callback = [checkpoint, scheduler,
                    AccuracyCallback(self)]

        self.ssd.fit(x=self.train_generator,
                     use_multiprocessing=True,
                     callbacks=callback, epochs=self.args.epochs,
                     workers=self.args.workers, verbose=self.args.verbose)

    def restore_weights(self):
        """Load previously saved model weights from the save directory."""
        save_dir = os.path.join(os.getcwd(), self.args.save_dir)
        filename = os.path.join(save_dir, self.args.restore_weights)
        log = "Loading weights: %s" % filename
        # BUGFIX: was print(log, self.args.verbose), which printed the
        # verbosity level instead of honoring it like everywhere else
        print_log(log, self.args.verbose)
        self.ssd.load_weights(filename)

    def print_summary(self):
        """Print network summary for debugging purposes."""
        from tensorflow.keras.utils import plot_model
        if self.args.summary:
            self.backbone.summary()
            self.ssd.summary()
            plot_model(self.backbone,
                       to_file="backbone.png",
                       show_shapes=True)

    def detect_objects(self, image):
        """Run a single image through the SSD and apply NMS.

        Returns (objects, indexes, scores, boxes) with objects/scores
        already filtered by the NMS-selected indexes.
        """
        # NOTE(review): eval() feeds img_as_float images (already in [0, 1]);
        # dividing by 255 again here may double-normalize — confirm against
        # what DataGenerator feeds during training.
        image = np.expand_dims(image / 255., axis=0)
        classes, offsets = self.ssd.predict(image)
        image = np.squeeze(image, axis=0)
        classes = np.squeeze(classes)
        offsets = np.squeeze(offsets)
        objects, indexes, scores, boxes \
            = get_nms(self.args, image, classes, offsets,
                      self.feature_shapes)
        objects = objects[indexes]
        scores = scores[indexes]
        return objects, indexes, scores, boxes

    def output(self, pixel_array):
        """Format detections as 'score x y w h' tuples for submission."""
        objects, indexes, scores, boxes = self.detect_objects(pixel_array)
        result = ''
        sep = ' '
        for i in range(len(indexes)):
            # boxes are stored as (x_min, x_max, y_min, y_max)
            x = boxes[i][0]
            w = boxes[i][1] - x
            y = boxes[i][2]
            h = boxes[i][3] - y
            if i > 0:
                result += sep
            result += '%f %f %f %f %f' % (scores[i], x, y, w, h)
        return result

    def eval(self):
        """Compute the mean IoU metric over sampled validation images."""
        valid_keys = list(self.valid_dict.keys())

        # NOTE(review): np.random.choice samples WITH replacement by
        # default, so the same validation image may be scored twice
        keys = np.random.choice(valid_keys, size=self.args.valid_count)
        result = []
        for key in keys:
            # keys look like '<video_id>-<pic_id>'
            video_id, pic_id = key.split('-')
            image_path = os.path.join(self.args.data_path, 'train_images',
                                      'video_' + video_id, pic_id + '.jpg')
            labels = self.valid_dict[key]
            labels = np.array(labels)
            # drop the trailing class column to get the gt boxes
            gt_box = labels[:, :-1]

            if self.args.no_img:
                # debug path: skip disk I/O entirely
                image = np.zeros(self.input_shape)
            else:
                image = skimage.img_as_float(imread(image_path))

            objects, indexes, scores, boxes = self.detect_objects(image)

            result.append(iou_metric(gt_box, np.array(boxes)))

        return np.mean(result)


def ssd_parser():
    """Instantiate a command line parser for ssd network model
    building, training, and testing
    """
    parser = argparse.ArgumentParser(description='SSD for object detection')
    # arguments for model building and training
    help_ = "Number of feature extraction layers of SSD head after backbone"
    parser.add_argument("--layers",
                        default=4,
                        type=int,
                        help=help_)
    help_ = "Train data ratio"
    parser.add_argument("--train_split",
                        default=0.8,
                        type=float,
                        help=help_)
    help_ = "Batch size during training"
    parser.add_argument("--batch_size",
                        default=4,
                        type=int,
                        help=help_)
    help_ = "Number of epochs to train"
    parser.add_argument("--epochs",
                        default=200,
                        type=int,
                        help=help_)
    help_ = "Number of data generator worker threads"
    parser.add_argument("--workers",
                        default=4,
                        type=int,
                        help=help_)
    help_ = "Labels IoU threshold"
    parser.add_argument("--threshold",
                        default=0.6,
                        type=float,
                        help=help_)
    help_ = "Backbone or base network"
    parser.add_argument("--backbone",
                        default=build_backbone,
                        help=help_)
    help_ = "Train the model"
    parser.add_argument("--train",
                        default=False,
                        action='store_true',
                        help=help_)
    help_ = "Print model summary (text and png)"
    parser.add_argument("--summary",
                        default=False,
                        action='store_true',
                        help=help_)
    help_ = "Directory for saving filenames"
    parser.add_argument("--save-dir",
                        default="weights",
                        help=help_)

    # inputs configurations
    help_ = "Input image height"
    parser.add_argument("--height",
                        default=720,
                        type=int,
                        help=help_)
    help_ = "Input image width"
    parser.add_argument("--width",
                        default=1280,
                        type=int,
                        help=help_)
    help_ = "Input image channels"
    parser.add_argument("--channels",
                        default=3,
                        type=int,
                        help=help_)

    # dataset configurations
    help_ = "Path to dataset directory"
    parser.add_argument("--data-path",
                        default="../../input/tensorflow-great-barrier-reef",
                        help=help_)
    help_ = "Train labels csv file name"
    parser.add_argument("--train-labels",
                        default="train.csv",
                        help=help_)

    # configurations for evaluation of a trained model
    help_ = "Load h5 model trained weights"
    parser.add_argument("--restore-weights",
                        help=help_)
    help_ = "Evaluate model"
    parser.add_argument("--evaluate",
                        default=False,
                        action='store_true',
                        help=help_)
    help_ = "Dataset Name"
    # BUGFIX: was declared with action='store_true' AND a string default,
    # which made '--dataset <name>' a parse error and could only flip the
    # value to True; it is a plain string option
    parser.add_argument("--dataset",
                        default='Reef',
                        help=help_)
    help_ = "Class probability threshold (>= is an object)"
    parser.add_argument("--class-threshold",
                        default=0.5,
                        type=float,
                        help=help_)
    help_ = "NMS IoU threshold"
    parser.add_argument("--iou-threshold",
                        default=0.2,
                        type=float,
                        help=help_)
    help_ = "Use soft NMS or not"
    parser.add_argument("--soft-nms",
                        default=False,
                        action='store_true',
                        help=help_)

    help_ = "valid count"
    # BUGFIX: type=int so CLI-supplied values are not left as strings
    parser.add_argument("--valid_count",
                        default=10,
                        type=int,
                        help=help_)

    help_ = "early stop patience"
    # BUGFIX: type=int so CLI-supplied values are not left as strings
    parser.add_argument("--patience",
                        default=15,
                        type=int,
                        help=help_)

    # debug configuration
    help_ = "Level of verbosity for print function"
    parser.add_argument("--verbose",
                        default=1,
                        type=int,
                        help=help_)

    # debug configuration
    help_ = "No image"
    parser.add_argument("--no_img",
                        default=False,
                        action='store_true',
                        help=help_)

    help_ = "fast run for debug"
    parser.add_argument("--debug",
                        default=False,
                        action='store_true',
                        help=help_)

    help_ = "output for submit"
    parser.add_argument("--submit",
                        default=False,
                        action='store_true',
                        help=help_)

    return parser


class Option:
    """Programmatic stand-in for the ssd_parser() argparse namespace.

    Used (e.g. in notebooks) to build an SSD via get_model() without
    parsing a command line.
    """

    def __init__(self):
        self.layers = 4
        self.train_split = 0.8
        self.batch_size = 4
        self.epochs = 200
        self.workers = 4
        # NOTE(review): parser default for --threshold is 0.6; this 0.2
        # looks intentional for inference-time use but verify
        self.threshold = 0.2
        self.backbone = build_backbone
        self.train = False
        self.summary = False
        self.save_dir = 'weights'
        self.height = 720
        self.width = 1280
        self.channels = 3
        self.data_path = "../../input/tensorflow-great-barrier-reef"
        self.train_labels = "train.csv"
        self.restore_weights = ""
        self.evaluate = False
        # NOTE(review): parser default is 'Reef' (capitalized); only used
        # in checkpoint file names, but confirm the intended casing
        self.dataset = "reef"
        self.class_threshold = 0.5
        self.iou_threshold = 0.2
        self.soft_nms = False
        self.valid_count = 20
        self.patience = 15
        self.verbose = 1
        self.no_img = False
        self.debug = False
        # added for parity with the --submit flag in ssd_parser()
        self.submit = False


def get_model(option: Option):
    """Build and return an SSD wrapper configured by *option*."""
    model = SSD(option)
    return model


if __name__ == '__main__':
    # parse CLI options
    args = ssd_parser().parse_args()

    # shrink the run for quick debug iteration
    if args.debug:
        args.valid_count = 20
        args.epochs = 5

    ssd = SSD(args)

    print(args)

    if args.summary:
        ssd.print_summary()

    if args.train:
        ssd.train()

    if args.evaluate:
        ssd.eval()
