import os

from keras.callbacks import CSVLogger, ModelCheckpoint, LearningRateScheduler
from keras.optimizers import SGD, Adam

from model import main_model
from utils.boxes import create_prior_boxes, to_point_form
from utils.data_management import DataManager, get_class_names
from utils.data_generator import DataGenerator
from utils.training import MultiboxLoss, scheduler

import numpy as np
import math

def freeze_model_layer(model, layer):
    """Mark the named layers of ``model`` as non-trainable.

    Args:
        model: Keras model whose ``layers`` are scanned by ``name``.
        layer: iterable of node names, possibly in TensorFlow
            ``"layer_name/tensor_name"`` form; only the part before the
            first ``"/"`` is matched against each layer's ``name``.
    """
    # Strip the tensor suffix once and use a set for O(1) membership tests
    # (the original rebound the ``layer`` parameter and scanned a list).
    frozen_names = {name.split("/")[0] for name in layer}
    for model_layer in model.layers:
        if model_layer.name in frozen_names:
            model_layer.trainable = False

# Training image width and height in pixels. ``train_y`` holds the height;
# the name is kept as-is for backward compatibility with existing callers.
train_w = 64
train_y = 64

def get_configuration_file():
    """Build the SSD prior-box configuration for the current image size.

    Scales follow the SSD paper: s_k = s_min + (s_max - s_min) * (k - 1) / (m - 1)
    for k = 1..m+1 (the extra last scale only serves as a max size), with an
    additional smaller size (s_min / s0_min_scale) prepended for the first
    feature map.

    Returns:
        dict consumed by ``create_prior_boxes``: feature-map grid sizes,
        image size, per-map strides, min/max box sizes in pixels, aspect
        ratios, box-shape masks, and encoding variances.
    """
    # NOTE(review): the article suggests a dynamic minimum of 0.1 (s_min / 2);
    # 1.5 looks like an empirical compromise — confirm against the paper.
    s0_min_scale = 1.5  # 2
    s_min = 0.5
    s_max = 0.8
    w, h = [train_w, train_y]
    m = 3
    # m + 1 pixel scales, e.g. for s in (0.2 .. 0.4): s_min + step * (k - 1)
    scales = [math.ceil((s_min + (s_max - s_min) * (k - 1) / (m - 1)) * w)
              for k in range(1, m + 2)]
    # Extra small size covers faces below s_min on the first feature map.
    min_sizes = [math.ceil(w * s_min / s0_min_scale)] + scales[:-1]
    max_sizes = scales
    steps = [8, 16, 32, 64]

    configuration = {
        # (rows, cols) per feature map; rows come from the height so a
        # rectangular input yields the intended grids — e.g. (24, 32),
        # (12, 16), (6, 8), (3, 4). Identical output for square input.
        'feature_map_sizes': [(h // s, w // s) for s in steps],
        'image_size': [w, h],
        'steps': steps,
        'min_sizes': min_sizes,
        'max_sizes': max_sizes,
        'aspect_ratios': [[2], [], [], [2]],
        'square_msk': [[True, False], True, True, [True, False]],  # small, big
        'rectangle_msk': [[True, True], False, False, [True, True]],  # [w>h, h>w]
        'variance': [0.1, 0.2]}
    return configuration

# Training entry point: loads the face dataset, builds the SSD model,
# splits train/validation data, and runs fit_generator with checkpointing.
if __name__ == "__main__":
    model_name = 'ssd_face_detect'
    num_classes = 2  # includes the background class
    # hyper-parameters
    batch_size = 2
    num_epochs = 250
    alpha_loss = 1.0             # localization-loss weight in the multibox loss
    learning_rate = 1e-3
    momentum = .9                # only used if switching back to SGD below
    weight_decay = 5e-4          # only used if switching back to SGD below
    gamma_decay = 0.1
    # scheduled_epochs = [155, 195, 235]
    negative_positive_ratio = 3  # hard-negative mining ratio

    # Data: boxes on disk are in center form; convert to point form
    # (x_min, y_min, x_max, y_max), keeping any extra columns (labels).
    all_data = np.load("./class_dataset/train_img.npy")
    all_boxes = np.load("./class_dataset/train_face_pos.npy")
    all_boxes = np.asarray([np.hstack([to_point_form(all_box), all_box[:, 4:]])
                            for all_box in all_boxes])

    # Shuffle WITHOUT replacement so the train and validation sets are
    # disjoint. (The previous np.random.randint drew duplicate indices,
    # leaking samples between splits and dropping others entirely.)
    shuffled_indices = np.random.permutation(len(all_data))
    train_test_rate = 0.2
    train_data_len = int((1 - train_test_rate) * len(all_data))
    train_data_msk = shuffled_indices[:train_data_len]
    val_data_msk = shuffled_indices[train_data_len:]

    train_data = [all_data[train_data_msk], all_boxes[train_data_msk]]
    val_data = [all_data[val_data_msk], all_boxes[val_data_msk]]

    model, freeze_node = main_model((64, 64, 3), num_classes)
    # Resume from a previous checkpoint; by_name tolerates topology changes.
    model.load_weights("./trained_models/ssd_face_detect/weights.01-5.69.h5", by_name=True)
    # freeze_model_layer(model, freeze_node) #[:14]
    prior_boxes = to_point_form(create_prior_boxes(get_configuration_file()))
    multibox_loss = MultiboxLoss(num_classes, negative_positive_ratio, alpha_loss)
    optimizer = Adam(learning_rate)  # SGD(learning_rate, momentum, weight_decay)
    model.compile(optimizer, loss=multibox_loss.compute_loss)
    data_generator = DataGenerator(
            train_data, prior_boxes, batch_size, num_classes, val_data)

    # callbacks
    model_path = './trained_models/' + model_name + '/'
    save_path = model_path + 'weights.{epoch:02d}-{val_loss:.2f}.h5'
    os.makedirs(model_path, exist_ok=True)  # avoids the exists/create race
    log = CSVLogger(model_path + model_name + '.log')
    checkpoint = ModelCheckpoint(save_path, verbose=1, save_weights_only=False,
                                 save_best_only=True)
    # reduce_on_plateau = ReduceLROnPlateau(factor=gamma_decay, verbose=1)
    # scheduler = LearningRateManager(learning_rate, gamma_decay, scheduled_epochs)
    learning_rate_schedule = LearningRateScheduler(scheduler, verbose=1)
    callbacks = [checkpoint, log, learning_rate_schedule]
    # callbacks = [checkpoint, log, reduce_on_plateau]

    # training
    model.summary()
    model.fit_generator(data_generator.flow(mode='train'),
                        steps_per_epoch=len(train_data[0]) // batch_size,
                        epochs=num_epochs,
                        verbose=1,
                        callbacks=callbacks,
                        validation_data=data_generator.flow(mode='val'),
                        validation_steps=len(val_data[0]) // batch_size,
                        use_multiprocessing=False,
                        workers=1)
