from typing import List, Union

import numpy as np
import tensorflow as tf
from tensorflow.python.keras import Model
from tensorflow.python.keras.callbacks import Callback

from citeu_data import CiteuData
from metrics import metrics_function, is_metric


class UpdateInnerGraph(Callback):
    """Keras callback that periodically rebuilds the model's inner
    user/user and item/item similarity graphs from current embeddings.

    Depending on ``update_time`` (a spec string containing 'batch' and/or
    'epoch'), the rebuild fires every ``patience`` batches and/or epochs.
    """

    def __init__(self, update_time, patience, sim_percentile):
        super().__init__()
        self.percentile = sim_percentile
        self.wait = 0
        self.patience = patience
        # Trigger flags derived from the update_time spec string.
        timing = str(update_time)
        self.update_on_batch_end = 'batch' in timing
        self.update_on_epoch_end = 'epoch' in timing

    def on_train_begin(self, logs=None):
        # Restart the counter for every fit() call.
        self.wait = 0

    def on_train_end(self, logs=None):
        pass

    def on_train_batch_end(self, batch, logs=None):
        if not self.update_on_batch_end:
            return
        self.wait += 1
        if self.wait >= self.patience:
            self.update_graph()
            self.wait = 0

    def on_epoch_end(self, epoch, logs=None):
        if not self.update_on_epoch_end:
            return
        self.wait += 1
        if self.wait >= self.patience:
            self.update_graph()
            self.wait = 0

    def update_graph(self):
        """Recompute both similarity graphs from the current embeddings
        and push them into the model's dynamic-graph layers."""
        net: Model = self.model
        user_count = net.input_shape[0][1]
        item_count = net.input_shape[1][1]
        # Sub-model exposing the intermediate embedding outputs.
        embedder = Model(inputs=net.inputs,
                         outputs=[net.get_layer('user_out').output,
                                  net.get_layer('item_out').output])
        all_users = np.arange(0, user_count).reshape((1, user_count))
        all_items = np.arange(0, item_count).reshape((1, item_count))
        user_emb, item_emb = embedder.predict([all_users, all_items], verbose=0)
        # Drop the leading batch axis before computing similarities.
        user_emb, item_emb = user_emb[0], item_emb[0]
        uu_graph = CiteuData.similarity_matrix(user_emb, self.percentile)
        ii_graph = CiteuData.similarity_matrix(item_emb, self.percentile)
        net.get_layer('user_dynamic_graph').update_graph(uu_graph)
        net.get_layer('item_dynamic_graph').update_graph(ii_graph)

    @staticmethod
    def cosine_sim(embedding):
        """Absolute pairwise cosine similarity between rows of ``embedding``."""
        unit_rows = tf.linalg.l2_normalize(embedding, axis=1)
        similarity = tf.matmul(unit_rows, unit_rows, transpose_b=True)
        return tf.abs(similarity)


class CustomEarlyStop(Callback):
    """Early stopping driven by training loss OR ranking metrics.

    Training is considered "still improving" while either the loss drops by
    more than ``loss_min_delta`` or any tracked metric rises by more than
    ``min_delta``. Otherwise a patience counter runs; when it expires,
    training stops and the best-seen weights are restored.

    Args:
        train_mat: training interaction matrix passed to the metric builder.
        val_mat: validation interaction matrix passed to the metric builder.
        metrics: metric name or list of names; names not recognised by
            ``is_metric`` are still forwarded to ``metrics_function`` but
            are not tracked for early stopping.
        min_delta: minimum metric increase that counts as an improvement.
        loss_min_delta: minimum loss decrease that counts as an improvement.
        patience: number of non-improving epochs tolerated before stopping.
    """

    def __init__(self, train_mat, val_mat, metrics: Union[List[str], str],
                 min_delta=0, loss_min_delta=0,
                 patience=0):
        super(CustomEarlyStop, self).__init__()
        if type(metrics) is str:
            metrics = [metrics]
        self.stopped_epoch = 0
        self.wait = 0
        # Only recognised metric names are tracked for early stopping.
        self.metrics = list(filter(is_metric, metrics))
        self.metrics_call = metrics_function(train_mat, val_mat, metrics)
        self.min_delta = min_delta
        self.loss_min_delta = loss_min_delta
        self.patience = patience
        self.best_weights = None
        # FIX: size the record by the FILTERED metric list (the original used
        # len(metrics), which could disagree with what on_epoch_end compares).
        self.metrics_record = [-np.inf] * len(self.metrics)
        self.loss_record = np.inf

    def on_train_begin(self, logs=None):
        # Reset all early-stopping state for a fresh fit() call.
        self.loss_record = np.inf
        self.metrics_record = [-np.inf] * len(self.metrics)
        self.wait = 0
        self.stopped_epoch = 0

    def on_epoch_end(self, epoch, logs=None):
        running_model: Model = self.model
        shapes = running_model.input_shape
        input_user_num = shapes[0][1]
        input_item_num = shapes[1][1]
        # Score every (user, item) pair by feeding the full id ranges.
        predict_user = np.arange(0, input_user_num)[np.newaxis, :]
        predict_item = np.arange(0, input_item_num)[np.newaxis, :]
        rating = running_model.predict([predict_user, predict_item], verbose=0)
        current_metric_dict = self.metrics_call(rating)
        # FIX: materialise as a list. The original kept a map() iterator:
        # any() partially consumed it (or short-circuiting left it lazy),
        # and it was then stored as metrics_record, so later epochs compared
        # against an exhausted iterator and the check silently degenerated.
        current_metric = [current_metric_dict[m] for m in self.metrics]

        loss_improved = self.loss_record - logs['loss'] > self.loss_min_delta
        metric_improved = any(cur - rec > self.min_delta
                              for cur, rec in zip(current_metric, self.metrics_record))
        if loss_improved or metric_improved:
            self.metrics_record = current_metric
            self.loss_record = logs['loss']
            self.wait = 0
            self.best_weights = self.model.get_weights()
        else:
            self.wait += 1
            if self.wait >= self.patience:
                self.stopped_epoch = epoch
                self.model.stop_training = True
                print("Restoring model weights from the end of the best epoch.")
                self.model.set_weights(self.best_weights)

    def on_train_end(self, logs=None):
        if self.stopped_epoch > 0:
            print("Epoch %05d: early stopping" % (self.stopped_epoch + 1))
