import numpy as np
import tensorflow as tf
from tensorflow.keras.models import save_model, load_model
from tensorflow.keras.optimizers import Adam

from other_utils import function_timer


def adj_mat(size: int = 10):
    """Generate a random symmetric 0/1 adjacency matrix with self-loops.

    The matrix is printed as Python source (via
    ``other_utils.print_array_python``) so it can be pasted into test code.

    Args:
        size: number of nodes; the matrix is ``size x size``.

    Returns:
        np.ndarray: the generated adjacency matrix (fix: previously the
        matrix was only printed and never returned, so callers could not
        use it programmatically).
    """
    a = np.random.randint(0, 2, (size, size))
    # Keep only the upper triangle, then mirror it below the diagonal to
    # make the matrix symmetric (subtract the diagonal once so it is not
    # counted twice).
    a = np.triu(a)
    a += a.T - np.diag(a.diagonal())
    # Force self-loops: every node is adjacent to itself.
    np.fill_diagonal(a, 1)
    from other_utils import print_array_python
    print(print_array_python(a))
    return a


def loss_test():
    """Smoke-test BPRLossWithinSample on a small hand-crafted example."""
    from bpr_loss import BPRLossWithinSample

    num_user = 3
    # Joint user+item adjacency: rows/cols 0-2 are users, 3-9 are items.
    adjacency = np.array([[1, 1, 0, 0, 0, 1, 0, 1, 1, 0],
                          [1, 1, 0, 0, 0, 1, 1, 0, 1, 0],
                          [0, 0, 0, 1, 0, 0, 0, 0, 0, 1],
                          [0, 0, 1, 0, 1, 1, 0, 0, 1, 0],
                          [0, 0, 0, 1, 0, 1, 1, 1, 0, 1],
                          [1, 1, 0, 1, 1, 1, 1, 1, 0, 0],
                          [0, 1, 0, 0, 1, 1, 0, 1, 1, 1],
                          [1, 0, 0, 0, 1, 1, 1, 1, 1, 0],
                          [0, 1, 0, 1, 0, 0, 1, 1, 1, 1],
                          [0, 1, 1, 0, 1, 0, 1, 0, 1, 0]], dtype='int')

    # Two batches of 7 item embeddings, 5 dims each.
    item_emb = tf.Variable(
        np.array(
            [[[0, 0, 0, 0, 0],
              [1, 1, 1, 1, 1],
              [2, 2, 2, 2, 2],
              [3, 0, 0, 4, 0],
              [4, 1, 1, 1, 1],
              [5, 0, 2, 3, 2],
              [5, 2, 1, 2, 5]],

             [[0, 0, 0, 1, 0],
              [1, 1, 1, 3, 1],
              [2, 2, 4, 3, 2],
              [3, 0, 0, 0, 1],
              [4, 4, 1, 1, 1],
              [5, 2, 6, 2, 2],
              [5, 2, 2, 8, 0]]]),
        dtype='float')

    # Two batches of 3 user embeddings, 5 dims each.
    user_emb = tf.Variable(
        np.array(
            [[[0, 1, 0, 1, 0],
              [1, 1, 1, 2, 1],
              [2, 2, 2, 4, 2]],

             [[1, 0, 1, 1, 0],
              [1, 1, 5, 3, 1],
              [4, 2, 4, 3, 2]]]),
        dtype='float')

    # User-item sub-block of the adjacency feeds the loss.
    ui_graph = adjacency[0:num_user, num_user:]
    loss_layer = BPRLossWithinSample(graph_ui=ui_graph)
    scores = tf.matmul(user_emb, item_emb, transpose_b=True)
    print(loss_layer([user_emb, item_emb, scores]))


def metrics_test():
    """Compare the NumPy, TF-sparse, and Keras-layer metric implementations
    on the same tiny training/test/rating matrices and print all results."""
    # Fix: np.bool / np.int were deprecated aliases removed in NumPy 1.24;
    # use the builtin types instead.
    training = np.array([[0, 1, 0, 0, 1],
                         [1, 0, 0, 0, 0],
                         [0, 0, 0, 1, 1]], dtype=bool)
    test = np.array([[1, 0, 1, 0, 0],
                     [0, 1, 0, 0, 0],
                     [0, 1, 0, 0, 0]], dtype=int)

    rating = np.array(
        [[3, 1, 2, 4, 0],
         [0, 6, 3, 2, 1],
         [2, 0, 1, 4, 9]])

    import metrics
    rel = metrics.calculate_relevant_mat(training, test, rating)

    metrics_list = ['pre@2', 're@3', 're', 'ndcg@3', 'ap@2']
    # Reference implementation operating on plain NumPy arrays.
    metrics_call = metrics.metrics_function(training, test, metrics_list)
    np_version = metrics_call(rating)

    from tensor_utils import to_sparse_tensor
    train_ui = to_sparse_tensor(training, dtype=tf.float32)
    test_ui = to_sparse_tensor(test, dtype=tf.float32)
    rating = tf.constant(rating, dtype='float32')

    # Sparse-tensor relevance matrix should match the NumPy one.
    rel_mat = metrics.sparse_tensor_calculate_relevant_mat(rating, train_ui, test_ui)
    print(rel)
    print(rel_mat)

    tf_version = metrics.sparse_tensor_metrics_function(train_ui, test_ui, metrics_list)(rating)

    # Keras-layer variant expects a leading batch dimension.
    layer = metrics.MetricsLayer(train_ui, test_ui, metrics_list)
    layer_version = layer(tf.expand_dims(rating, axis=0))

    from other_utils import print_dict
    print(print_dict(np_version))
    print('=' * 20)
    print(print_dict(tf_version))
    print('=' * 20)
    for i, n in enumerate(layer.metrics_name):
        print(n, ' : ', layer_version[i])


def model_test():
    """End-to-end smoke test: build a GCNREC model, train briefly, save it to
    HDF5, reload it, and assert the reloaded model predicts identically."""
    # Hard-coded symmetric adjacency over 3 users + 7 items.
    a = np.array([[1, 1, 0, 0, 0, 1, 0, 1, 1, 0],
                  [1, 1, 0, 0, 0, 1, 1, 0, 1, 0],
                  [0, 0, 0, 1, 0, 0, 0, 0, 0, 1],
                  [0, 0, 1, 0, 1, 1, 0, 0, 1, 0],
                  [0, 0, 0, 1, 0, 1, 1, 1, 0, 1],
                  [1, 1, 0, 1, 1, 1, 1, 1, 0, 0],
                  [0, 1, 0, 0, 1, 1, 0, 1, 1, 1],
                  [1, 0, 0, 0, 1, 1, 1, 1, 1, 0],
                  [0, 1, 0, 1, 0, 0, 1, 1, 1, 1],
                  [0, 1, 1, 0, 1, 0, 1, 0, 1, 0]])
    num_user = 3
    num_item = a.shape[0] - num_user

    # Slice the joint adjacency into the user/user, user/item and item/item
    # sub-graphs the model consumes (the item/user block is its transpose
    # and is not needed here).
    graph_uu = a[0:num_user, 0:num_user]
    graph_ui = a[0:num_user, num_user:]
    graph_ii = a[num_user:, num_user:]

    val_set = np.array([[1, 0, 0, 1, 0, 0, 0],
                        [1, 0, 0, 0, 0, 0, 1],
                        [0, 0, 1, 1, 1, 0, 0]])

    item_features = np.random.rand(num_item, 6)
    metrics = ['pre@3', 'recall@2', 'map@5', 'ndcg@4']
    from GCNREC_model import build_gcnrec_model
    model = build_gcnrec_model(num_user, num_item,
                               graph_uu, graph_ii, graph_ui, item_features, 5, 5, 0.001,
                               val_set, metrics)

    model.compile(optimizer=Adam())

    from citeu_data import CiteuData
    pos_g, neg_g = CiteuData.create_pos_neg_sample_generator(train_graph_ui=graph_ui)

    def generator():
        # Yields ([user_ids, item_ids, pos_pairs, neg_pairs], None) batches
        # forever; all arrays carry a leading batch dimension of 1.
        user = np.arange(0, num_user)[np.newaxis, :]
        item = np.arange(0, num_item)[np.newaxis, :]

        while True:
            pos = np.array(next(pos_g))[np.newaxis, :]
            neg = np.array(next(neg_g))[np.newaxis, :]
            yield [user, item, pos, neg], None

    from callbacks import CustomEarlyStop
    from callbacks import UpdateInnerGraph
    model.fit(generator(),
              steps_per_epoch=1,
              callbacks=[UpdateInnerGraph(update_time='batch', patience=0, sim_percentile=50),
                         CustomEarlyStop(metrics, patience=5)],
              epochs=20)

    for layer_name in ('user_static_graph', 'user_dynamic_graph', 'user_inter_graph'):
        print(model.get_layer(layer_name).graph)

    # Deterministic save path: uuid5 is derived from the model name alone.
    import os
    import uuid
    save_root = './test_models'
    model_name = 'test_save'
    model_uuid = str(uuid.uuid5(uuid.NAMESPACE_X500, model_name)).replace('-', '')
    model_dir = os.path.join(save_root, model_name)
    # Fix: replaces `if os.path.exists(...) is not True` — exist_ok avoids
    # both the anti-idiom and the check-then-create race.
    os.makedirs(model_dir, exist_ok=True)
    model_path = os.path.join(model_dir, model_uuid + '.h5')
    save_model(model, model_path, save_format='h5')

    from GCNREC_model import custom_objects
    loaded_model: tf.keras.Model = load_model(model_path, custom_objects=custom_objects)

    # The reloaded model must carry the same (possibly updated) inner graphs.
    for layer_name in ('user_static_graph', 'user_dynamic_graph', 'user_inter_graph'):
        print(loaded_model.get_layer(layer_name).graph)

    predict_user = np.arange(0, num_user).reshape((1, num_user))
    predict_item = np.arange(0, num_item).reshape((1, num_item))
    outputs = model.predict([predict_user, predict_item])
    load_outputs = loaded_model.predict([predict_user, predict_item])
    print(outputs)
    print('=' * 20)
    print(load_outputs)
    # Round-trip check: saving + loading must not change predictions.
    np.testing.assert_allclose(load_outputs, outputs)


def sparse_data_load_test():
    """Load CiteuData in sparse and dense form, print both load times, and
    verify every train/test/val graph matches between the two backends."""
    from citeu_data import CiteuData

    timed_load = function_timer(CiteuData)
    sparse_data, sparse_time = timed_load(graph_in_dense=False, sim_percentile=90)
    dense_data, dense_time = timed_load(graph_in_dense=True, sim_percentile=90)
    print(sparse_time)
    print(dense_time)

    # Sparse graphs are densified before comparison with the dense backend.
    for graph_attr in ('train_graph_uu', 'train_graph_ii', 'train_graph_ui',
                       'test_graph_ui', 'val_graph_ui'):
        np.testing.assert_allclose(getattr(sparse_data, graph_attr).toarray(),
                                   getattr(dense_data, graph_attr))


def generate_pos_neg_sets_test():
    """Check the sparse- and dense-backed CiteuData produce identical
    positive/negative sample sets, printing timings along the way."""
    from citeu_data import CiteuData

    timed_load = function_timer(CiteuData)
    sparse_data, sparse_time = timed_load(graph_in_dense=False, sim_percentile=95)
    dense_data, dense_time = timed_load(graph_in_dense=True, sim_percentile=95)
    print(sparse_time)
    print(dense_time)

    (pos_sets1, neg_sets1), gen_time1 = function_timer(
        sparse_data.create_pos_neg_sample_generator)()
    print(gen_time1)
    (pos_sets2, neg_sets2), gen_time2 = function_timer(
        dense_data.create_pos_neg_sample_generator)()
    print(gen_time2)

    # Element-wise comparison; True means both backends agree everywhere.
    print(all(p1 == p2 for p1, p2 in zip(pos_sets1, pos_sets2)))
    print(all(n1 == n2 for n1, n2 in zip(neg_sets1, neg_sets2)))


if __name__ == '__main__':
    # Other available smoke tests (run one at a time):
    #   model_test()
    #   metrics_test()
    #   tensor_array_test()
    #   loop_test()
    sparse_data_load_test()