import argparse

import tensorflow as tf
import tensorflow.keras.models
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2

from graph import GraphConvolution
from utils import *


# class Config(object):
#     dataset = 'cora'
#     filter = 'localpool'  # Local pooling filters (see 'renormalization trick' in Kipf & Welling, arXiv 2016)
#     # filter = 'chebyshev'  # Chebyshev polynomial basis filters (Defferard et al., NIPS 2016)
#     max_degree = 2  # maximum polynomial degree
#     sym_norm = True  # symmetric (True) vs. left-only (False) normalization
#     NB_EPOCH = 20
#     PATIENCE = 10  # early stopping patience
#     support = 1
#     epochs = 100


def convert_sparse_matrix_to_sparse_tensor(x):
    """Convert a scipy sparse matrix to a ``tf.SparseTensor``.

    Parameters
    ----------
    x : scipy sparse matrix
        Any scipy sparse format that supports ``.tocoo()``.

    Returns
    -------
    tf.SparseTensor
        Sparse tensor with the same shape and non-zero entries as ``x``.
    """
    coo = x.tocoo()
    # np.mat is deprecated; build the (nnz, 2) index array directly instead
    # of going through a matrix and transposing it.
    indices = np.column_stack((coo.row, coo.col))
    return tf.SparseTensor(indices, coo.data, coo.shape)


def get_inputs(adj, x):
    """Preprocess the adjacency matrix according to ``FLAGS.filter``.

    Parameters
    ----------
    adj : scipy sparse adjacency matrix of the graph.
    x : node feature matrix.

    Returns
    -------
    (graph, adj_input) where ``graph`` is the list of model inputs
    (features followed by one or more dense filter matrices) and
    ``adj_input`` is the matching list of Keras ``Input`` placeholders.

    Raises
    ------
    ValueError
        If ``FLAGS.filter`` is neither 'localpool' nor 'chebyshev'.
    """
    if FLAGS.filter == 'localpool':
        # Single renormalized adjacency filter (Kipf & Welling trick).
        print('Using local pooling filters...')
        adj_ = preprocess_adj(adj, FLAGS.sym_norm)
        adj_ = adj_.todense()
        graph = [x, adj_]
        adj_input = [Input(batch_shape=(None, None), sparse=False)]
    elif FLAGS.filter == 'chebyshev':
        # max_degree + 1 Chebyshev basis filters of the rescaled Laplacian.
        print('Using Chebyshev polynomial basis filters...')
        L = normalized_laplacian(adj, FLAGS.sym_norm)
        L_scaled = rescale_laplacian(L)
        T_k = chebyshev_polynomial(L_scaled, FLAGS.max_degree)
        support = FLAGS.max_degree + 1
        graph = [x] + T_k
        adj_input = [Input(batch_shape=(None, None), sparse=False) for _ in range(support)]
    else:
        # ValueError is more specific than the bare Exception raised before,
        # and remains catchable by any caller that caught Exception.
        raise ValueError('Invalid filter type.')
    return graph, adj_input


def build_model(x, y, adj_input):
    """Build and compile a two-layer graph-convolutional classifier.

    Parameters
    ----------
    x : feature matrix; only its column count ``x.shape[1]`` is used.
    y : one-hot label matrix; only the class count ``y.shape[1]`` is used.
    adj_input : list of Keras ``Input`` placeholders for the graph filter(s),
        as produced by ``get_inputs``.

    Returns
    -------
    A compiled Keras ``Model`` taking ``[feature_input] + adj_input`` and
    producing a softmax over ``y.shape[1]`` classes.
    """
    fea_input = Input(batch_shape=(None, x.shape[1]), name='fea_input')
    net = Dropout(0.5)(fea_input)
    net = GraphConvolution(512, FLAGS.support, activation='relu', kernel_regularizer=l2(5e-4))([net] + adj_input)
    net = Dropout(0.5)(net)
    net = GraphConvolution(256, FLAGS.support, activation='relu', kernel_regularizer=l2(5e-4))([net] + adj_input)
    net = Dropout(0.5)(net)
    net = Flatten()(net)
    output = Dense(y.shape[1], activation='softmax')(net)
    model = Model(inputs=[fea_input] + adj_input, outputs=output)
    # 'lr' is a deprecated alias in tf.keras optimizers (removed in newer
    # releases); 'learning_rate' is the supported keyword.
    model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.01))
    return model


def train_model(x, y, model: tensorflow.keras.models.Model, train_mask, y_train, y_val, idx_train, idx_val, batch_size):
    """Train ``model`` for ``FLAGS.epochs`` epochs, printing split metrics.

    Each iteration runs one Keras epoch over the whole graph
    (``shuffle=False`` so node order stays aligned with the adjacency
    inputs), then re-predicts on all nodes and reports masked
    train/validation loss and accuracy via ``evaluate_preds``.

    Parameters
    ----------
    x : list of model inputs (features + adjacency filters).
    y : full label matrix fed to ``fit``; ``train_mask`` weights out
        non-training nodes.
    model : compiled Keras model from ``build_model``.
    y_train, y_val : label matrices for the train/validation splits.
    idx_train, idx_val : node indices of the train/validation splits.
    batch_size : number of nodes per batch (the whole graph).

    Returns
    -------
    The trained model.
    """
    # NOTE(review): the previously defined-but-unused TrainCallback inner
    # class and the commented-out fit() call were removed as dead code.
    for _ in range(FLAGS.epochs):
        model.fit(x, y, sample_weight=train_mask, batch_size=batch_size, shuffle=False, verbose=1)
        y_pred = model.predict(x, batch_size=batch_size)
        train_val_loss, train_val_acc = evaluate_preds(y_pred, [y_train, y_val], [idx_train, idx_val])
        print("train_loss= {:.2f}".format(train_val_loss[0]), "train_acc= {:.2f}".format(train_val_acc[0]),
              "val_loss= {:.2f}".format(train_val_loss[1]), "val_acc= {:.2f}".format(train_val_acc[1]))
    return model


def estimate_model(model, x, y_test, idx_test, batch_size):
    """Predict on the full graph and print masked test-set loss/accuracy."""
    predictions = model.predict(x, batch_size=batch_size)
    loss, acc = evaluate_preds(predictions, [y_test], [idx_test])
    print("Test set results:", "loss= {:.2f}".format(loss[0]), "accuracy= {:.4f}".format(acc[0]))


def main():
    """Load the dataset, build and train the GCN, then report test metrics."""
    features, adj, labels = load_data(path=FLAGS.path, dataset=FLAGS.dataset)
    batch_size = adj.shape[1]
    # Row-normalize features so each node's feature vector sums to 1.
    features /= features.sum(1).reshape(-1, 1)
    y_train, y_val, y_test, idx_train, idx_val, idx_test, train_mask = get_splits(labels)
    graph, adj_input = get_inputs(adj, features)
    gcn = build_model(features, labels, adj_input)
    gcn = train_model(graph, labels, gcn, train_mask, y_train, y_val, idx_train, idx_val, batch_size)
    estimate_model(gcn, graph, y_test, idx_test, batch_size)


def str2bool(value):
    """Parse a command-line boolean flag value.

    argparse's ``type=bool`` is a known pitfall: any non-empty string —
    including ``"False"`` — is truthy, so the flag could never be disabled
    from the CLI. This converter accepts the usual true/false spellings and
    rejects anything else.

    Raises
    ------
    argparse.ArgumentTypeError
        If ``value`` is not a recognized boolean spelling.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--path', type=str, default="./data/cora/",
                        help='data path.')
    parser.add_argument('--dataset', type=str, default='cora',
                        help='dataset name.')
    parser.add_argument('--filter', type=str, default='localpool',
                        help="'localpool' (renormalization trick, Kipf & Welling, arXiv 2016) "
                             "or 'chebyshev' (polynomial basis filters, Defferrard et al., NIPS 2016).")
    parser.add_argument('--max_degree', type=int, default=2,
                        help="maximum polynomial degree.")
    # Help text previously duplicated --max_degree's description.
    parser.add_argument('--support', type=int, default=1,
                        help="number of graph filter supports per GraphConvolution layer.")
    # type=bool would make bool('False') == True; use a real parser instead.
    parser.add_argument('--sym_norm', type=str2bool, default=True,
                        help="symmetric (True) vs. left-only (False) normalization.")
    parser.add_argument('--PATIENCE', type=int, default=5,
                        help="early stopping patience.")
    parser.add_argument('--epochs', type=int, default=5,
                        help="train epochs.")

    FLAGS = parser.parse_args()

    from tensorflow.python.client import device_lib

    print([device.name for device in device_lib.list_local_devices()])
    main()
