from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import scipy.sparse

from utils import *
from models import GCN, MLP

import numpy as np
from scipy import sparse

# Set random seed
# Seed both NumPy and the TF1 graph-level RNG so runs are reproducible
# (graph-level only: per-op seeds are not set here).
seed = 123
np.random.seed(seed)
tf.set_random_seed(seed)

# Settings
# TF1 command-line flag machinery; values are read via the global FLAGS.
flags = tf.app.flags
FLAGS = flags.FLAGS

#flags.DEFINE_string('dataset', 'cora', 'Dataset string.')  # 'cora', 'citeseer', 'pubmed'
flags.DEFINE_string('model', 'gcn', 'Model string.')  # 'gcn', 'gcn_cheby', 'dense'
flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
flags.DEFINE_integer('epochs', 50, 'Number of epochs to train.')
flags.DEFINE_integer('hidden1', 16, 'Number of units in hidden layer 1.')
flags.DEFINE_float('dropout', 0.1, 'Dropout rate (1 - keep probability).')
flags.DEFINE_float('weight_decay', 5e-4, 'Weight for L2 loss on embedding matrix.')
flags.DEFINE_integer('early_stopping', 5, 'Tolerance for early stopping (# of epochs).')
flags.DEFINE_integer('max_degree', 3, 'Maximum Chebyshev polynomial degree.')
# NOTE(review): `patience` duplicates FLAGS.early_stopping's role; the
# training loop below uses this variable, not the flag — consider unifying.
patience = 5


# Load data
# generate_3D_data_sparse presumably returns arrays shaped
# (samples, nodes, features) and (samples, nodes, classes) — TODO confirm
# against utils.generate_3D_data_sparse.
features_data, labels_data = generate_3D_data_sparse(100)
nb_nodes = features_data.shape[1]    # vertices per mesh
ft_size = features_data.shape[2]     # input features per vertex
nb_classes = labels_data.shape[2]    # one-hot label width
batch_size = 1
# Every node contributes to the loss (no transductive masking).
# `np.bool` was a deprecated alias (removed in NumPy >= 1.24); the builtin
# `bool` dtype is equivalent.
mask = np.ones(nb_nodes, dtype=bool)
adj = generate_adj('data/subject1/lhedges.txt')#(10242, 10242) <class 'scipy.sparse.csr.csr_matrix'>
x_train, x_val, x_test, y_val, y_train, y_test, test_list = split_data_sparse(100, features_data, labels_data, nb_nodes)
# NOTE(review): the unpack order interleaves x_* and y_* (y_val before
# y_train) — verify it matches split_data_sparse's actual return order.

y_train = np.array(y_train)#(80, 10242, 36) <class 'numpy.ndarray'>
y_val = np.array(y_val)
y_test = np.array(y_test)


# Some preprocessing: row-normalize the first sample's features and convert
# them to the sparse-tuple form consumed by the model.  Dimensions come from
# the loaded data rather than the previously hard-coded (10242, 6).
features = sparse.lil_matrix(features_data[0].reshape(nb_nodes, ft_size))
features = preprocess_features(features)

# Map FLAGS.model onto (support matrices, number of supports, model class).
model_name = FLAGS.model
if model_name == 'gcn':
    # Single renormalized adjacency matrix.
    support, num_supports, model_func = [preprocess_adj(adj)], 1, GCN
elif model_name == 'gcn_cheby':
    # Chebyshev polynomial basis up to degree FLAGS.max_degree.
    support = chebyshev_polynomials(adj, FLAGS.max_degree)
    num_supports, model_func = 1 + FLAGS.max_degree, GCN
elif model_name == 'dense':
    # The MLP ignores graph structure; support is kept for the feed dict.
    support, num_supports, model_func = [preprocess_adj(adj)], 1, MLP
else:
    raise ValueError('Invalid argument for model: ' + str(model_name))

# Define placeholders
# `features` is the preprocessed sparse-tuple representation; `features[2]`
# is presumably its dense shape (rows, cols) — TODO confirm against
# utils.preprocess_features.
placeholders = {
    'support': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)],
    'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(features[2], dtype=tf.int64)),
    'labels': tf.placeholder(tf.float32, shape=(None, 36)),  # 36 presumably equals nb_classes — verify
    'labels_mask': tf.placeholder(tf.int32),
    'dropout': tf.placeholder_with_default(0., shape=()),
    'num_features_nonzero': tf.placeholder(tf.int32)  # helper variable for sparse dropout
}

# Create model
# input_dim = number of feature columns (second entry of the dense shape).
model = model_func(placeholders, input_dim=features[2][1], logging=True)
# Initialize session
sess = tf.Session()


# Define model evaluation function
def evaluate(features, support, labels, mask, placeholders):
    """Run one evaluation pass; return (loss, accuracy, dice, raw outputs)."""
    feed = construct_feed_dict(features, support, labels, mask, placeholders)
    loss, acc, dice, outputs = sess.run(
        [model.loss, model.accuracy, model.dice, model.outputs],
        feed_dict=feed)
    return loss, acc, dice, outputs


# Init variables
sess.run(tf.global_variables_initializer())

cost_val = []      # per-batch validation losses across all epochs
vlss_mn = np.inf   # best (minimum) mean validation loss seen so far
vacc_mx = 0.0      # best (maximum) mean validation accuracy seen so far
vdice_mx = 0.0     # best (maximum) mean validation dice seen so far
curr_step = 0      # epochs since the last improvement (patience counter)
# Pre-bind the early-stop snapshot values: the training loop assigns them
# only when loss AND dice improve in the same epoch, which may never happen
# before patience runs out — without these defaults the early-stop report
# would raise NameError.
vlss_early_model = np.inf
vdice_early_model = 0.0

# Train model
for epoch in range(FLAGS.epochs):
    # --- training pass: one sample per step (batch size 1) ---
    train_step = 0
    train_size = x_train.shape[0]
    train_loss_avg = 0
    train_acc_avg = 0
    train_dice_avg = 0
    while train_step < train_size:
        # Re-normalize this sample's features into the sparse-tuple form the
        # 'features' placeholder expects.  Dimensions come from the loaded
        # data instead of the previously hard-coded (10242, 6).
        features = x_train[train_step:(train_step + 1)]
        features = sparse.lil_matrix(features.reshape(nb_nodes, ft_size))
        features = preprocess_features(features)

        y_train1 = (y_train[train_step:(train_step + 1)]).reshape(nb_nodes, nb_classes)

        feed_dict = construct_feed_dict(features, support, y_train1, mask, placeholders)
        feed_dict.update({placeholders['dropout']: FLAGS.dropout})
        # Training step: opt_op performs the update; the rest are metrics.
        outs = sess.run([model.opt_op, model.loss, model.accuracy, model.dice, model.predict()], feed_dict=feed_dict)
        train_loss_avg += outs[1]
        train_acc_avg += outs[2]
        train_dice_avg += outs[3]
        train_step += 1

    # --- validation pass ---
    val_loss_avg = 0
    val_acc_avg = 0
    val_dice_avg = 0
    val_step = 0
    val_size = x_val.shape[0]
    while val_step * batch_size < val_size:
        # Validation sample, preprocessed the same way as training.
        features = x_val[val_step * batch_size:(val_step + 1) * batch_size]
        features = sparse.lil_matrix(features.reshape(nb_nodes, ft_size))
        features = preprocess_features(features)
        y_val1 = (y_val[val_step:(val_step + 1)]).reshape(nb_nodes, nb_classes)
        cost, acc, dice, pred = evaluate(features, support, y_val1, mask, placeholders)
        val_loss_avg += cost
        val_acc_avg += acc
        val_dice_avg += dice
        val_step += 1
        cost_val.append(cost)

    # Print results
    print('epoch=%d|Training: loss = %.5f, acc = %.5f, dice_coff = %.5f | Val: loss = %.5f, acc = %.5f, dice_coff = %.5f' %
        (epoch+1, train_loss_avg / train_step, train_acc_avg / train_step, train_dice_avg / train_step,
         val_loss_avg / val_step, val_acc_avg / val_step, val_dice_avg / val_step))

    # Patience-based early stopping: reset the counter whenever either mean
    # validation dice or mean validation loss improves on its best value.
    if val_dice_avg / val_step >= vdice_mx or val_loss_avg / val_step <= vlss_mn:
        if val_dice_avg / val_step >= vdice_mx and val_loss_avg / val_step <= vlss_mn:
            # Snapshot taken only when BOTH metrics improve together.
            vdice_early_model = val_dice_avg / val_step
            vlss_early_model = val_loss_avg / val_step
        vdice_mx = np.max((val_dice_avg / val_step, vdice_mx))
        vlss_mn = np.min((val_loss_avg / val_step, vlss_mn))
        curr_step = 0
    else:
        curr_step += 1
        if curr_step == patience:
            # NOTE(review): this print requires vdice_early_model /
            # vlss_early_model to be bound — they are only assigned when both
            # metrics improve in the same epoch, so they need defaults.
            print('Early stop! Min loss: ', vlss_mn, ', Max dice: ', vdice_mx)
            print('Early stop model validation loss: ', vlss_early_model, ', dice: ', vdice_early_model)
            break




# --- final evaluation on the held-out test subjects ---
ts_size = x_test.shape[0]
ts_step = 0
ts_loss = 0.0
ts_acc = 0.0
ts_dice = 0.0
while ts_step * batch_size < ts_size:
    # Preprocess each test sample exactly like the training samples; shapes
    # come from the loaded data instead of the hard-coded (10242, 6)/(10242, 36).
    features = x_test[ts_step * batch_size:(ts_step + 1) * batch_size]
    features = sparse.lil_matrix(features.reshape(nb_nodes, ft_size))
    features = preprocess_features(features)
    y_test1 = (y_test[ts_step:(ts_step + 1)]).reshape(nb_nodes, nb_classes)
    test_cost, test_acc, test_dice, test_pred = evaluate(features, support, y_test1, mask, placeholders)

    ts_loss += test_cost
    ts_acc += test_acc
    ts_dice += test_dice
    ts_step += 1
    # Save hard class predictions (argmax over the class axis), one file per
    # test sample.
    pred_ts1 = np.argmax(test_pred, 1)
    # NOTE(review): the +90 offset presumably maps test indices onto global
    # subject ids after the 90 train/val subjects — confirm against
    # split_data_sparse before relying on the filenames.
    np.savetxt('pred/GCN/pred_ts' + str(ts_step+90) + '.txt', pred_ts1, fmt='%d')


print("Test set results:", "cost=", "{:.5f}".format(ts_loss/ts_step),"accuracy=", "{:.5f}".format(ts_acc/ts_step),"dice=", "{:.5f}".format(ts_dice/ts_step))
