import time
import tensorflow as tf
import numpy as np
import scipy.sparse as sp
import os
from flags import FLAGS

from utils import *
from models import GCN, MLP
from process_data import load_biased_data

# Run with TensorFlow 1.x graph-mode semantics under TF 2.x.
tf.compat.v1.disable_eager_execution()

# Checkpoint path, made unique per run via the current timestamp.
checkpt_file = 'pre_trained/mod_cora_baseline{}.ckpt'.format(time.time())

# Optional fixed random seeds for reproducibility (disabled).
# seed = 123
# np.random.seed(seed)
# tf.compat.v1.set_random_seed(seed)


# Log the hyperparameters relevant to this run.
print("lambda1: ", FLAGS.lambda1)
print("lambda2: ", FLAGS.lambda2)
print("use_alpha: ", FLAGS.use_alpha)
print("early_stopping: ", FLAGS.early_stopping)

# Load the dataset split: adjacency matrix, feature matrix, one-hot label
# matrices, and per-split node masks (presumably 0/1 or boolean arrays —
# they are cast to bool below; confirm against load_biased_data).
adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = load_biased_data(FLAGS.dataset)


# Helper: positions of a given value in a sequence.
def get_index(lst=None, item=''):
    """Return the list of indices in *lst* whose value equals *item*.

    Args:
        lst: a numpy array or any plain Python sequence. The ``None``
            default is kept for interface compatibility, but the argument
            is effectively required — ``None`` now raises a clear
            ``ValueError`` instead of an ``AttributeError``.
        item: the value to match. Note ``True == 1`` in Python, so
            ``get_index(bool_mask, 1)`` finds the True entries.

    Returns:
        A list of integer positions where ``lst[index] == item``.
    """
    if lst is None:
        raise ValueError("get_index() requires a sequence, got None")
    # Accept both numpy arrays (original behavior) and plain sequences.
    values = lst.tolist() if hasattr(lst, 'tolist') else list(lst)
    return [index for index, value in enumerate(values) if value == item]


# Convert the split masks to boolean type.
# FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin `bool` is the supported spelling and behaves identically here.
val_mask = val_mask.astype(bool)
test_mask = test_mask.astype(bool)

# Hide validation and test nodes from the training graph: zero every
# adjacency row and column that touches a val/test node, so only
# train-train edges remain.
val_test_mask = val_mask + test_mask
train_adj = adj.copy()
train_adj[val_test_mask, :] = 0
train_adj[:, val_test_mask] = 0
# Nodes left with no edges at all after the cut are marked for deletion.
delete_mask = ((np.sum(train_adj, 1) == 0).reshape(train_mask.shape))

# Mask of all nodes still connected (hence usable) during training.
all_train_mask = np.ones(train_mask.shape) - delete_mask
train_mask = train_mask.astype(bool)

# Create a session whose GPU memory footprint grows on demand rather
# than reserving all device memory up front.
session_config = tf.compat.v1.ConfigProto(
    gpu_options=tf.compat.v1.GPUOptions(allow_growth=True))
sess = tf.compat.v1.Session(config=session_config)

# Remember the raw feature dimensionality, then convert the feature
# matrix to the representation produced by preprocess_features.
fea_size = features.shape[1]
features = preprocess_features(sp.coo_matrix(features))

# Select the adjacency preprocessing and the model class per FLAGS.model.
if FLAGS.model == 'gcn':
    support_mask = [preprocess_adj(train_adj)]  # training graph (val/test edges removed)
    support = [preprocess_adj(adj)]             # full graph, used at evaluation time
    num_supports = 1
    model_func = GCN
elif FLAGS.model == 'gcn_cheby':
    # BUG FIX: support_mask was never defined on this branch, so training
    # (which feeds support_mask into the feed dict) crashed with a
    # NameError. Build it from the masked training adjacency, mirroring
    # the other branches.
    support_mask = chebyshev_polynomials(train_adj, FLAGS.max_degree)
    support = chebyshev_polynomials(adj, FLAGS.max_degree)
    num_supports = 1 + FLAGS.max_degree
    model_func = GCN
elif FLAGS.model == 'dense':
    support_mask = [preprocess_adj(train_adj)]
    support = [preprocess_adj(adj)]
    num_supports = 1
    model_func = MLP
else:
    raise ValueError('无效的模型参数: ' + str(FLAGS.model))

# Placeholder definitions for the static TF1 graph.
train_size = np.sum(train_mask)
# NOTE(review): train_mask is cast to bool again here; the earlier cast
# makes this redundant but harmless.
train_mask = train_mask.astype(bool)
placeholders = {
    'support': [tf.compat.v1.sparse_placeholder(tf.float32) for _ in range(num_supports)],
    'features': tf.compat.v1.sparse_placeholder(tf.float32),
    'labels': tf.compat.v1.placeholder(tf.float32, shape=(None, y_train.shape[1])),
    'labels_mask': tf.compat.v1.placeholder(tf.int32, shape=([None])),
    'dropout': tf.compat.v1.placeholder_with_default(0., shape=()),
    'num_features_nonzero': tf.compat.v1.placeholder(tf.int32)
}

# Model initialization: one learnable weight per training node, all
# starting at 1.
value = np.ones((train_size, 1))
weight_init = tf.compat.v1.constant_initializer(value)
weight = tf.compat.v1.get_variable('weight', shape=[train_size], initializer=weight_init)
# features[2][1]: presumably preprocess_features returns a sparse tuple
# (coords, values, shape), making this the input dimensionality — confirm
# against utils.preprocess_features.
model = model_func(placeholders, input_dim=features[2][1], weight=weight, train_size=train_size,
                   label_size=train_mask.shape[0], lambda1=FLAGS.lambda1, lambda2=FLAGS.lambda2, logging=True)

# Create the checkpoint saver and initialize all graph variables.
saver = tf.compat.v1.train.Saver()
sess.run(tf.compat.v1.global_variables_initializer())


# Evaluation helper.
def evaluate(features, support, labels, mask, placeholders, test=False):
    """Run one forward pass and return (loss, accuracy, wall time, embedding).

    Relies on the module-level ``sess`` and ``model``. The ``test``
    argument is accepted for interface compatibility but is not used.
    """
    started = time.time()
    feed = construct_feed_dict(features, support, labels, mask, placeholders)
    loss, acc, embedding = sess.run(
        [model.lossc, model.accuracy, model.hidden_embedding],
        feed_dict=feed)
    return loss, acc, (time.time() - started), embedding


# Build the training feed once; labels and masks are fixed for the run.
feed_dict = construct_feed_dict(features, support_mask, y_train, train_mask, placeholders)
feed_dict.update({placeholders['dropout']: FLAGS.dropout})
start_time = time.time()
cost_val = []        # validation losses (used by the optional early stopping below)
vlss_mn = np.inf     # best (minimum) validation loss seen so far
vacc_mx = 0.0        # best validation accuracy seen so far
curr_step = 0        # epochs since the last improvement

# Training loop.
for epoch in range(FLAGS.epochs):
    t = time.time()
    outs = sess.run([model.opt_op, model.loss, model.accuracy, model.weight, model.hidden_embedding],
                    feed_dict=feed_dict)

    # Optional second optimization step for the DVD objective.
    # FIX: removed the original `for _ in range(1):` wrapper — a
    # single-iteration loop that only added a level of indentation.
    if FLAGS.use_DVD:
        outsb = sess.run([model.opt_opb, model.lossb, model.accuracy], feed_dict=feed_dict)

    # Optional per-epoch validation (disabled):
    # cost, acc, duration, _ = evaluate(features, support, y_val, val_mask, placeholders)
    # cost_val.append(cost)

    # Per-epoch logging (disabled):
    # print("Epoch:", '%04d' % (epoch + 1), "train_loss=", "{:.5f}".format(outs[1]),
    #       "train_acc=", "{:.5f}".format(outs[2]), "val_loss=", "{:.5f}".format(cost),
    #       "val_acc=", "{:.5f}".format(acc), "time=", "{:.5f}".format(time.time() - t),"eval time=", "{:.5f}".format(duration))

    # Early stopping (disabled):
    # if epoch > FLAGS.early_stopping and cost_val[-1] > np.mean(cost_val[-(FLAGS.early_stopping+1):-1]):
    #    print("Early stopping...")
    #    break

print("Optimization Finished!")
print("Total time:", time.time() - start_time)

# Final evaluation on the full (unmasked) graph support.
# NOTE(review): train_idx is computed but never used below — candidate
# for removal (possibly left over from the commented-out logging code).
train_idx = get_index(train_mask, 1)
test_cost, test_acc, test_duration, test_hidden = evaluate(features, support, y_test, test_mask, placeholders)

# Optionally append the test accuracy to a per-dataset results file (disabled):
# if FLAGS.use_DVD == 1:
#   f = open("./"+FLAGS.dataset+str(FLAGS.use_alpha)+".txt","a")
#   f.write(str(test_acc)+"\n")
#   f.close()
# else:
#   f=open("./" + FLAGS.dataset + "_base.txt","a")
#   f.write(str(test_acc)+"\n")
#   f.close()

print("Test set results:", "cost=", "{:.5f}".format(test_cost),
      "accuracy=", "{:.5f}".format(test_acc), "time=", "{:.5f}".format(test_duration))

sess.close()
