import tensorflow as tf
import numpy as np
import scipy.sparse as sp
import sys

from .preprocessing import sparse_to_tuple, preprocess_user_item_features, globally_normalize_bipartite_adjacency,\
	load_official_trainvaltest_split, normalize_features
from .model import RecommenderGAE, RecommenderSideInfoGAE
from .utils import construct_feed_dict

# Settings

# Define parameters
DO = 0.7                 # dropout value fed to construct_feed_dict (presumably drop probability — confirm in model)
HIDDEN = [500, 75]       # hidden layer sizes of the graph encoder
FEATHIDDEN = 64          # hidden dim of the side-feature dense layer (side-info model only)
BASES = 2                # number of basis functions for weight sharing across rating classes
LR = 0.01                # learning rate
FEATURES = False         # True: use dataset side features; False: one-hot node ids only
SYM = False              # symmetric normalization of the bipartite adjacency
ACCUM = 'stack'          # message accumulation method passed to the model ('stack' here — see model for alternatives)
SELFCONNECTIONS = False  # if True, append identity self-connection support matrices
NUMCLASSES = 5           # number of distinct rating classes (ratings 1..NUMCLASSES)

# Splitting dataset in training, validation and test set

# Load the official split: sparse user/item feature matrices, the training
# rating matrix (num_users x num_items), training labels and index arrays,
# the distinct rating values, and id -> index dicts for users and items
# (presumably — verify against load_official_trainvaltest_split).
u_features, v_features, adj_train, train_labels, train_u_indices, train_v_indices, class_values,\
	u_dict, v_dict = load_official_trainvaltest_split()

num_users, num_items = adj_train.shape

# Overwritten below when FEATURES is enabled.
num_side_features = 0

# feature loading
#
# Two modes:
#   FEATURES == False : node input features are one-hot identity vectors,
#                       no side information.
#   FEATURES == True  : loaded features become normalized side information,
#                       while node inputs are still one-hot ids.
if not FEATURES:
	u_features = sp.identity(num_users, format='csr')  # features is just one-hot vector!
	v_features = sp.identity(num_items, format='csr')

	u_features, v_features = preprocess_user_item_features(u_features, v_features)

elif u_features is not None and v_features is not None:
	# FEATURES is necessarily True here, so re-testing it (as the original
	# `elif FEATURES and ...` did) was redundant.
	# Use features as side information and node_id's as node input features.

	print("Normalizing feature vectors...")
	u_features_side = normalize_features(u_features)
	v_features_side = normalize_features(v_features)

	u_features_side, v_features_side = preprocess_user_item_features(u_features_side, v_features_side)

	# Densify: side features are fed through plain (non-sparse) placeholders.
	u_features_side = np.array(u_features_side.todense(), dtype=np.float32)
	v_features_side = np.array(v_features_side.todense(), dtype=np.float32)

	num_side_features = u_features_side.shape[1]

	# node id's for node input features
	id_csr_u = sp.identity(num_users, format='csr')
	id_csr_v = sp.identity(num_items, format='csr')

	u_features, v_features = preprocess_user_item_features(id_csr_u, id_csr_v)

else:
	raise ValueError('Features flag is set to true but no features are loaded from dataset ')

print("--User features shape: " + str(u_features.shape))
print("--Item features shape: " + str(v_features.shape))
print("adj_train shape: " + str(adj_train.shape))


# global normalization
adj_train_int = sp.csr_matrix(adj_train, dtype=np.int32)

support = []
support_t = []

# Build one sparse 0/1 bipartite adjacency matrix per rating value.
for rating in range(1, NUMCLASSES + 1):
	rating_adj = sp.csr_matrix(adj_train_int == rating, dtype=np.float32)

	if rating_adj.nnz == 0:
		# yahoo music has dataset split with not all ratings types present in
		# training set; that would produce an empty adjacency matrix here.
		sys.exit('ERROR: normalized bipartite adjacency matrix has only zero entries!!!!!')

	# support holds user->item adjacencies; support_t their item->user transposes.
	support.append(rating_adj)
	support_t.append(rating_adj.T)


# Normalize each per-rating adjacency matrix globally.
support = globally_normalize_bipartite_adjacency(support, symmetric=SYM)
support_t = globally_normalize_bipartite_adjacency(support_t, symmetric=SYM)

if SELFCONNECTIONS:
	support.append(sp.identity(u_features.shape[0], format='csr'))
	support_t.append(sp.identity(v_features.shape[0], format='csr'))

num_support = len(support)

# Stack the per-rating matrices horizontally:
#   support   -> num_users x (num_items * num_support)
#   support_t -> num_items x (num_users * num_support)
# e.g. for ml-100k: support (943, 1682*5), support_t (1682, 943*5).
support = sp.hstack(support, format='csr')
support_t = sp.hstack(support_t, format='csr')

# NOTE: these are sparse matrices, so memory stays proportional to nnz.
print('support shape: ' + str(support.shape))
print('support_t shape: ' + str(support_t.shape))


##################################################################################################################
""" support contains only training set ratings. index into support using user/item indices to create test set support. """

# Unique user and item node indices present in the training set.
train_u = list(set(train_u_indices))
train_v = list(set(train_v_indices))

# Row-select the stacked support matrices down to the training nodes.
train_support = support[np.array(train_u)]
train_support_t = support_t[np.array(train_v)]

# If GCMC_INDICES, this is no longer (n_users, n_items*n_rating_types) but smaller.
print('train_support_shape: {}'.format(train_support.shape))
##################################################################################################################

# Side features restricted to the training nodes, or None when disabled.
train_u_features_side = u_features_side[np.array(train_u)] if FEATURES else None
train_v_features_side = v_features_side[np.array(train_v)] if FEATURES else None

# TF1 graph placeholders; filled at run time via construct_feed_dict.
placeholders = {
	# Sparse node input features (one-hot ids in the default configuration).
	'u_features': tf.sparse_placeholder(tf.float32, shape=np.array(u_features.shape, dtype=np.int64)),
	'v_features': tf.sparse_placeholder(tf.float32, shape=np.array(v_features.shape, dtype=np.int64)),
	# Number of stored nonzeros, used for sparse dropout (presumably — confirm in model).
	'u_features_nonzero': tf.placeholder(tf.int32, shape=()),
	'v_features_nonzero': tf.placeholder(tf.int32, shape=()),
	# Rating-class label per (user, item) pair.
	'labels': tf.placeholder(tf.int32, shape=(None,)),

	# Dense side-information features (zero-width when FEATURES is False).
	'u_features_side': tf.placeholder(tf.float32, shape=(None, num_side_features)),
	'v_features_side': tf.placeholder(tf.float32, shape=(None, num_side_features)),

	# Indices of the user/item rows to score.
	'user_indices': tf.placeholder(tf.int32, shape=(None,)),
	'item_indices': tf.placeholder(tf.int32, shape=(None,)),

	# The distinct rating values (e.g. 1..5), for expectation-based prediction.
	'class_values': tf.placeholder(tf.float32, shape=class_values.shape),

	# Regularization knobs; default to 0 when not fed.
	'dropout': tf.placeholder_with_default(0., shape=()),
	'weight_decay': tf.placeholder_with_default(0., shape=()),

	# Horizontally stacked per-rating support matrices (and their transposes).
	'support': tf.sparse_placeholder(tf.float32, shape=(None, None)),
	'support_t': tf.sparse_placeholder(tf.float32, shape=(None, None)),
}

# create model
# Hyperparameters shared by both model variants.
_common_kwargs = dict(
	input_dim=u_features.shape[1],
	num_classes=NUMCLASSES,
	num_support=num_support,
	self_connections=SELFCONNECTIONS,
	num_basis_functions=BASES,
	hidden=HIDDEN,
	num_users=num_users,
	num_items=num_items,
	accum=ACCUM,
	learning_rate=LR,
	logging=True,
)

if FEATURES:
	# The side-information variant adds a dense branch over the side features.
	model = RecommenderSideInfoGAE(placeholders,
								   feat_hidden_dim=FEATHIDDEN,
								   num_side_features=num_side_features,
								   **_common_kwargs)
else:
	model = RecommenderGAE(placeholders, **_common_kwargs)

# Convert sparse placeholders to tuples to construct feed_dict. sparse placeholders expect tuple of (indices, values, shape)
train_support = sparse_to_tuple(train_support)
train_support_t = sparse_to_tuple(train_support_t)

u_features = sparse_to_tuple(u_features)
v_features = sparse_to_tuple(v_features)
# [2] is the dense shape tuple; index 1 of it is the feature dimension.
assert u_features[2][1] == v_features[2][1], 'Number of features of users and items must be the same!'

num_features = u_features[2][1]
# [1] is the values array, so its length is the number of stored nonzeros.
u_features_nonzero = u_features[1].shape[0]
v_features_nonzero = v_features[1].shape[0]

sess = tf.Session()
# Map EMA shadow-variable names back to the model variables so the
# Polyak-averaged weights can be restored from the checkpoint.
variables_to_restore = model.variable_averages.variables_to_restore()


def data_restore(user_id):
	"""Predict a rating for every item for one user from a saved checkpoint.

	Args:
		user_id: external user id, mapped to an internal index via u_dict.

	Returns:
		Tuple (outputs, v_dict): outputs is a 1-D array of length num_items
		with the predicted rating class per item (argmax over class logits,
		shifted so classes start at 1); v_dict is the item id/index mapping
		loaded at module level (presumably item id -> index — verify against
		load_official_trainvaltest_split).
	"""
	# Pair this single user index with every item index (shadows the
	# module-level train_u_indices/train_v_indices on purpose).
	train_u_indices = np.full(num_items, u_dict[user_id])
	train_v_indices = np.array(range(num_items))
	# NOTE(review): train_labels is the full training label vector while the
	# index arrays here have num_items entries — confirm construct_feed_dict
	# and model.outputs do not require labels and indices to be aligned.
	train_feed_dict = construct_feed_dict(placeholders, u_features, v_features, u_features_nonzero,
										  v_features_nonzero, train_support, train_support_t,
										  train_labels, train_u_indices, train_v_indices, class_values, DO,
										  train_u_features_side, train_v_features_side)
	# restore with polyak averages of parameters
	# NOTE(review): a new Saver is built and the checkpoint re-read from disk
	# on every call; hoisting this out of the function would avoid that cost.
	saver = tf.train.Saver(variables_to_restore)
	saver.restore(sess, tf.train.latest_checkpoint('restore/tmp/'))
	outputs = sess.run(model.outputs, feed_dict=train_feed_dict)
	# argmax over rating-class logits; +1 maps class 0..NUMCLASSES-1 to ratings 1..NUMCLASSES
	outputs = np.argmax(outputs, axis=1) + 1

	return outputs, v_dict


