import tensorflow as tf
# Run in TF1 graph mode: build the graph first, then execute it in a Session.
tf.compat.v1.disable_v2_behavior()
tf.compat.v1.disable_eager_execution()

# NOTE(review): tf.test.is_gpu_available is deprecated in TF 2.x in favor of
# tf.config.list_physical_devices('GPU') — confirm before upgrading TF.
print("tf.test.gpu_device_name: ", tf.test.gpu_device_name())
print("tf.test.is_gpu_available: ", tf.test.is_gpu_available())


import numpy as np
from rank_model_tf.common.DNN import DNN  # project-local DNN model builder
print(tf.__version__)

# Compact float formatting (6 decimals, no scientific notation) for numpy output.
np.set_printoptions(precision=6, suppress=True)

# Graph inputs: a batch of 8-dim feature vectors and matching binary labels.
# Use tf.compat.v1.placeholder — the bare tf.placeholder alias does not exist
# under TF 2.x, while the rest of this file already uses the compat.v1 API.
x = tf.compat.v1.placeholder(tf.float32, shape=[None, 8], name='input')
label = tf.compat.v1.placeholder(tf.float32, shape=[None, 1], name='label')


# Three-layer DNN (16 -> 4 -> 1) producing one logit per example; `y` is the
# sigmoid probability exported under a stable node name.
model = DNN([16, 4, 1], name="model")
logit = model.logit(x)
y = tf.nn.sigmoid(logit, name='y')

# Per-example sigmoid cross-entropy (shape [None, 1]; deliberately not reduced).
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=label, logits=logit, name='loss')
# compat.v1 alias: tf.train.AdamOptimizer was removed from the TF 2.x namespace.
dense_optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=0.000005)
train_op = dense_optimizer.minimize(loss, name="train_op")

# Gradients of the loss w.r.t. every global variable. GraphKeys.VARIABLES is the
# deprecated alias of GLOBAL_VARIABLES; use the canonical name. tf.gradients
# yields None for variables the loss does not depend on — drop those.
my_vars = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)
grads_with_none = tf.gradients(loss, my_vars, name="my_grads")
grads = [g for g in grads_with_none if g is not None]

# Execute one forward/backward pass on a single toy example and report the
# shape of every non-None gradient tensor.
with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())

    feed = {
        x: [[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
        label: [[0.0]],
    }
    grad_values, _ = sess.run([grads, loss], feed_dict=feed)
    print([gv.shape for gv in grad_values])