# import tensorflow as tf
# import numpy as np


# def softmax_grad_fn(grad, softmax_out):
#     '''
#         i == j : yi(1 - yi)
#         i != j : -yi*yj
#         grad_i = yi - yi(yi .. yj)
#     '''
#     dst = grad * softmax_out
#     dst = tf.reduce_sum(dst, axis=-1, keepdims=True)
#     dst = (grad - dst) * softmax_out
#     return dst
# # def matmul_same_rank_grad( grad, A, B, transpose_b=False, reduce_b_along_axis=None):
# #     '''
# #         NOTE: only supports the backward pass of matmul for 3-D matrices
# #         @param grad [b, A, B]
# #         @param transpose_b corresponds to transpose_b in matmul
# #         @param reduce_b_along_axis handles the special case [b, s, h] * [h, s],
# #             where dB must be reduce-summed over the b axis
# #     '''
# #     if not transpose_b:
# #         B = tf.transpose(B, perm = [0, 2, 1])
# #     A = tf.transpose(A, perm = [0, 2, 1])

# #     dB = tf.matmul(A, grad) # 
# #     dA = tf.matmul(grad, B)

# #     if transpose_b:
# #         dB = tf.transpose(dB, perm = [0, 2, 1])
    
# #     if reduce_b_along_axis is not None:
# #         dB = tf.reduce_sum(dB, axis=reduce_b_along_axis)

# #     return dA, dB
# attn_dim = 16

# a = tf.Variable(initial_value=np.random.randn(32, 4, 8), dtype=tf.float32)

# # b = tf.Variable(initial_value=np.random.randn(16, 8), dtype=tf.float32)

# # c = tf.matmul(a, b, transpose_b=True)

# variable_init = tf.global_variables_initializer()

# # def softmax_grad(softmaxout, B):
   
# #    pass
# a = a/tf.sqrt(tf.cast(attn_dim, tf.float32))
# b = tf.nn.softmax(a, axis=-1)



# vars = tf.gradients(b, [a])
# result = softmax_grad_fn(tf.ones_like(b), b)
# result = result/tf.sqrt(tf.cast(attn_dim, tf.float32))

# d = vars[0] - result[0]
# with tf.Session() as sess:
#     sess.run(variable_init)
#     print(sess.run(result))
#     print(sess.run(d))

# # a = tf.Variable(initial_value=np.random.randn(2, 2, 3), dtype=tf.float32)



# # def pb_to_txt():
# #     graph_def = tf.get_default_graph().as_graph_def()
# #     tf.io.write_graph(graph_def, "./export_graph ", 'graph.txt', as_text=True)

# # vars = tf.gradients(b, [a])
# # pb_to_txt()

    
# #print(tf.get_default_graph().as_graph_def())

# # a = tf.compat.v1.graph_util.extract_sub_graph(tf.get_default_graph().as_graph_def(), ["gradients/MatMul_grad/BroadcastGradientArgs"])
# # a = tf.compat.v1.graph_util.extract_sub_graph(tf.get_default_graph().as_graph_def(), ["gradients/MatMul_grad/Shape_1"])


# # g = tf.get_default_graph()
# # a = g.get_operation_by_name("gradients/Softmax_grad/mul_1")
# # # # b = g.get_operation_by_name("gradients/MatMul_grad/Reshape_1")
# # # print(dir(a))
# # value = [a.inputs[0],  a.inputs[1]]
# # print(value)
# # print(a.get_attr("transpose_b"))

# # vars = tf.gradients(c, [a, b])
# # result = matmul_same_rank_grad(tf.ones_like(c), a, b, transpose_b=True, reduce_b_along_axis=0)
# # d = vars[0] - result[0]
# # with tf.Session() as sess:
# #     sess.run(variable_init)
# #     print(sess.run(result))

# #     print(sess.run(d))

#     # print(baseline[0])
#     # print(baseline[1])

# #     # output = sess.run(my_grad)
# #     print(baseline)
#     # print(l[0]- l[1])
#     # print()
# #print(type(a.node[0]))
# #print(a.node[0])
# # graph_def = 
# # pb_to_txt()

# # shape 32 16 16
# # shape 16 16

# # a = tf.constant([32, 1], dtype=tf.int32)
# # b = tf.constant([32, 64], dtype=tf.int32)
# # c = tf.raw_ops.BroadcastGradientArgs(s0=a, s1=b)
# # with tf.Session() as sess:
# #     l = sess.run(c)
# #     print(l)