import tensorflow as tf
import numpy as np
import math

def pb_to_txt():
    """Dump the current default graph to ./export_graph/graph.txt as pbtxt.

    Fix: the output directory was "./export_graph " (trailing space),
    which created a confusingly-named directory on disk.
    """
    graph_def = tf.get_default_graph().as_graph_def()
    tf.io.write_graph(graph_def, "./export_graph", 'graph.txt', as_text=True)


class AttentionLayerCpuValidateConfig:
    """Static shape configuration for the attention gradient check."""

    def __init__(self):
        # Projection-weight shapes for q / k / v.
        self.qw_shape, self.kw_shape, self.vw_shape = (
            (16, 16),
            (16, 16),
            (32, 32),
        )

        # Input q / k / v shapes: (batch, seq_len, hidden).
        self.q_shape, self.k_shape, self.v_shape = (
            (32, 4, 16),
            (32, 8, 16),
            (32, 8, 32),
        )

        # Attention mask shape: (batch, q_len, k_len).
        self.mask_shape = (32, 4, 8)


class AttentionLayerCpuValidate():
    """Single-head attention forward pass plus a hand-derived backward pass.

    The manual gradients produced by ``param_attn_layer_grad`` are meant to
    be compared against ``tf.gradients`` (TF1 graph mode) by the harness at
    the bottom of this file.
    """

    def __init__(self, config: AttentionLayerCpuValidateConfig) -> None:
        self.config = config
        # Inputs: projection weights ...
        self.qw = None
        self.kw = None
        self.vw = None

        # ... and the raw q / k / v tensors.
        self.q = None
        self.k = None
        self.v = None

        self.mask = None

        # Scaling dimension d for the 1/sqrt(d) attention factor.
        self.attn_dim = 16

        # Forward outputs.
        self.softmax_out = None
        self.out = None

        # Backward outputs.
        self.softmax_out_grad = None

        self.q_grad = None
        self.k_grad = None
        self.v_grad = None

        self.qw_grad = None
        # Fix: was `kk_grad` — a typo for kw_grad (never referenced elsewhere).
        self.kw_grad = None
        self.vw_grad = None

        self._init_variable()

    def _init_variable(self):
        """Create random weight/input variables once (idempotent)."""
        if self.qw is not None:
            return

        # Fix: qw originally used np.random.rand (uniform); use randn for
        # consistency with every other tensor below.
        self.qw = tf.Variable(initial_value=np.random.randn(*self.config.qw_shape).astype(np.float32), name="qw")
        self.kw = tf.Variable(initial_value=np.random.randn(*self.config.kw_shape).astype(np.float32), name="kw")
        self.vw = tf.Variable(initial_value=np.random.randn(*self.config.vw_shape).astype(np.float32), name="vw")

        self.q = tf.Variable(initial_value=np.random.randn(*self.config.q_shape).astype(np.float32), name="q")
        self.k = tf.Variable(initial_value=np.random.randn(*self.config.k_shape).astype(np.float32), name="k")
        self.v = tf.Variable(initial_value=np.random.randn(*self.config.v_shape).astype(np.float32), name="v")

        # All-ones mask: no position is actually masked in this harness.
        self.mask = tf.ones(self.config.mask_shape, name="mask")

    def param_attn_layer(self):
        """Forward pass: out = softmax(mask((q Wq)(k Wk)^T / sqrt(d))) (v Wv)."""
        with tf.name_scope("weigth_matmul"):
            self.weight_q = tf.matmul(self.q, self.qw, transpose_b=True, name="qw_matmul")
            self.weight_k = tf.matmul(self.k, self.kw, transpose_b=True, name="kw_matmul")
            self.weight_v = tf.matmul(self.v, self.vw, transpose_b=True, name="vw_matmul")

        with tf.name_scope("qk_matmul"):
            qk = tf.matmul(self.weight_q, self.weight_k, transpose_b=True)

        with tf.name_scope("div"):
            qk_div = qk / tf.sqrt(tf.cast(self.attn_dim, tf.float32))
            # Fix: masked positions must get a large NEGATIVE bias so softmax
            # drives them toward 0. The original ADDED +10000, which would make
            # masked positions dominate. With the all-ones mask used here both
            # forms are a no-op, so numerical results are unchanged.
            mask = qk_div - (1 - self.mask) * 10000

        with tf.name_scope("softmax_out"):
            self.softmax_out = tf.nn.softmax(mask, axis=-1)

        with tf.name_scope("out_matmul"):
            self.out = tf.matmul(self.softmax_out, self.weight_v)

        return self.out

    def softmax_grad_fn(self, grad, softmax_out):
        """Backward of softmax along the last axis (Jacobian-vector product).

            grad_i = (1 - y_i) * y_i * grad_i - y_i * y_j * grad_j  (j != i)
                   = y_i * (grad_i - sum_j y_j * grad_j)
        """
        dst = grad * softmax_out
        dst = tf.reduce_sum(dst, axis=-1, keepdims=True)
        dst = (grad - dst) * softmax_out
        return dst

    def matmul_grad(self, grad, A, B, transpose_b=False, reduce_b_along_axis=None):
        """Backward of a batched (3-D only) matmul C = A @ B (or A @ B^T).

        @param grad upstream gradient with C's shape, [b, m, n]
        @param transpose_b mirrors the ``transpose_b`` of the forward matmul
        @param reduce_b_along_axis for the [b, s, h] x [h, s] case, the batch
               axis of dB to reduce-sum over (weight shared across the batch)
        @return (dA, dB)
        """
        if not transpose_b:
            B = tf.transpose(B, perm=[0, 2, 1])
        A = tf.transpose(A, perm=[0, 2, 1])

        dB = tf.matmul(A, grad)   # A^T @ grad
        dA = tf.matmul(grad, B)   # grad @ B^T (B already transposed if needed)

        if transpose_b:
            # Forward was A @ B^T, so dB = (A^T @ grad)^T = grad^T @ A.
            dB = tf.transpose(dB, perm=[0, 2, 1])

        if reduce_b_along_axis is not None:
            dB = tf.reduce_sum(dB, axis=reduce_b_along_axis)

        return dA, dB

    def param_attn_layer_grad(self, grad):
        """Hand-written backward pass; returns (d_weight_q, d_weight_k, d_weight_v).

        Must be called after ``param_attn_layer`` has built the forward graph.
        """
        # Backward of out = softmax_out @ weight_v.
        # Fix: originally referenced the module-level global `validate`
        # instead of `self` (worked only by accident).
        softmax_out_grad, weight_v_grad = self.matmul_grad(grad, self.softmax_out, self.weight_v)
        d_v = weight_v_grad

        # Backward of softmax, then of the 1/sqrt(d) scaling.
        qk_div_grad = self.softmax_grad_fn(softmax_out_grad, self.softmax_out)
        qk_div_grad = qk_div_grad / tf.sqrt(tf.cast(self.attn_dim, tf.float32))

        # Backward of qk = weight_q @ weight_k^T.
        d_q, d_k = self.matmul_grad(qk_div_grad, self.weight_q, self.weight_k, transpose_b=True)
        return d_q, d_k, d_v


attn_config = AttentionLayerCpuValidateConfig()
validate = AttentionLayerCpuValidate(attn_config)

result = validate.param_attn_layer()

# Check the hand-derived gradient formulas against TF autodiff.
grads_and_vars = tf.gradients(validate.out, [validate.weight_q, validate.weight_k, validate.weight_v])
out_put_grad = validate.param_attn_layer_grad(tf.ones_like(validate.out))

# Fix: the original summed *signed* differences, so positive and negative
# errors could cancel and hide a real mismatch. Sum absolute differences
# instead (tolerance loosened for float32 accumulation over ~10k elements).
# Also replaced the stray trailing-backslash continuation with parentheses.
diff = (tf.reduce_sum(tf.abs(grads_and_vars[0] - out_put_grad[0]))
        + tf.reduce_sum(tf.abs(grads_and_vars[1] - out_put_grad[1]))
        + tf.reduce_sum(tf.abs(grads_and_vars[2] - out_put_grad[2])))

# Export the graph for inspection.
pb_to_txt()
variable_init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(variable_init)
    print(sess.run(grads_and_vars[0]))
    print("================================")
    print(sess.run(out_put_grad[0]))
    diff = sess.run(diff)
    print(diff)
    assert np.abs(diff) < 1e-4