import sys
from ops import npu_flash_attention

import tensorflow as tf
import numpy as np
tf.compat.v1.disable_eager_execution()

import npu_device
from npu_device.compat.v1.npu_init import *
npu_device.compat.enable_v1()

def sess_config():
    """Build a tf.compat.v1 session config with the NPU graph optimizer enabled.

    Remapping and memory optimization are switched off so the TF grappler
    passes do not interfere with the NPU custom optimizer.
    """
    cfg = tf.compat.v1.ConfigProto()
    rewrite = cfg.graph_options.rewrite_options
    npu_opt = rewrite.custom_optimizers.add()
    npu_opt.name = "NpuOptimizer"
    rewrite.remapping = RewriterConfig.OFF
    rewrite.memory_optimization = RewriterConfig.OFF
    return cfg


def param_attn_layer(query, key, value, attn_dim, mask=None, scope_prefix=''):
    """Unscaled dot-product attention built from plain TF ops.

    Computes softmax(Q @ K^T, axis=-1) @ V. Note there is no 1/sqrt(d)
    scaling, matching the numpy_golden reference in this file.

    NOTE(review): ``attn_dim``, ``mask`` and ``scope_prefix`` are accepted
    but currently unused; kept for interface compatibility with callers.
    """
    scores = tf.matmul(query, key, transpose_b=True)
    probs = tf.nn.softmax(scores, axis=-1)
    return tf.matmul(probs, value)


def numpy_golden(query, key, value, scope_prefix=''):
    """NumPy reference for unscaled dot-product attention.

    Expects 3-D inputs of shape (batch, seq, dim); returns
    softmax(Q @ K^T, axis=-1) @ V with a numerically stable softmax
    (max-subtraction before exponentiation).

    NOTE(review): ``scope_prefix`` is unused; kept for signature parity
    with param_attn_layer.
    """
    scores = np.matmul(query, np.transpose(key, (0, 2, 1)))
    # Stable softmax over the last axis.
    shifted = scores - np.max(scores, axis=-1, keepdims=True)
    exp_scores = np.exp(shifted)
    probs = exp_scores / np.sum(exp_scores, axis=-1, keepdims=True)
    return np.matmul(probs, value)


#def numpy_golden_refined(query, key, value, drop_mask, atten_mask, pse, scale, keep_prob):
#   if pse is None or len(pse.shape) == 0:
#       qk = torch.matmul(q, k.permute(0, 1, 3, 2)).mul(scale)
#   else:
#       qk = (torch.matmul(q, k.permute(0, 1, 3, 2)) + pse).mul(scale)
#   if atten_mask is None or len(atten_mask.shape) == 0:
#       qk = qk
#   else:
#       qk = qk + atten_mask * (-40000.0)
#   softmax_res, xmax, xsum = tsoftmax(qk)
#   softmax_res[atten_mask.bool().broadcast_to(softmax_res.shape)] = 0
#   if drop_mask is None or len(drop_mask.shape) == 0:
#       drop_res = softmax_res
#   else:
#       drop_res = softmax_res * drop_mask * (1.0 / keep_prob)
#   y = torch.matmul(drop_res, v)
#   return y, softmax_res


# Test fixture: a single-batch BSH layout (batch=1, seq=32, hidden=32).
shape = [1, 32, 32]
query_np = np.random.randn(*shape).astype(np.float16)
key_np = np.random.randn(*shape).astype(np.float16)
value_np = np.random.randn(*shape).astype(np.float16)

# BUG FIX: tf.Variable's second positional parameter is `trainable`, not a
# dtype — the original passed tf.float16 into that slot. Pass dtype by
# keyword; trainable keeps its default, which matches the (truthy) original.
query = tf.Variable(query_np, dtype=tf.float16)
key = tf.Variable(key_np, dtype=tf.float16)
value = tf.Variable(value_np, dtype=tf.float16)

# All-zeros attention mask with shape (B, 1, S, S) — i.e. nothing masked.
mask = tf.zeros(shape=(shape[0], 1, shape[1], shape[1]), dtype=tf.uint8)

head_num = 1
input_layout = "BSH"
flash_result_t = npu_flash_attention(query, key, value, head_num, input_layout, atten_mask=mask)

# Reference TF implementation of the same (unscaled) attention.
origin_result_t = param_attn_layer(query, key, value, shape[2])
# BUG FIX: `keep_dims` is the removed TF1 spelling; TF2's tf.reduce_mean
# only accepts `keepdims` and raises TypeError on `keep_dims`.
# NOTE(review): loss_golden is built but never evaluated below — confirm
# whether it is intentionally kept as a graph-construction smoke check.
loss_golden = tf.reduce_mean(origin_result_t, keepdims=False)

# Run the graph on the NPU and compare the three implementations:
# the TF reference op graph, the numpy golden, and the flash-attention op.
with tf.compat.v1.Session(config=sess_config()) as sess:
    # Materialize the tf.Variable weights (query/key/value) before use.
    sess.run(tf.compat.v1.global_variables_initializer())
    
    print("========== origin ==========")
    # Reference attention computed by plain TF ops.
    origin_result = sess.run(origin_result_t)
    print(origin_result)
    print("========== numpy - origin ==========")
    # NOTE(review): shape[2] lands in numpy_golden's (unused) scope_prefix
    # parameter — harmless, but confirm it was not meant to be a dim arg.
    numpy_result = numpy_golden(query_np, key_np, value_np, shape[2])
    # Element-wise deviation of the TF reference from the numpy golden.
    print(origin_result - numpy_result)
    print("========== flash ==========")
    flash_result = sess.run(flash_result_t)
    print(flash_result)
    print("========== numpy - flash ==========")
    # NOTE(review): npu_flash_attention appears to return multiple tensors;
    # index 3 is presumably the attention output — confirm against the op's
    # documented output order before trusting this diff.
    print(numpy_result - flash_result[3])