# NOTE(review): `ops` is unused in this file's visible code — possibly kept for
# side effects or gradient registration elsewhere; confirm before removing.
from tensorflow.python.framework import ops
import tensorflow as tf
# Load the custom FlashAttention op library built out-of-tree; the relative
# path assumes this module is run from a sibling directory of build/.
tfOpLib = tf.load_op_library("../build/tf_ops/libflashattention.so")

def create_optional_input_list(input):
    """Wrap an optional tensor into a list-typed op input.

    TensorFlow list-typed op inputs express "absent" as an empty list, so a
    ``None`` argument becomes ``[]`` and anything else becomes a one-element
    list. Falsy non-None values (e.g. ``0``) are still wrapped.

    Args:
        input: An optional tensor (or ``None``).
            NOTE(review): the name shadows the ``input`` builtin; kept for
            backward compatibility with keyword callers.

    Returns:
        list: ``[]`` if *input* is ``None``, else ``[input]``.
    """
    # `is not None` (not `not ... is None`) per PEP 8; conditional expression
    # replaces the manual append.
    return [] if input is None else [input]

def npu_flash_attention(query, key, value, head_num, input_layout, real_shift=None, drop_mask=None, padding_mask=None,
                          atten_mask=None, prefix=None, actual_seq_qlen=None, actual_seq_kvlen=None,
                          q_start_idx=None, kv_start_idx=None, scale_value=1.0, keep_prob=1.0,
                          pre_tockens=2147483647, next_tockens=2147483647, inner_precise=0, sparse_mode=0,
                          pse_type=1):
    """Wrapper for the custom ``flash_attention_score`` op.

    Forwards ``query``/``key``/``value`` and the scalar attributes straight
    through to ``tfOpLib.flash_attention_score``; every optional tensor
    argument is wrapped via ``create_optional_input_list`` so that ``None``
    becomes the empty list the list-typed op inputs expect.

    Returns whatever the underlying op returns, unmodified.
    """
    as_list = create_optional_input_list
    return tfOpLib.flash_attention_score(
        query=query,
        key=key,
        value=value,
        real_shift=as_list(real_shift),
        drop_mask=as_list(drop_mask),
        padding_mask=as_list(padding_mask),
        atten_mask=as_list(atten_mask),
        prefix=as_list(prefix),
        actual_seq_qlen=as_list(actual_seq_qlen),
        actual_seq_kvlen=as_list(actual_seq_kvlen),
        q_start_idx=as_list(q_start_idx),
        kv_start_idx=as_list(kv_start_idx),
        scale_value=scale_value,
        keep_prob=keep_prob,
        pre_tockens=pre_tockens,
        next_tockens=next_tockens,
        head_num=head_num,
        input_layout=input_layout,
        inner_precise=inner_precise,
        sparse_mode=sparse_mode,
        pse_type=pse_type,
    )
