#include <algorithm>
#include <atomic>
#include <chrono>
#include <iostream>
#include <map>

#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/lib/core/errors.h"

using namespace tensorflow;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;

using namespace std;
using namespace chrono;

using OpKernelConstructionPtr = OpKernelConstruction*;
using OpKernelContextPtr = OpKernelContext*;
using InferenceContextPtr = ::tensorflow::shape_inference::InferenceContext*;

namespace {
class CustOps : public OpKernel {
public:
    explicit CustOps(OpKernelConstructionPtr context) : OpKernel(context) {}

    void Compute(OpKernelContextPtr context) override
    {
        std::cout << "Cust Ops not installed!!" << std::endl;
    }

    ~CustOps() override = default;
};
}  // namespace

namespace tensorflow {
// Op definition for FlashAttentionScore. Optional inputs (real_shift,
// drop_mask, padding_mask, atten_mask, prefix, actual_seq_*, *_start_idx)
// are modeled as list-typed inputs whose list attrs may be empty (>= 0),
// the standard TF pattern for optional inputs.
REGISTER_OP("FlashAttentionScore")
    .Input("query: T")
    .Input("key: T")
    .Input("value: T")
    .Input("real_shift: real_shift_type")
    .Input("drop_mask: drop_mask_type")
    .Input("padding_mask: padding_mask_type")
    .Input("atten_mask: atten_mask_type")
    .Input("prefix: prefix_type")
    .Input("actual_seq_qlen: actual_seq_qlen_type")
    .Input("actual_seq_kvlen: actual_seq_kvlen_type")
    .Input("q_start_idx: q_start_idx_type")
    .Input("kv_start_idx: kv_start_idx_type")
    .Output("softmax_max: float32")
    .Output("softmax_sum: float32")
    .Output("softmax_out: T")
    .Output("attention_out: T")
    .Attr("scale_value: float = 1.0")
    .Attr("keep_prob: float = 1.0")
    .Attr("pre_tockens: int = 2147483647")
    .Attr("next_tockens: int = 2147483647")
    .Attr("head_num: int")
    .Attr("input_layout: string")
    .Attr("inner_precise: int = 0")
    .Attr("sparse_mode: int = 0")
    .Attr("pse_type: int = 1")
    .Attr("T: {float16, float32, bfloat16} = DT_FLOAT")
    .Attr("real_shift_type: list({float16, float32, bfloat16}) >= 0")
    .Attr("drop_mask_type: list({uint8}) >= 0")
    .Attr("padding_mask_type: list({float16, float32, bfloat16}) >= 0")
    .Attr("atten_mask_type: list({bool, uint8}) >= 0")
    .Attr("prefix_type: list({int64}) >= 0")
    .Attr("actual_seq_qlen_type: list({int64}) >= 0")
    .Attr("actual_seq_kvlen_type: list({int64}) >= 0")
    .Attr("q_start_idx_type: list({int64}) >= 0")
    .Attr("kv_start_idx_type: list({int64}) >= 0")
    // Output shapes depend on runtime data; mark every output as unknown
    // rather than returning OK with the output handles left unset (which
    // leaves null ShapeHandles for downstream shape-inference consumers).
    .SetShapeFn(shape_inference::UnknownShape);
// NOTE: the terminating semicolon is required — REGISTER_KERNEL_BUILDER
// expands to a variable definition.
REGISTER_KERNEL_BUILDER(Name("FlashAttentionScore").Device(DEVICE_CPU), CustOps);
}  // namespace tensorflow