// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved

#include "gating_attention.h"
#include "utils/bf16.h"
#include <kutacc.h>
#include <ATen/native/cpu/utils.h>
#include <ATen/ops/empty.h>
#include <ATen/record_function.h>
#include <utils/memory.h>

namespace alphafold {
namespace {
// Heuristic tile size for the attention kernel, tuned by sequence length:
// shorter sequences get larger blocks, longer sequences smaller ones.
int64_t default_block_size(int64_t seq_len)
{
    // Each entry maps an exclusive upper bound on seq_len to a block size.
    struct Bucket {
        int64_t upper_bound;
        int64_t block;
    };
    static constexpr Bucket kBuckets[] = {
        {300, 176},
        {600, 128},
        {800, 80},
        {1300, 64},
        {1700, 48},
    };
    for (const auto &bucket : kBuckets) {
        if (seq_len < bucket.upper_bound) {
            return bucket.block;
        }
    }
    // Sequences of length 1700 and above use the smallest block.
    return 32;
}
}

// Validates and repacks the gating-attention weights for the kernel:
// the four projection weights and the output weight are converted to
// bfloat16, made contiguous, and flattened to [nchannels, nchannels];
// the two biases are converted to float32 and made contiguous.
//
// NOTE: all tensors are taken by non-const reference and are overwritten
// in place with the converted/reshaped versions, so the caller's tensors
// are mutated as a side effect of construction.
GatingAttentionWeight::GatingAttentionWeight(at::Tensor &query_w, at::Tensor &key_w, at::Tensor &value_w,
    at::Tensor &gating_w, at::Tensor &gating_b, at::Tensor &output_w, at::Tensor &output_b)
    :nchannels(query_w.sizes()[2]), nheads(query_w.sizes()[0]), head_size(query_w.sizes()[1])
{
    // query_w is expected to be [nheads, head_size, nchannels] with
    // nchannels == nheads * head_size, so the [C, C] flattening below is valid.
    KPEX_CHECK(nchannels == nheads * head_size, "invalid query_w shape [", nchannels, ", ", nheads, ", ", head_size,
        "]");
    KPEX_CHECK_TENSOR_SHAPE(query_w, nheads, head_size, nchannels);
    KPEX_CHECK_TENSOR_SHAPE(key_w, nheads, head_size, nchannels);
    KPEX_CHECK_TENSOR_SHAPE(value_w, nheads, head_size, nchannels);
    KPEX_CHECK_TENSOR_SHAPE(gating_w, nheads, head_size,nchannels);
    KPEX_CHECK_TENSOR_SHAPE(gating_b, nheads, head_size);
    KPEX_CHECK_TENSOR_SHAPE(output_w, nchannels, nheads, head_size);
    KPEX_CHECK_TENSOR_SHAPE(output_b, nchannels);

    // Weights run in bf16; biases stay in fp32 (presumably accumulated in
    // fp32 inside the kernel -- confirm against kutacc::gating_attention).
    auto float_opt = query_w.options().device(kpex::device()).dtype(c10::kFloat);
    auto bf16_opt = query_w.options().device(kpex::device()).dtype(c10::kBFloat16);
    // Collapse [nheads, head_size, nchannels] -> [nchannels, nchannels]
    // (valid because nchannels == nheads * head_size, checked above).
    query_w = query_w.to(bf16_opt).contiguous().view({nchannels, nchannels});
    key_w = key_w.to(bf16_opt).contiguous().view({nchannels, nchannels});
    value_w = value_w.to(bf16_opt).contiguous().view({nchannels, nchannels});
    gating_w = gating_w.to(bf16_opt).contiguous().view({nchannels, nchannels});
    output_w = output_w.to(bf16_opt).contiguous().view({nchannels, nchannels});

    // Keep the converted tensors alive in the member fields; the member
    // tensors share storage with the (now-overwritten) caller tensors.
    this->query_w = query_w;
    this->key_w = key_w;
    this->value_w = value_w;
    this->gating_w = gating_w;
    this->output_w = output_w;
    
    this->gating_b = gating_b.to(float_opt).contiguous();
    this->output_b = output_b.to(float_opt).contiguous();
}

// Runs the fused gating-attention kernel on bf16 inputs.
//
// q_data:          [batch, seq_len, nchannels] bf16 query activations; the
//                  output tensor copies its sizes and options.
// m_data:          bf16 tensor; NOTE(review): only its dtype is checked and it
//                  is never forwarded to the kernel -- confirm whether the
//                  kernel derives K/V from q_data (self-attention) and m_data
//                  is kept for interface compatibility.
// bias:            [batch, 1, 1, seq_len] bf16 additive bias.
// nonbatched_bias: [nheads, seq_len, seq_len] bf16 bias, or empty (size-0
//                  leading dim) to disable.
// weights:         prepacked weights (see GatingAttentionWeight).
// block_size:      optional tile size override; defaults to a heuristic
//                  based on seq_len.
//
// Side effect: bias and nonbatched_bias are replaced by contiguous versions
// through their non-const references.
at::Tensor gating_attention(at::Tensor &q_data, at::Tensor &m_data, at::Tensor &bias, at::Tensor &nonbatched_bias,
    const GatingAttentionWeight &weights, std::optional<int64_t> block_size)
{
    at::Tensor out = at::empty(q_data.sizes(), q_data.options());
    int64_t batch = q_data.sizes()[0];
    int64_t seq_len = q_data.sizes()[1];
    int64_t nchannels = weights.nchannels;
    int64_t nheads = weights.nheads;
    int64_t head_size = weights.head_size;
    // An explicit caller override wins; otherwise use the seq_len heuristic.
    int64_t block_size_ = block_size.value_or(default_block_size(seq_len));

    RECORD_FUNCTION("gating_attention", c10::ArrayRef<c10::IValue>({batch, seq_len, nheads, head_size}));
    KPEX_CHECK(q_data.dtype() == c10::kBFloat16, q_data.dtype());
    KPEX_CHECK(m_data.dtype() == c10::kBFloat16, m_data.dtype());
    KPEX_CHECK(bias.dtype() == c10::kBFloat16, bias.dtype());
    // Fix: report nonbatched_bias's own dtype on mismatch (previously the
    // error message printed bias.dtype(), which had already passed its check).
    KPEX_CHECK(nonbatched_bias.dtype() == c10::kBFloat16, nonbatched_bias.dtype());
    KPEX_CHECK_TENSOR_SHAPE(q_data, batch, seq_len, nchannels);
    KPEX_CHECK_TENSOR_SHAPE(bias, batch, 1, 1, seq_len);
    // A size-0 leading dimension means "no nonbatched bias".
    if (nonbatched_bias.sizes()[0] != 0) {
        KPEX_CHECK_TENSOR_SHAPE(nonbatched_bias, nheads, seq_len, seq_len);
    }

    bias = bias.contiguous();
    nonbatched_bias = nonbatched_bias.contiguous();

    // Scratch tensors for the kernel's intermediate projections. Note v uses
    // a [nheads, head_size, batch, seq_len] layout, unlike q/k/gate.
    auto q = q_data.new_empty({batch, seq_len, nheads, head_size});
    auto k = q_data.new_empty({batch, seq_len, nheads, head_size});
    auto v = q_data.new_empty({nheads, head_size, batch, seq_len});
    auto gate = q_data.new_empty({batch, seq_len, nheads, head_size});
    auto weighted_avg = q_data.new_empty({batch, seq_len, nheads, head_size});
    at::Tensor input;
    {
        RECORD_FUNCTION("input_prepack", c10::ArrayRef<c10::IValue>({}));
        // view() requires q_data to be contiguous; shape was validated above.
        input = q_data.view({batch * seq_len, nchannels});
    }
    kutacc::gating_attention(
        batch, seq_len, nchannels, nheads, head_size, block_size_,
        bias.data_ptr(), bias.strides().vec(), nonbatched_bias.data_ptr(), nonbatched_bias.sizes().vec(), nonbatched_bias.strides().vec(),
        input.data_ptr(), out.data_ptr(), out.strides().vec(), gate.data_ptr(), k.data_ptr(), v.data_ptr(), q.data_ptr(), q.strides().vec(),
        gate.strides().vec(), v.strides().vec(), k.strides().vec(), weights.value_w.data_ptr(),
        weighted_avg.data_ptr(), weighted_avg.strides().vec(), weights.query_w.data_ptr(), weights.key_w.data_ptr(),
        weights.gating_w.data_ptr(), weights.gating_b.data_ptr(), weights.output_w.data_ptr(), weights.output_b.data_ptr()
    );
    return out;
}
}   // namespace alphafold