// SPDX-FileCopyrightText: © 2025 Tenstorrent AI ULC
//
// SPDX-License-Identifier: Apache-2.0

#include "dataflow_api.h"
#include "ttnn/deprecated/tt_dnn/kernels/dataflow/generate_reduce_scaler.hpp"
#include "ttnn/deprecated/tt_dnn/kernels/dataflow/generate_bcast_scalar.hpp"

// HW-bcast scale for fused scale-attn-softmax.
// Produces a single tile in c_2 whose first datum is the high 16 bits of
// runtime arg 1 (presumably a bfloat16 scale packed into a uint32).
// NOTE(review): not called from this file's kernel_main, which uses
// generate_bcast_unary_scalar instead — confirm whether this is still needed.
FORCE_INLINE void generate_inv_sqrt_hw_bcast_tile() {
    constexpr auto cb_fused_scale = tt::CBIndex::c_2;
    const uint32_t packed_scale = get_arg_val<uint32_t>(1);

    cb_reserve_back(cb_fused_scale, 1);
    // Only element [0] is written; HW-bcast consumers read just the first datum.
    auto* tile_base = reinterpret_cast<uint16_t*>(get_write_ptr(cb_fused_scale));
    tile_base[0] = static_cast<uint16_t>(packed_scale >> 16);
    cb_push_back(cb_fused_scale, 1);
}

// Reader kernel entry point: produces the reduce-scaler tile (and, when
// FUSED_SCALE_MASK is compiled in, the pre-scale scalar tile plus the
// attention-mask tiles) into circular buffers for the softmax compute kernel.
void kernel_main() {
    constexpr uint32_t cb_reduce_scaler = tt::CBIndex::c_1;
    // Packed scaler value consumed by the reduce op (runtime arg 0).
    const uint32_t reduce_scaler = get_arg_val<uint32_t>(0);

#if FUSED_SCALE_MASK
    // Width of one mask row, in tiles (compile-time arg 0).
    constexpr uint32_t block_wt = get_compile_time_arg_val(0);
    // Accessor descriptor for the mask tensor; its compile-time args begin at index 1.
    constexpr auto mask_args = TensorAccessorArgs<1>();
    const uint32_t mask_addr = get_arg_val<uint32_t>(2);
    const uint32_t mask_start_tile_id = get_arg_val<uint32_t>(3);

    constexpr uint32_t cb_attn = tt::CBIndex::c_3;
    // Tile size is taken from the destination CB's format.
    uint32_t mask_tile_bytes = get_tile_size(cb_attn);
    uint32_t mask_id = mask_start_tile_id;

    const auto addr_mask = TensorAccessor(mask_args, mask_addr, mask_tile_bytes);

    constexpr auto cb_fused_scale = tt::CBIndex::c_2;
    // Pre-softmax scale (runtime arg 1), published as a broadcast scalar tile.
    const uint32_t pre_scale = get_arg_val<uint32_t>(1);
    generate_bcast_unary_scalar(cb_fused_scale, pre_scale);

#if defined(CAUSAL_MASK) && !defined(SHARDED_CAUSAL_MASK)

    // Head count and mask height (in tiles) follow the accessor's compile-time
    // args at offsets +2 and +4 (offsets +0/+1/+3 presumably consumed by other
    // kernels of this op — TODO confirm against the program factory).
    constexpr uint32_t fused_head = get_compile_time_arg_val(mask_args.next_compile_time_args_offset() + 2);
    constexpr uint32_t mask_block_ht = get_compile_time_arg_val(mask_args.next_compile_time_args_offset() + 4);

    for (uint32_t f = 0; f < fused_head; f++) {
        // Every fused head re-reads the same mask tiles from the start.
        mask_id = mask_start_tile_id;

        for (uint32_t h = 0; h < mask_block_ht; h++) {
            // Read one row of block_wt mask tiles into cb_attn.
            cb_reserve_back(cb_attn, block_wt);
            uint32_t l1_write_addr = get_write_ptr(cb_attn);
            for (uint32_t w = 0; w < block_wt; w++) {
                noc_async_read_tile(mask_id, addr_mask, l1_write_addr);
                l1_write_addr += mask_tile_bytes;
                ++mask_id;
            }
            // Wait for all in-flight NOC reads before publishing the row.
            noc_async_read_barrier();
            cb_push_back(cb_attn, block_wt);

            // The reduce scaler is produced exactly once, right after the
            // first mask row is pushed.
            if (f == 0 && h == 0) {
                generate_reduce_scaler(cb_reduce_scaler, reduce_scaler);
            }
        }
    }
#elif defined(CAUSAL_MASK) && defined(SHARDED_CAUSAL_MASK)
    // Sharded causal mask: no mask tiles are read here (presumably already
    // resident via a sharded buffer — confirm); only the scaler is produced.
    generate_reduce_scaler(cb_reduce_scaler, reduce_scaler);
#else
    // Non-causal mask: read a single row of block_wt mask tiles once.
    cb_reserve_back(cb_attn, block_wt);
    uint32_t l1_write_addr = get_write_ptr(cb_attn);
    for (uint32_t w = 0; w < block_wt; w++) {
        noc_async_read_tile(mask_id, addr_mask, l1_write_addr);
        l1_write_addr += mask_tile_bytes;
        ++mask_id;
    }
    // Ensure all reads have landed before the compute kernel may consume them.
    noc_async_read_barrier();
    cb_push_back(cb_attn, block_wt);

    generate_reduce_scaler(cb_reduce_scaler, reduce_scaler);
#endif

#else
    // No fused scale/mask variant: only the reduce scaler is needed.
    generate_reduce_scaler(cb_reduce_scaler, reduce_scaler);
#endif
}
