#include "selfattention_binder.h" 
namespace atb_speed { 
// Nothing to initialize beyond the members' own default construction.
SelfAttentionBinder::SelfAttentionBinder() = default;
// Member vectors release their storage automatically.
SelfAttentionBinder::~SelfAttentionBinder() = default;

/// Caches the host-side tokenOffset and seqLen arrays from the runtime
/// parameter JSON so that BindTensor can later attach them to the
/// operation's input tensors.
///
/// @param paramJson runtime parameters; optional "tokenOffset" and
///        "seqLen" keys are expected to be arrays of integers.
void SelfAttentionBinder::ParseParam(const nlohmann::json &paramJson)
{
    tokenOffset_.clear();
    if (paramJson.contains("tokenOffset")) {
        // Bind by const reference: copying each json node is wasteful.
        for (const auto &item : paramJson["tokenOffset"]) {
            tokenOffset_.push_back(item.get<int32_t>());
        }
    }
    seqLen_.clear();
    // Guard like "tokenOffset" above: on a const json, operator[] with a
    // missing key is undefined behavior in nlohmann::json, so the original
    // unguarded access could crash when "seqLen" is absent.
    if (paramJson.contains("seqLen")) {
        for (const auto &item : paramJson["seqLen"]) {
            seqLen_.push_back(item.get<int32_t>());
        }
    }
}

/// Attaches the cached host-side seqLen/tokenOffset buffers to the matching
/// input tensors of the variant pack. The tensor slot layout is selected by
/// the number of input tensors.
///
/// @param variantPack the pack whose inTensors receive the hostData pointers.
void SelfAttentionBinder::BindTensor(atb::VariantPack &variantPack)
{
    const size_t flashEncoderInTensorNum = 5; // flash encoder input num
    if (variantPack.inTensors.size() == flashEncoderInTensorNum) {
        // Flash-encoder layout: seqLen occupies input slot 4.
        const uint32_t encoderSeqLenTensorId = 4;
        variantPack.inTensors.at(encoderSeqLenTensorId).hostData = seqLen_.data();
        return;
    }
    // Otherwise tokenOffset and seqLen occupy input slots 6 and 7.
    const uint32_t tokenOffsetTensorId = 6;
    const uint32_t decoderSeqLenTensorId = 7;
    variantPack.inTensors.at(tokenOffsetTensorId).hostData = tokenOffset_.data();
    variantPack.inTensors.at(decoderSeqLenTensorId).hostData = seqLen_.data();
}
}