#include "gpu/gpu_impl_list.hpp"

#if DNNL_GPU_VENDOR == DNNL_VENDOR_NVIDIA
#include "gpu/nvidia/cudnn_softmax.hpp"
#include "gpu/nvidia/cudnn_spatialtf.hpp"
#endif

namespace dnnl {
namespace impl {
namespace gpu {

namespace {
using namespace dnnl::impl::prop_kind;

// Implementation registry for the spatial transformer primitive, keyed by
// propagation kind. Each list is nullptr-terminated, in dispatch priority
// order. Only forward has a real implementation here; backward is an empty
// list (and can be compiled out entirely via REG_BWD_PK).
// NOTE(review): a nvidia::cudnn_softmax_fwd_t entry was removed from the
// forward list — it looked like a copy-paste leftover from the softmax impl
// list and does not implement spatialtf.
const std::map<pk_impl_key_t, std::vector<impl_list_item_t>> impl_list_map REG_SPATIALTF_P({
    {{forward}, {
        GPU_INSTANCE_NVIDIA(nvidia::cudnn_spatialtf_grid_generator_fwd_t)
        nullptr,
    }},
    {{backward}, REG_BWD_PK({
        nullptr,
    })},
});

// Flat, prop-kind-agnostic list (forward-only entries, nullptr-terminated),
// currently returned unconditionally by get_spatialtf_impl_list().
// NOTE(review): a nvidia::cudnn_softmax_fwd_t entry was removed — it looked
// like a copy-paste leftover from the softmax impl list and does not
// implement spatialtf.
constexpr impl_list_item_t impl_list[] = REG_SPATIALTF_P({
        GPU_INSTANCE_NVIDIA(nvidia::cudnn_spatialtf_grid_generator_fwd_t)
        nullptr,
});

} // namespace


/// Returns the implementation list for the spatial transformer primitive
/// matching the propagation kind of @p desc.
///
/// The original body ignored @p desc and returned the flat forward-only
/// @c impl_list for every propagation kind (with the intended dispatch left
/// commented out alongside a debug print). This restores the map-based
/// lookup so backward requests get the (empty) backward list instead of the
/// forward implementations, and falls back to an empty nullptr-terminated
/// list when nothing is registered for the requested kind (e.g. when
/// spatialtf dispatch is compiled out and the map is empty).
const impl_list_item_t *get_spatialtf_impl_list(const spatialtf_desc_t *desc) {
    static const impl_list_item_t empty_list[] = {nullptr};

    // Collapse forward_training / forward_inference into the single
    // 'forward' key used by impl_list_map.
    const bool is_fwd = utils::one_of(
            desc->prop_kind, forward_training, forward_inference);
    const prop_kind_t prop_kind = is_fwd ? forward : backward;

    const auto impl_list_it = impl_list_map.find({prop_kind});
    return impl_list_it != impl_list_map.cend() ? impl_list_it->second.data()
                                                : empty_list;
}

} // namespace gpu
} // namespace impl
} // namespace dnnl