// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#pragma once

#include <utility>
#include <vector>

#include <turbo/utility/status.h>
#include <nebula/util/cpu_info.h>
#include <nebula/version.h>

namespace nebula::internal {

    // Instruction-set feature levels that a function implementation can
    // target. DynamicDispatch (below) picks, at runtime, the registered
    // implementation with the highest supported level.
    enum class DispatchLevel : int {
        // These dispatch levels, corresponding to instruction set features,
        // are sorted in increasing order of preference.
        NONE = 0,  // portable baseline; always considered supported
        SSE4_2,    // x86 SSE4.2
        AVX2,      // x86 AVX2
        AVX512,    // x86 AVX-512
        NEON,      // ARM NEON/ASIMD — NOTE(review): is_supported() currently
                   // reports this level as unsupported (default branch)
        MAX        // sentinel: count of levels, not a selectable level
    };

/*
  A facility for dynamic dispatch according to available DispatchLevel.

  Typical use:

    static void my_function_default(...);
    static void my_function_avx2(...);

    struct MyDynamicFunction {
      using FunctionType = decltype(&my_function_default);

      static std::vector<std::pair<DispatchLevel, FunctionType>> implementations() {
        return {
          { DispatchLevel::NONE, my_function_default }
    #if NEBULA_HAVE_RUNTIME_AVX2_SUPPORTED
          , { DispatchLevel::AVX2, my_function_avx2 }
    #endif
        };
      }
    };

    void my_function(...) {
      static DynamicDispatch<MyDynamicFunction> dispatch;
      return dispatch.func(...);
    }
*/
    template<typename DynamicFunction>
    class DynamicDispatch {
    protected:
        using FunctionType = typename DynamicFunction::FunctionType;
        using Implementation = std::pair<DispatchLevel, FunctionType>;

    public:
        DynamicDispatch() { resolve(DynamicFunction::implementations()); }

        FunctionType func = {};

    protected:
        // Use the Implementation with the highest DispatchLevel
        void resolve(const std::vector<Implementation> &implementations) {
            Implementation cur{DispatchLevel::NONE, {}};

            for (const auto &impl: implementations) {
                if (impl.first >= cur.first && is_supported(impl.first)) {
                    // Higher (or same) level than current
                    cur = impl;
                }
            }

            if (!cur.second) {
                turbo::invalid_argument_error("No appropriate implementation found").abort();
            }
            func = cur.second;
        }

    private:
        bool is_supported(DispatchLevel level) const {
            static const auto cpu_info = nebula::internal::CpuInfo::GetInstance();

            switch (level) {
                case DispatchLevel::NONE:
                    return true;
                case DispatchLevel::SSE4_2:
                    return cpu_info->is_supported(CpuInfo::SSE4_2);
                case DispatchLevel::AVX2:
                    return cpu_info->is_supported(CpuInfo::AVX2);
                case DispatchLevel::AVX512:
                    return cpu_info->is_supported(CpuInfo::AVX512);
                default:
                    return false;
            }
        }
    };


}  // namespace nebula::internal
