// Copyright 2022 jeff.li. and/or its affiliates.
/*
 * Acknowledgement: This file originates from incubator-tvm.
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
#include <cstring>
#include <memory>
#include <mutex>
#include <sstream>
#include <vector>

#include <cuda.h>
#include <cuda_runtime.h>

#include "core/device/cuda/cuda_allocator.h"
#include "core/framework/allocator.h"
#include "core/framework/arena.h"
#include "core/framework/bfc_arena.h"

#include "cuda_common.h"
#include "cuda_functions.h"

#include <tbir/runtime/c_runtime_api.h>
#include <tbir/runtime/device_api.h>
#include <tbir/runtime/dlpack.h>
#include <tbir/runtime/registry.h>

namespace tbir::runtime::cuda {

    static void *create_stream(int device_id) {
        TBIR_CUDA_CALL(cudaSetDevice(device_id));
        cudaStream_t retval;
        TBIR_CUDA_CALL(cudaStreamCreate(&retval));
        return static_cast<TbirStreamHandle>(retval);
    }

    static void free_stream(int device_id, void *stream) {
        TBIR_CUDA_CALL(cudaSetDevice(device_id));
        cudaStream_t cu_stream = static_cast<cudaStream_t>(stream);
        TBIR_CUDA_CALL(cudaStreamDestroy(cu_stream));
    }

    // Global stream state and constants.
    // init_flag guards the one-time call to initGlobalStreamState.
    static std::once_flag init_flag;
    // Number of visible GPUs; -1 until initGlobalStreamState runs.
    static int num_gpus = -1;

    // Thread-local current streams: one shared_ptr slot per GPU, lazily
    // populated by initCUDAStreamsOnce on first use in each thread.
    static thread_local std::unique_ptr<std::shared_ptr<void>[]> current_streams = nullptr;

    // Populates global values (currently just num_gpus).
    // Warning: this function must only be called once!  Callers route
    // through std::call_once(init_flag, ...) to guarantee that.
    static void initGlobalStreamState() {
        num_gpus = device_count();
    }

    // Builds the table of default streams: one owning shared_ptr per
    // visible GPU.  Each deleter captures its device index so the stream
    // is destroyed on the device that created it.
    static std::unique_ptr<std::shared_ptr<void>[]> createDefaultCUDAStreams() {
        // Ensure num_gpus has been populated exactly once, process-wide.
        std::call_once(init_flag, initGlobalStreamState);

        auto table = std::make_unique<std::shared_ptr<void>[]>(num_gpus);
        for (int dev = 0; dev < num_gpus; ++dev) {
            auto deleter = [dev](void *s) { free_stream(dev, s); };
            table[dev] = std::shared_ptr<void>(create_stream(dev), deleter);
        }
        return table;
    }

    // Returns the process-wide default stream table, creating it on first
    // call (function-local static initialization is thread-safe in C++11).
    static const std::unique_ptr<std::shared_ptr<void>[]> &getDefaultCUDAStreams() {
        static std::unique_ptr<std::shared_ptr<void>[]> streams = createDefaultCUDAStreams();
        return streams;
    }

    // Lazily initializes the calling thread's current-stream table,
    // pointing every slot at the process-wide default stream for the
    // corresponding device.  Safe to call repeatedly; later calls on the
    // same thread are no-ops.
    static void initCUDAStreamsOnce() {
        // Global init (num_gpus) happens exactly once, process-wide.
        std::call_once(init_flag, initGlobalStreamState);

        if (current_streams != nullptr) {
            return;  // this thread is already set up
        }

        const auto &defaults = getDefaultCUDAStreams();
        current_streams = std::make_unique<std::shared_ptr<void>[]>(num_gpus);
        for (int dev = 0; dev < num_gpus; ++dev) {
            current_streams[dev] = defaults[dev];
        }
    }

    // Helper to verify the GPU index is valid.
    // Assumes num_gpus has been populated (callers go through
    // initCUDAStreamsOnce first).
    static inline void check_gpu(int device_index) {
        TBIR_ASSERT(device_index >= 0 && device_index < num_gpus);
    }

    class CUDADeviceAPI final : public DeviceAPI {
    public:
        void SetDevice(TbirDevice device) final {
            TBIR_CUDA_CALL(cudaSetDevice(device.device_id));
        }

        void GetAttr(TbirDevice device, DeviceAttrKind kind, RTValue *rv) final {
            int value = 0;
            switch (kind) {
                case kExist:
                    value = (cudaDeviceGetAttribute(&value, cudaDevAttrMaxThreadsPerBlock, device.device_id) ==
                             cudaSuccess);
                    break;
                case kMaxThreadsPerBlock: {
                    TBIR_CUDA_CALL(
                            cudaDeviceGetAttribute(&value, cudaDevAttrMaxThreadsPerBlock, device.device_id));
                    break;
                }
                case kWarpSize: {
                    TBIR_CUDA_CALL(cudaDeviceGetAttribute(&value, cudaDevAttrWarpSize, device.device_id));
                    break;
                }
                case kMaxSharedMemoryPerBlock: {
                    TBIR_CUDA_CALL(
                            cudaDeviceGetAttribute(&value, cudaDevAttrMaxSharedMemoryPerBlock, device.device_id));
                    break;
                }
                case kComputeVersion: {
                    std::ostringstream os;
                    TBIR_CUDA_CALL(
                            cudaDeviceGetAttribute(&value, cudaDevAttrComputeCapabilityMajor, device.device_id));
                    os << value << ".";
                    TBIR_CUDA_CALL(
                            cudaDeviceGetAttribute(&value, cudaDevAttrComputeCapabilityMinor, device.device_id));
                    os << value;
                    *rv = String(os.str());
                    return;
                }
                case kDeviceName: {
                    String name(256, '\0');
                    TBIR_CUDA_DRIVER_CALL(cuDeviceGetName(&name[0], name.size(), device.device_id));
                    name.resize(strlen(name.c_str()));
                    *rv = std::move(name);
                    return;
                }
                case kMaxClockRate: {
                    TBIR_CUDA_CALL(
                            cudaDeviceGetAttribute(&value, cudaDevAttrClockRate, device.device_id));
                    break;
                }
                case kMultiProcessorCount: {
                    TBIR_CUDA_CALL(
                            cudaDeviceGetAttribute(&value, cudaDevAttrMultiProcessorCount, device.device_id));
                    break;
                }
                case kMaxThreadDimensions: {
                    int dims[3];
                    TBIR_CUDA_CALL(
                            cudaDeviceGetAttribute(&dims[0], cudaDevAttrMaxBlockDimX, device.device_id));
                    TBIR_CUDA_CALL(
                            cudaDeviceGetAttribute(&dims[1], cudaDevAttrMaxBlockDimY, device.device_id));
                    TBIR_CUDA_CALL(
                            cudaDeviceGetAttribute(&dims[2], cudaDevAttrMaxBlockDimZ, device.device_id));

                    std::stringstream ss;  // use json string to return multiple int values;
                    ss << "[" << dims[0] << ", " << dims[1] << ", " << dims[2] << "]";
                    *rv = String(ss.str());
                    return;
                }
                case kMaxRegistersPerBlock: {
                    TBIR_CUDA_CALL(
                            cudaDeviceGetAttribute(&value, cudaDevAttrMaxRegistersPerBlock, device.device_id));
                    break;
                }
                case kGcnArch:
                    return;
                case kApiVersion: {
                    *rv = CUDA_VERSION;
                    return;
                }
            }
            *rv = value;
        }

        void *Alloc(TbirDevice device, size_t nbytes) final {
            void *ret;
            if (device.device_type == kDLCUDAHost) {
                if (static_cast<size_t>(device.device_id) >= cudaPinnedBFCAllocators.size() ||
                    cudaPinnedBFCAllocators[device.device_id] == nullptr) {
                    InitPinAllocator(device);
                }
                ret = cudaPinnedBFCAllocators[device.device_id]->Alloc(nbytes);
            } else {
                if (static_cast<size_t>(device.device_id) >= cudaBFCAllocators.size() ||
                    cudaBFCAllocators[device.device_id] == nullptr) {
                    InitCudaAllocator(device);
                }
                ret = cudaBFCAllocators[device.device_id]->Alloc(nbytes);
            }
            return ret;
        }

        void *Alloc(TbirDevice device,
                    size_t nbytes,
                    size_t alignment,
                    DLDataType type_hint) final {
            MXCHECK_EQ(256 % alignment, 0U) << "CUDA space is aligned at 256 bytes";
            return Alloc(device, nbytes);
        }

        void *AllocRaw(TbirDevice device,
                       size_t nbytes,
                       size_t alignment,
                       DLDataType type_hint) final {
            MXCHECK_EQ(256 % alignment, 0U) << "CUDA space is aligned at 256 bytes";
            void *ret;
            if (device.device_type == kDLCUDAHost) {
                TBIR_CUDA_CALL(cudaMallocHost(&ret, nbytes));
            } else {
                TBIR_CUDA_CALL(cudaSetDevice(device.device_id));
                TBIR_CUDA_CALL(cudaMalloc(&ret, nbytes));
            }
            return ret;
        }

        void Free(TbirDevice device, void *ptr) final {
            if (device.device_type == kDLCUDAHost) {
                MXCHECK(static_cast<size_t>(device.device_id) < cudaPinnedBFCAllocators.size() &&
                        cudaPinnedBFCAllocators[device.device_id] != nullptr);
                cudaPinnedBFCAllocators[device.device_id]->Free(ptr);
            } else {
                MXCHECK(static_cast<size_t>(device.device_id) < cudaBFCAllocators.size() &&
                        cudaBFCAllocators[device.device_id] != nullptr);
                cudaBFCAllocators[device.device_id]->Free(ptr);
            }
        }

        void FreeRaw(TbirDevice device, void *ptr) final {
            if (device.device_type == kDLCUDAHost) {
                TBIR_CUDA_CALL(cudaFreeHost(ptr));
            } else {
                TBIR_CUDA_CALL(cudaSetDevice(device.device_id));
                TBIR_CUDA_CALL(cudaFree(ptr));
            }
        }

        ~CUDADeviceAPI() {
        }

    protected:
        void CopyDataFromTo(const void *from,
                            size_t from_offset,
                            void *to,
                            size_t to_offset,
                            size_t size,
                            TbirDevice device_from,
                            TbirDevice device_to,
                            DLDataType type_hint,
                            TbirStreamHandle stream) final {
            cudaStream_t cu_stream = static_cast<cudaStream_t>(stream);
            from = static_cast<const char *>(from) + from_offset;
            to = static_cast<char *>(to) + to_offset;

            if (device_from.device_type == kDLCUDAHost) {
                device_from.device_type = kDLCPU;
            }

            if (device_to.device_type == kDLCUDAHost) {
                device_to.device_type = kDLCPU;
            }

            // In case there is a copy from host mem to host mem */
            if (device_to.device_type == kDLCPU && device_from.device_type == kDLCPU) {
                memcpy(to, from, size);
                return;
            }

            if (device_from.device_type == kDLCUDA && device_to.device_type == kDLCUDA) {
                TBIR_CUDA_CALL(cudaSetDevice(device_from.device_id));
                if (device_from.device_id == device_to.device_id) {
                    GPUCopy(from, to, size, cudaMemcpyDeviceToDevice, cu_stream);
                } else {
                    cudaMemcpyPeerAsync(to, device_to.device_id, from, device_from.device_id, size, cu_stream);
                }
            } else if (device_from.device_type == kDLCUDA && device_to.device_type == kDLCPU) {
                TBIR_CUDA_CALL(cudaSetDevice(device_from.device_id));
                GPUCopy(from, to, size, cudaMemcpyDeviceToHost, cu_stream);
            } else if (device_from.device_type == kDLCPU && device_to.device_type == kDLCUDA) {
                TBIR_CUDA_CALL(cudaSetDevice(device_to.device_id));
                GPUCopy(from, to, size, cudaMemcpyHostToDevice, cu_stream);
            } else {
                MXLOG(FATAL) << "expect copy from/to GPU or between GPU";
            }
        }

    public:
        TbirStreamHandle CreateStream(TbirDevice device) {
            return create_stream(device.device_id);
        }

        void FreeStream(TbirDevice device, TbirStreamHandle stream) {
            return free_stream(device.device_id, stream);
        }

        TbirStreamHandle GetDefaultStream(TbirDevice device) final {
            initCUDAStreamsOnce();
            auto device_index = device.device_id;
            if (device_index == -1) {
                device_index = current_device();
            }
            check_gpu(device_index);
            return getDefaultCUDAStreams()[device_index].get();
        }

        TbirStreamHandle GetCurrentThreadStream(TbirDevice device) final {
            initCUDAStreamsOnce();
            auto device_index = device.device_id;
            if (device_index == -1) {
                device_index = current_device();
            }
            check_gpu(device_index);
            return current_streams[device_index].get();
        }

        std::shared_ptr<void> GetSharedCurrentThreadStream(TbirDevice device) final {
            initCUDAStreamsOnce();
            auto device_index = device.device_id;
            if (device_index == -1) {
                device_index = current_device();
            }
            check_gpu(device_index);
            return current_streams[device_index];
        }

        void SetCurrentThreadStream(TbirDevice device, std::shared_ptr<void> stream) final {
            initCUDAStreamsOnce();
            current_streams[device.device_id] = std::move(stream);
        }

        void StreamSync(TbirDevice device, TbirStreamHandle stream) final {
            TBIR_CUDA_CALL(cudaSetDevice(device.device_id));
            TBIR_CUDA_CALL(cudaStreamSynchronize(static_cast<cudaStream_t>(stream)));
        }

        void CreateEventSync(TbirStreamHandle stream) final {
            cudaEvent_t finish_event;
            TBIR_CUDA_CALL(cudaEventCreate(&finish_event))
            TBIR_CUDA_CALL(cudaEventRecord(finish_event, static_cast<cudaStream_t>(stream)))
            TBIR_CUDA_CALL(cudaEventSynchronize(finish_event));
            TBIR_CUDA_CALL(cudaEventDestroy(finish_event));
        }

        void SyncStreamFromTo(TbirDevice device,
                              TbirStreamHandle event_src,
                              TbirStreamHandle event_dst) {
            TBIR_CUDA_CALL(cudaSetDevice(device.device_id));
            cudaStream_t src_stream = static_cast<cudaStream_t>(event_src);
            cudaStream_t dst_stream = static_cast<cudaStream_t>(event_dst);
            cudaEvent_t evt;
            TBIR_CUDA_CALL(cudaEventCreate(&evt));
            TBIR_CUDA_CALL(cudaEventRecord(evt, src_stream));
            TBIR_CUDA_CALL(cudaStreamWaitEvent(dst_stream, evt, 0));
            TBIR_CUDA_CALL(cudaEventDestroy(evt));
        }

        static CUDADeviceAPI *Global() {
            // NOTE: explicitly use new to avoid exit-time destruction of global state
            // Global state will be recycled by OS as the process exits.
            static auto *inst = new CUDADeviceAPI();
            return inst;
        }

    private:
        TbirStreamHandle GetStream(TbirDevice device,
                                   std::vector<TbirStreamHandle> &pool) {
            std::lock_guard<std::mutex> lock(streamAllocMutex_);
            if (static_cast<size_t>(device.device_id) >= pool.size()) {
                pool.resize(device.device_id + 1, nullptr);
            }
            if (pool[device.device_id] == nullptr) {
                pool[device.device_id] = CreateStream(device);
            }
            return pool[device.device_id];
        }

        static void GPUCopy(
                const void *from, void *to, size_t size, cudaMemcpyKind kind, cudaStream_t stream) {
            if (stream != nullptr) {
                TBIR_CUDA_CALL(cudaMemcpyAsync(to, from, size, kind, stream));
            } else {
                TBIR_CUDA_CALL(cudaMemcpy(to, from, size, kind));
            }
        }

        void InitCudaAllocator(TbirDevice device) {
            std::lock_guard<std::mutex> lock(cudaAllocMutex_);
            if (static_cast<size_t>(device.device_id) >= cudaBFCAllocators.size()) {
                cudaBFCAllocators.resize(device.device_id + 1, nullptr);
            }
            if (cudaBFCAllocators[device.device_id] == nullptr) {
                cudaBFCAllocators[device.device_id] = new brt::BFCArena(
                        std::unique_ptr<brt::IAllocator>(new brt::CUDAAllocator(device.device_id, "cuda")),
                        1ULL << 35);
            }
        }

        void InitPinAllocator(TbirDevice device) {
            std::lock_guard<std::mutex> lock(pinAllocMutex_);
            if (static_cast<size_t>(device.device_id) >= cudaPinnedBFCAllocators.size()) {
                cudaPinnedBFCAllocators.resize(device.device_id + 1, nullptr);
            }
            if (cudaPinnedBFCAllocators[device.device_id] == nullptr) {
                cudaPinnedBFCAllocators[device.device_id] =
                        new brt::BFCArena(std::unique_ptr<brt::IAllocator>(
                                new brt::CUDAPinnedAllocator(device.device_id, "cuda_pin")),
                                          1ULL << 33);
            }
        }

        std::vector<brt::BFCArena *> cudaBFCAllocators;
        std::vector<brt::BFCArena *> cudaPinnedBFCAllocators;
        std::mutex cudaAllocMutex_;
        std::mutex pinAllocMutex_;
        std::mutex streamAllocMutex_;
    };

    // All four registry names below resolve to the same CUDADeviceAPI
    // singleton: "gpu"/"cuda" are aliases for device memory, and
    // "cpu_pinned"/"cuda_host" are aliases for pinned host memory, which
    // this API also serves (see the kDLCUDAHost branches above).
    TBIR_REGISTER_GLOBAL("device_api.gpu").set_body([](PyArgs args) -> RTValue {
        DeviceAPI *ptr = CUDADeviceAPI::Global();
        return static_cast<void *>(ptr);
    });

    TBIR_REGISTER_GLOBAL("device_api.cuda").set_body([](PyArgs args) -> RTValue {
        DeviceAPI *ptr = CUDADeviceAPI::Global();
        return static_cast<void *>(ptr);
    });

    TBIR_REGISTER_GLOBAL("device_api.cpu_pinned").set_body([](PyArgs args) -> RTValue {
        DeviceAPI *ptr = CUDADeviceAPI::Global();
        return static_cast<void *>(ptr);
    });

    TBIR_REGISTER_GLOBAL("device_api.cuda_host").set_body([](PyArgs args) -> RTValue {
        DeviceAPI *ptr = CUDADeviceAPI::Global();
        return static_cast<void *>(ptr);
    });

}  // namespace tbir::runtime::cuda
