#pragma once

#include <utils/cuda/CudaWrappers.h>

#include <algorithm>
#include <bit>
#include <cstddef>
#include <limits>
#include <memory>
#include <stdexcept>
#include <utility>

#include <cuda_runtime_api.h>
#include <driver_types.h>

namespace solar::cuda
{
    // A simulation of std::vector usable on the device side.
    // Parts of the implementation are borrowed from libc++.
    // Clang/LLVM does not support __builtin_alloca in CUDA device code until version 19.
    // The "allocate_on_stack" mode is not tested; use it at your own risk.
    // CUDA's alloca has been a preview feature since PTX ISA 7.3.
    template <class T> class Vector
    {
        public:
            using value_type = T;
            using size_type = std::size_t;
            using reference = value_type&;
            using const_reference = const value_type&;

            __host__ __device__ constexpr Vector() = default;

            // allocate_on_stack is only honoured by the device-side reserve()
            // path (currently compiled out — see below); on the host it is ignored.
            __host__ __device__ constexpr Vector(bool allocate_on_stack)
            {
                this->allocate_on_stack_ = allocate_on_stack;
            }

            // Binds the vector to a CUDA stream; host-side allocation, copy and
            // free are issued on this stream (stream-ordered allocator).
            __host__ __device__ constexpr Vector(cudaStream_t stream,
                                                 bool allocate_on_stack = false)
            {
                this->stream_ = stream;
                this->allocate_on_stack_ = allocate_on_stack;
            }

            // Adopts an existing buffer of `size` elements. Ownership transfers:
            // the destructor frees `data`.
            __host__ __device__ constexpr Vector(value_type* data, std::size_t size,
                                                 cudaStream_t stream = nullptr,
                                                 bool allocate_on_stack = false)
            {
                this->data_ = data;
                this->size_ = size;
                // BUGFIX: was `this->capacity_ = capacity_;` — a self-assignment
                // that left capacity_ == 0, so the very first push_back would
                // reallocate even though the adopted buffer still had room.
                this->capacity_ = size;
                this->stream_ = stream;
                this->allocate_on_stack_ = allocate_on_stack;
            }

            __host__ __device__ ~Vector()
            {
#ifdef __CUDA_ARCH__
                if (allocate_on_stack_)
                {
                    // Stack allocations are reclaimed automatically on return.
                    return;
                }
#endif
                // BUGFIX: the previous `size_ != 0 && data_ != nullptr` guard
                // leaked storage that had been reserve()d but never filled.
                if (data_ != nullptr)
                {
#ifdef __CUDA_ARCH__
                    // BUGFIX: device buffers come from new[], so the array form
                    // delete[] is required (plain delete was undefined behaviour).
                    delete[] this->data_;
#else
                    cudaFreeAsync(this->data_, this->stream_);
#endif
                }
            }

            // NOTE(review): this is a shallow copy — both vectors end up owning
            // the same buffer, and the destructor then frees it twice. Kept
            // as-is to preserve existing (view-like) call sites; confirm intent.
            __host__ __device__ constexpr Vector(const Vector& other)
            {
                this->data_ = other.data_;
                this->size_ = other.size_;
                this->capacity_ = other.capacity_;
                this->stream_ = other.stream_;
                // BUGFIX: allocate_on_stack_ was not copied, so a copy of a
                // stack-backed device vector would delete[] stack memory.
                this->allocate_on_stack_ = other.allocate_on_stack_;
            }

            __host__ __device__ constexpr auto data() -> value_type* { return data_; }

            __host__ __device__ constexpr auto data() const -> const value_type* { return data_; }

            __host__ __device__ constexpr auto size() const -> std::size_t { return this->size_; }

            // Top-level const on a by-value return is meaningless and was
            // dropped; the type seen by callers is unchanged.
            __host__ __device__ constexpr auto stream() const -> cudaStream_t
            {
                return this->stream_;
            }

            __host__ __device__ constexpr auto capacity() const -> std::size_t
            {
                return this->capacity_;
            }

            // Largest representable element count. BUGFIX: divide the byte
            // limit by sizeof(value_type) as libc++ does; the old version
            // overstated the bound for any T larger than one byte.
            __host__ __device__ constexpr auto max_size() const -> std::size_t
            {
                return std::numeric_limits<std::size_t>::max() / sizeof(value_type);
            }

            __host__ __device__ constexpr auto empty() const -> bool { return this->size_ == 0; }

            // Host path: grows the device buffer via the stream-ordered
            // allocator. All operations are asynchronous on stream_.
            __host__ void reserve(size_type new_size)
            {
                if (new_size > this->capacity_)
                {
                    value_type* new_data = nullptr;
                    // BUGFIX: the old code allocated `new_size` *bytes*, not
                    // `new_size` elements, corrupting memory for sizeof(T) > 1.
                    // Also fail loudly instead of memcpy'ing into nullptr.
                    if (cudaMallocAsync(std::addressof(new_data),
                                        new_size * sizeof(value_type),
                                        this->stream_) != cudaSuccess
                        || new_data == nullptr)
                    {
                        throw std::runtime_error("Vector::reserve: cudaMallocAsync failed");
                    }
                    cudaMemcpyAsync(new_data, data_, sizeof(value_type) * size_,
                                    cudaMemcpyDeviceToDevice, stream_);
                    cudaFreeAsync(this->data_, this->stream_);
                    this->data_ = new_data;
                    this->capacity_ = new_size;
                }
            }

            // Device path: grows via new[]/delete[]. The alloca branch is
            // deliberately compiled out (`&& 0`) — see the class comment about
            // the untested "allocate_on_stack" mode.
            __device__ void reserve(size_type new_size)
            {
                if (new_size > this->capacity_)
                {
                    value_type* new_data = nullptr;
#if defined(__CUDACC__) && (defined(__clang__) && __clang_major__ >= 19 && 0)
                    if (allocate_on_stack_)
                    {
                        new_data = std::bit_cast<value_type*>(vector_alloca(new_size * sizeof(value_type)));
                    }
                    else
                    {
#endif
                        new_data = new value_type[new_size];
#if defined(__CUDACC__) && (defined(__clang__) && __clang_major__ >= 19 && 0)
                    }
#endif
                    for (size_type i = 0; i < this->size_; i++)
                    {
                        new_data[i] = this->data_[i];
                    }
#if defined(__CUDACC__) && (defined(__clang__) && __clang_major__ >= 19 && 0)
                    if (!allocate_on_stack_)
                    {
#endif
                        // BUGFIX: array form delete[] to match new[] above.
                        delete[] this->data_;
#if defined(__CUDACC__) && (defined(__clang__) && __clang_major__ >= 19 && 0)
                    }
#endif
                    this->data_ = new_data;
                    this->capacity_ = new_size;
                }
            }

            // Appends a copy of `value`, growing geometrically when full.
            __host__ __device__ void push_back(const value_type& value)
            {
                if (size_ == capacity_)
                {
                    reserve(recommend(size_ + 1));
                }
                data_[size_] = value;
                size_++;
            }

            // Appends `value` by move, growing geometrically when full.
            __host__ __device__ void push_back(value_type&& value)
            {
                if (size_ == capacity_)
                {
                    reserve(recommend(size_ + 1));
                }
                data_[size_] = std::move(value);
                size_++;
            }

            // Bounds-checked access; throws on the host, unchecked on the
            // device (no exceptions in device code).
            __host__ __device__ constexpr auto at(size_type n) -> reference
            {
#if !defined(__CUDA_ARCH__)
                if (n >= this->size_)
                {
                    throw std::out_of_range("vector");
                }
#endif
                return this->data_[n];
            }

            __host__ __device__ constexpr auto at(size_type n) const -> const_reference
            {
#if !defined(__CUDA_ARCH__)
                if (n >= this->size_)
                {
                    throw std::out_of_range("vector");
                }
#endif
                return this->data_[n];
            }

            // Unchecked element access, mirroring std::vector::operator[].
            __host__ __device__ constexpr auto operator[](size_type n) noexcept -> reference
            {
                return this->data_[n];
            }

            __host__ __device__ constexpr auto operator[](size_type n) const noexcept -> const_reference
            {
                return this->data_[n];
            }

        private:
            value_type* data_ = nullptr;
            std::size_t size_{};
            std::size_t capacity_{};
            cudaStream_t stream_ = nullptr;
            bool allocate_on_stack_ = false;

            // Host-side alloca shim. NOTE(review): the MSVC branch uses _alloca,
            // which is declared in <malloc.h>; that header is not included here —
            // verify it comes in transitively on MSVC builds.
#if (defined(_MSC_VER) && (!defined(__clang__) || !defined(__CUDACC__)))
            void* vector_alloca(std::size_t size) { return _alloca(size); }
#else
            void* vector_alloca(std::size_t size) { return __builtin_alloca(size); }
#endif
#if defined(__CUDACC__) && (defined(__clang__) && __clang_major__ >= 19 && 0)
            __device__ void* vector_alloca(std::size_t size) { return __builtin_alloca(size); }
#endif

            // Growth policy borrowed from libc++: double the capacity, but at
            // least `new_size`, saturating at max_size() to avoid overflow of
            // the `2 * capacity_` term.
            __host__ __device__ constexpr auto recommend(size_type new_size) -> size_type
            {
                if (capacity_ > max_size() / 2)
                {
                    return max_size();
                }
                return std::max<size_type>(2 * capacity_, new_size);
            }
    };
} // namespace solar::cuda
