//
// Created by syshe on 2022/3/22.
//

#ifndef GENERAL_OCR_OBJECT_H
#define GENERAL_OCR_OBJECT_H

#include "utils/c++17.h"
#include "utils/macros.h"

#include <atomic>
#include <climits>
#include <cstddef>
#include <memory>
#include <stdexcept>
#include <type_traits>
#include <utility>

// Forward declaration of pybind11::class_ so it can be befriended by
// intrusive_ptr below without pulling in the pybind11 headers.
namespace pybind11 {
    template <typename, typename...>
    class class_;
}

namespace c10 {
    class intrusive_ptr_target;
    namespace raw {
        // Free-function incref helpers (defined elsewhere); forward-declared
        // here so they can be befriended by intrusive_ptr_target below.
        namespace weak_intrusive_ptr {
            inline void incref(intrusive_ptr_target* self);
        }
        namespace intrusive_ptr {
            inline void incref(intrusive_ptr_target* self);
        }

        // Traits type (defined elsewhere) that needs friend access to the
        // private refcounts of intrusive_ptr_target / intrusive_ptr.
        template <typename TTarget>
        struct ExclusivelyOwnedTraits;

// constructor tag used by intrusive_ptr constructors
        struct DontIncreaseRefcount {};
    } // namespace raw
/**
 * intrusive_ptr<T> is an alternative to shared_ptr<T> that has better
 * performance because it does the refcounting intrusively
 * (i.e. in a member of the object itself).
 * Your class T needs to inherit from intrusive_ptr_target to allow it to be
 * used in an intrusive_ptr<T>. Your class's constructor should not allow
 * `this` to escape to other threads or create an intrusive_ptr from `this`.
 */

// Note [Stack allocated intrusive_ptr_target safety]
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// A well known problem with std::enable_shared_from_this is that it
// allows you to create a std::shared_ptr from a stack allocated object,
// which is totally bogus because the object will die once you return
// from the stack.  In intrusive_ptr, we can detect that this has occurred,
// because we set the refcount/weakcount of objects which inherit from
// intrusive_ptr_target to zero, *unless* we can prove that the object
// was dynamically allocated (e.g., via make_intrusive).
//
// Thus, whenever you transmute a T* into a intrusive_ptr<T>, we check
// and make sure that the refcount isn't zero (or, a more subtle
// test for weak_intrusive_ptr<T>, for which the refcount may validly
// be zero, but the weak refcount better not be zero), because that
// tells us if the object was allocated by us.  If it wasn't, no
// intrusive_ptr for you!

    // Base class providing the intrusive strong/weak reference counts.
    // Classes managed by intrusive_ptr<T> must derive from this.
    class C10_API intrusive_ptr_target {
            // Note [Weak references for intrusive refcounting]
            // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            // Here's the scheme:
            //
            //  - refcount == number of strong references to the object
            //    weakcount == number of weak references to the object,
            //      plus one more if refcount > 0
            //    An invariant: refcount > 0  =>  weakcount > 0
            //
            //  - c10::StorageImpl stays live as long as there are any strong
            //    or weak pointers to it (weakcount > 0, since strong
            //    references count as a +1 to weakcount)
            //
            //  - finalizers are called and data_ptr is deallocated when refcount == 0
            //
            //  - Once refcount == 0, it can never again be > 0 (the transition
            //    from > 0 to == 0 is monotonic)
            //
            //  - When you access c10::StorageImpl via a weak pointer, you must
            //    atomically increment the use count, if it is greater than 0.
            //    If it is not, you must report that the storage is dead.
            //
            mutable std::atomic<size_t> refcount_;
            mutable std::atomic<size_t> weakcount_;

            // intrusive_ptr / weak_intrusive_ptr and the raw incref helpers
            // manipulate refcount_/weakcount_ directly, hence the friends.
            template <typename T, typename NullType>
            friend class intrusive_ptr;
            friend inline void raw::intrusive_ptr::incref(intrusive_ptr_target* self);

            template <typename T, typename NullType>
            friend class weak_intrusive_ptr;
            friend inline void raw::weak_intrusive_ptr::incref(
            intrusive_ptr_target* self);

            template <typename T>
            friend struct ExclusivelyOwnedTraits;

            protected:
            // protected destructor. We never want to destruct intrusive_ptr_target*
            // directly.
            virtual ~intrusive_ptr_target() {
// Disable -Wterminate and -Wexceptions so we're allowed to use assertions
// (i.e. throw exceptions) in a destructor.
// We also have to disable -Wunknown-warning-option and -Wpragmas, because
// some other compilers don't know about -Wterminate or -Wexceptions and
// will show a warning about unknown warning options otherwise.

#if defined(_MSC_VER) && !defined(__clang__)
                #pragma warning(push)
#pragma warning( \
    disable : 4297) // function assumed not to throw an exception but does
#else
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpragmas"
#pragma GCC diagnostic ignored "-Wunknown-warning-option"
#pragma GCC diagnostic ignored "-Wterminate"
#pragma GCC diagnostic ignored "-Wexceptions"
#endif
                TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
                        // Second condition is there to accommodate
                        // unsafe_adapt_non_heap_allocated: since we are doing our own
                        // deallocation in that case, it is correct for each
                        // expected_decref to have happened (some user code tried to
                        // decref and thus free the object, but it didn't happen right
                        // away) or not (no user code tried to free the object, and
                        // now it's getting destroyed through whatever mechanism the
                        // caller of unsafe_adapt_non_heap_allocated wanted to
                        // use). We choose our reference count such that the count
                        // will not dip below INT_MAX regardless.
                        refcount_.load() == 0 || refcount_.load() >= INT_MAX,
                        "Tried to destruct an intrusive_ptr_target that still has intrusive_ptr to it; refcount was ",
                        refcount_.load());
                TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
                        // See ~intrusive_ptr for optimization that will frequently result in 1
                        // at destruction time.
                        weakcount_.load() == 1 || weakcount_.load() == 0 ||
                        weakcount_.load() == INT_MAX - 1 || weakcount_.load() == INT_MAX,
                        "Tried to destruct an intrusive_ptr_target that still has weak_intrusive_ptr to it");
#if defined(_MSC_VER) && !defined(__clang__)
#pragma warning(pop)
#else
#pragma GCC diagnostic pop
#endif
            }

            // Counts start at zero; only make_intrusive (via the private
            // intrusive_ptr(TTarget*) ctor) bumps them to 1.  This is how
            // stack-allocated targets are detected -- see
            // Note [Stack allocated intrusive_ptr_target safety] above.
            constexpr intrusive_ptr_target() noexcept : refcount_(0), weakcount_(0) {}

            // intrusive_ptr_target supports copy and move: but refcount and weakcount
            // don't participate (since they are intrinsic properties of the memory
            // location)
            intrusive_ptr_target(intrusive_ptr_target&& /*other*/) noexcept
            : intrusive_ptr_target() {}

            intrusive_ptr_target& operator=(intrusive_ptr_target&& /*other*/) noexcept {
                return *this;
            }

            intrusive_ptr_target(const intrusive_ptr_target& /*other*/) noexcept
            : intrusive_ptr_target() {}

            intrusive_ptr_target& operator=(
            const intrusive_ptr_target& /*other*/) noexcept {
                return *this;
            }

            private:
            /**
             * This is called when refcount reaches zero.
             * You can override this to release expensive resources.
             * There might still be weak references, so your object might not get
             * destructed yet, but you can assume the object isn't used anymore,
             * i.e. no more calls to methods or accesses to members (we just can't
             * destruct it yet because we need the weakcount accessible).
             *
             * Even if there are no weak references (i.e. your class is about to be
             * destructed), this function is guaranteed to be called first.
             * However, if you use your class for an object on the stack that is
             * destructed by the scope (i.e. without intrusive_ptr), this function will
             * not be called.
             */
            virtual void release_resources() {}
    };

    namespace detail {
        // Default null-pointer policy for intrusive_ptr/weak_intrusive_ptr:
        // "null" is simply nullptr.
        template <class TTarget>
        struct intrusive_target_default_null_type final {
            static constexpr TTarget* singleton() noexcept {
                return nullptr;
            }
        };

        // Convert a pointer between two NullType conventions: the source's
        // null singleton maps onto the destination's null singleton; any
        // other pointer passes through unchanged.
        template <class TTarget, class ToNullType, class FromNullType>
        TTarget* assign_ptr_(TTarget* rhs) {
            return rhs == FromNullType::singleton() ? ToNullType::singleton() : rhs;
        }

        // The strong-count increment must be acquire-release so that
        // use_count() and unique() observe a consistent value.
        inline size_t atomic_refcount_increment(std::atomic<size_t>& counter) {
            return counter.fetch_add(1, std::memory_order_acq_rel) + 1;
        }

        // The strong-count decrement must likewise be acquire-release for
        // correctness; compare e.g. std::shared_ptr implementations.
        inline size_t atomic_refcount_decrement(std::atomic<size_t>& counter) {
            return counter.fetch_sub(1, std::memory_order_acq_rel) - 1;
        }

        // weak_use_count() is only used for testing, so the weak increment
        // does not need to be reliable; relaxed ordering suffices.
        inline size_t atomic_weakcount_increment(std::atomic<size_t>& counter) {
            return counter.fetch_add(1, std::memory_order_relaxed) + 1;
        }

        // The weak decrement, like the strong one, must be acquire-release.
        inline size_t atomic_weakcount_decrement(std::atomic<size_t>& counter) {
            return counter.fetch_sub(1, std::memory_order_acq_rel) - 1;
        }

    } // namespace detail

    // Forward declaration; the full class definition follows intrusive_ptr.
    template <class TTarget, class NullType>
    class weak_intrusive_ptr;

    template <
            class TTarget,
            class NullType = detail::intrusive_target_default_null_type<TTarget>>
    class intrusive_ptr final {
    private:
//  the following static assert would be nice to have but it requires
//  the target class T to be fully defined when intrusive_ptr<T> is instantiated
//  this is a problem for classes that contain pointers to themselves
//  static_assert(
//      std::is_base_of<intrusive_ptr_target, TTarget>::value,
//      "intrusive_ptr can only be used for classes that inherit from
//      intrusive_ptr_target.");
#ifndef _WIN32
        // This static_assert triggers on MSVC
        //  error C2131: expression did not evaluate to a constant
        static_assert(
                NullType::singleton() == NullType::singleton(),
                "NullType must have a constexpr singleton() method");
#endif
        static_assert(
                std::is_base_of<
                        TTarget,
                        typename std::remove_pointer<decltype(NullType::singleton())>::type>::
                value,
                "NullType::singleton() must return a element_type* pointer");

        // The managed object; equals NullType::singleton() when empty.
        TTarget* target_;

        template <typename T>
        friend struct ExclusivelyOwnedTraits;
        template <class TTarget2, class NullType2>
        friend class intrusive_ptr;
        friend class weak_intrusive_ptr<TTarget, NullType>;

        // Make pybind11::class_ be a friend class of intrusive_ptr, so that custom
        // smart holder in pybind11 could access the private constructor of
        // intrusive_ptr(T*) which took the ownership of the object. This is required
        // by customer holder macro PYBIND11_DECLARE_HOLDER_TYPE, where it uses
        // intrusive_ptr(TTarget*) to initialize and take ownership of the object. For
        // details, see
        // https://pybind11.readthedocs.io/en/stable/advanced/smart_ptrs.html#custom-smart-pointers
        template <typename, typename...>
        friend class pybind11::class_;

        // Increment the strong refcount (no-op for the null singleton).
        void retain_() {
            if (target_ != NullType::singleton()) {
                size_t new_refcount =
                        detail::atomic_refcount_increment(target_->refcount_);
                TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
                        new_refcount != 1,
                        "intrusive_ptr: Cannot increase refcount after it reached zero.");
            }
        }

        // Drop the strong reference; on the last one, release resources and
        // possibly delete the target (if no weak references remain either).
        void reset_() noexcept {
            if (target_ != NullType::singleton() &&
                detail::atomic_refcount_decrement(target_->refcount_) == 0) {
                // justification for const_cast: release_resources is basically a
                // destructor and a destructor always mutates the object, even for const
                // objects. NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDelete)
                const_cast<std::remove_const_t<TTarget>*>(target_)->release_resources();

                // See comment above about weakcount. As long as refcount>0,
                // weakcount is one larger than the actual number of weak references.
                // So we need to decrement it here.
                if (target_->weakcount_.load(std::memory_order_acquire) == 1 ||
                    detail::atomic_weakcount_decrement(target_->weakcount_) == 0) {
                    delete target_;
                }
            }
            target_ = NullType::singleton();
        }

        // raw pointer constructors are not public because we shouldn't make
        // intrusive_ptr out of raw pointers except from inside the make_intrusive(),
        // reclaim() and weak_intrusive_ptr::lock() implementations.

        // This constructor will increase the ref counter for you.
        // This constructor will be used by the make_intrusive(), and also pybind11,
        // which wrap the intrusive_ptr holder around the raw pointer and incref
        // correspondingly (pybind11 requires raw pointer constructor to incref by
        // default).
        explicit intrusive_ptr(TTarget* target)
                : intrusive_ptr(target, raw::DontIncreaseRefcount{}) {
            if (target_ != NullType::singleton()) {
                // We just created result.target_, so we know no other thread has
                // access to it, so we know we needn't care about memory ordering.
                // (On x86_64, a store with memory_order_relaxed generates a plain old
                // `mov`, whereas an atomic increment does a lock-prefixed `add`, which is
                // much more expensive: https://godbolt.org/z/eKPzj8.)
                TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
                        target_->refcount_ == 0 && target_->weakcount_ == 0,
                        "intrusive_ptr: Newly-created target had non-zero refcounts. Does its "
                        "constructor do something strange like incref or create an "
                        "intrusive_ptr from `this`?");
                target_->refcount_.store(1, std::memory_order_relaxed);
                target_->weakcount_.store(1, std::memory_order_relaxed);
            }
        }

    public:
        using element_type = TTarget;

        intrusive_ptr() noexcept
                : intrusive_ptr(NullType::singleton(), raw::DontIncreaseRefcount{}) {}

        // This constructor will not increase the ref counter for you.
        // We use the tagged dispatch mechanism to explicitly mark this constructor
        // to not increase the refcount
        explicit intrusive_ptr(TTarget* target, raw::DontIncreaseRefcount) noexcept
                : target_(target) {}

        explicit intrusive_ptr(std::unique_ptr<TTarget> rhs) noexcept
                : intrusive_ptr(rhs.release()) {}

        intrusive_ptr(intrusive_ptr&& rhs) noexcept : target_(rhs.target_) {
            rhs.target_ = NullType::singleton();
        }

        template <class From, class FromNullType>
        /* implicit */ intrusive_ptr(intrusive_ptr<From, FromNullType>&& rhs) noexcept
                : target_(
                detail::assign_ptr_<TTarget, NullType, FromNullType>(rhs.target_)) {
            static_assert(
                    std::is_convertible<From*, TTarget*>::value,
                    "Type mismatch. intrusive_ptr move constructor got pointer of wrong type.");
            rhs.target_ = FromNullType::singleton();
        }

        intrusive_ptr(const intrusive_ptr& rhs) : target_(rhs.target_) {
            retain_();
        }

        template <class From, class FromNullType>
        /* implicit */ intrusive_ptr(const intrusive_ptr<From, FromNullType>& rhs)
                : target_(
                detail::assign_ptr_<TTarget, NullType, FromNullType>(rhs.target_)) {
            static_assert(
                    std::is_convertible<From*, TTarget*>::value,
                    "Type mismatch. intrusive_ptr copy constructor got pointer of wrong type.");
            retain_();
        }

        ~intrusive_ptr() noexcept {
            reset_();
        }

        intrusive_ptr& operator=(intrusive_ptr&& rhs) & noexcept {
            return operator=<TTarget, NullType>(std::move(rhs));
        }

        template <class From, class FromNullType>
        intrusive_ptr& operator=(intrusive_ptr<From, FromNullType>&& rhs) & noexcept {
            static_assert(
                    std::is_convertible<From*, TTarget*>::value,
                    "Type mismatch. intrusive_ptr move assignment got pointer of wrong type.");
            intrusive_ptr tmp = std::move(rhs);
            swap(tmp);
            return *this;
        }

        intrusive_ptr& operator=(const intrusive_ptr& rhs) & noexcept {
            return operator=<TTarget, NullType>(rhs);
        }

        // FIX: the parameter must use FromNullType (not NullType); otherwise
        // FromNullType is non-deducible and this overload can never match a
        // cross-NullType copy assignment (matches upstream c10::intrusive_ptr).
        template <class From, class FromNullType>
        intrusive_ptr& operator=(const intrusive_ptr<From, FromNullType>& rhs) & {
            static_assert(
                    std::is_convertible<From*, TTarget*>::value,
                    "Type mismatch. intrusive_ptr copy assignment got pointer of wrong type.");
            intrusive_ptr tmp = rhs;
            swap(tmp);
            return *this;
        }

        TTarget* get() const noexcept {
            return target_;
        }

        TTarget& operator*() const noexcept {
            return *target_;
        }

        TTarget* operator->() const noexcept {
            // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDelete)
            return target_;
        }

        operator bool() const noexcept {
            return target_ != NullType::singleton();
        }

        void reset() noexcept {
            reset_();
        }

        void swap(intrusive_ptr& rhs) noexcept {
            TTarget* tmp = target_;
            target_ = rhs.target_;
            rhs.target_ = tmp;
        }

        // We do a lot of null-pointer checks in our code, good to have this be cheap.
        bool defined() const noexcept {
            return target_ != NullType::singleton();
        }

        size_t use_count() const noexcept {
            if (target_ == NullType::singleton()) {
                return 0;
            }
            return target_->refcount_.load(std::memory_order_acquire);
        }

        size_t weak_use_count() const noexcept {
            if (target_ == NullType::singleton()) {
                return 0;
            }
            return target_->weakcount_.load(std::memory_order_acquire);
        }

        bool unique() const noexcept {
            return use_count() == 1;
        }

        /**
         * Returns an owning (!) pointer to the underlying object and makes the
         * intrusive_ptr instance invalid. That means the refcount is not decreased.
         * You *must* put the returned pointer back into a intrusive_ptr using
         * intrusive_ptr::reclaim(ptr) to properly destruct it.
         * This is helpful for C APIs.
         */
        TTarget* release() noexcept {
            // NOLINTNEXTLINE(clang-analyzer-core.uninitialized.Assign)
            TTarget* result = target_;
            target_ = NullType::singleton();
            return result;
        }

        /**
         * Takes an owning pointer to TTarget* and creates an intrusive_ptr that takes
         * over ownership. That means the refcount is not increased.
         * This is the counter-part to intrusive_ptr::release() and the pointer
         * passed in *must* have been created using intrusive_ptr::release().
         */
        static intrusive_ptr reclaim(TTarget* owning_ptr) {
            return intrusive_ptr(owning_ptr, raw::DontIncreaseRefcount{});
        }

        /**
         * Takes an owning pointer to TTarget* and creates an intrusive_ptr
         * representing a new reference, i.e. the raw pointer retains
         * ownership.
         */
        static intrusive_ptr reclaim_copy(TTarget* owning_ptr) {
            auto ret = reclaim(owning_ptr);
            ret.retain_();
            return ret;
        }

        /**
         * Allocate a heap object with args and wrap it inside a intrusive_ptr and
         * incref. This is a helper function to let make_intrusive() access private
         * intrusive_ptr constructors.
         */
        template <class... Args>
        static intrusive_ptr make(Args&&... args) {
            return intrusive_ptr(new TTarget(std::forward<Args>(args)...));
        }

        /**
         * Turn a new instance of TTarget (e.g., literally allocated
         * using new TTarget(...) into an intrusive_ptr.  If possible,
         * use intrusive_ptr::make instead which statically guarantees
         * that the allocation was done properly.
         *
         * At the moment, the only reason this method exists is because
         * pybind11 holder types expect to be able to allocate in
         * this way (because pybind11 handles the new allocation itself).
         */
        static intrusive_ptr unsafe_steal_from_new(TTarget* raw_ptr) {
            return intrusive_ptr(raw_ptr);
        }

        /**
         * Turn an instance of TTarget that should not be reference counted
         * (e.g., allocated into an arena with placement new) into an
         * intrusive_ptr. This is gratuitously unsafe and should only be
         * used if you can guarantee that the pointer will not escape and be
         * refcounted as normal.
         *
         * `expected_decrefs` is a debugging parameter: it indicates the
         * number of strong owners the intrusive_ptr_target in question is
         * expected to get. In most use cases, this will likely be 1.
         *
         * The reason this method exists is for manually sharing
         * StorageImpls across Tensors in the static runtime. It needs
         * access to private intrusive_ptr members so that the refcounts can
         * be initialized to custom values.
         */
        static intrusive_ptr unsafe_adapt_non_heap_allocated(
                TTarget* raw_ptr,
                size_t expected_decrefs) {
            intrusive_ptr result(raw_ptr, raw::DontIncreaseRefcount{});
            // INT_MAX is impractically huge for a reference count, while
            // being in no danger of overflowing size_t. We actually only need to
            // initialize the refcount to 2 -- we are just doing an unbalanced
            // incref to prevent the non-heap-allocated target from being
            // freed, and we are optimizing that incref by directly
            // initializing the refcounts rather than doing an expensive
            // atomic increment. The reason to use INT_MAX is to accommodate
            // the debug assertions in ~intrusive_ptr_target.
#ifdef NDEBUG
            expected_decrefs = 0;
#endif
            result.target_->refcount_.store(
                    INT_MAX + expected_decrefs, std::memory_order_relaxed);
            result.target_->weakcount_.store(INT_MAX, std::memory_order_relaxed);
            return result;
        }

        /**
         * Turn a **non-owning raw pointer** to an intrusive_ptr.  It is
         * the moral equivalent of enable_shared_from_this on a shared pointer.
         *
         * This method is only valid for objects that are already live.  If
         * you are looking for the moral equivalent of unique_ptr<T>(T*)
         * constructor, see steal_from_new.
         *
         * TODO: https://github.com/pytorch/pytorch/issues/56482
         */
        static intrusive_ptr unsafe_reclaim_from_nonowning(TTarget* raw_ptr) {
            // See Note [Stack allocated intrusive_ptr_target safety]
            TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
                    raw_ptr == NullType::singleton() || raw_ptr->refcount_.load() > 0,
                    "intrusive_ptr: Can only reclaim pointers that are owned by someone");
            auto ptr = reclaim(raw_ptr); // doesn't increase refcount
            ptr.retain_();
            return ptr;
        }
    };

    // Factory helper: heap-allocates a TTarget from `args` and hands ownership
    // to a fresh intrusive_ptr via the private incref-ing constructor.
    template <
            class TTarget,
            class NullType = detail::intrusive_target_default_null_type<TTarget>,
            class... Args>
    inline intrusive_ptr<TTarget, NullType> make_intrusive(Args&&... args) {
        using ptr_type = intrusive_ptr<TTarget, NullType>;
        return ptr_type::make(std::forward<Args>(args)...);
    }

    template <class TTarget, class NullType>
    inline void swap(
            intrusive_ptr<TTarget, NullType>& lhs,
            intrusive_ptr<TTarget, NullType>& rhs) noexcept {
        lhs.swap(rhs);
    }

// Ordering by raw pointer value, so intrusive_ptr can serve as a key in
// ordered containers such as std::map and std::set.
    template <class TTarget1, class NullType1, class TTarget2, class NullType2>
    inline bool operator<(
            const intrusive_ptr<TTarget1, NullType1>& a,
            const intrusive_ptr<TTarget2, NullType2>& b) noexcept {
        return a.get() < b.get();
    }

    // Two intrusive_ptrs compare equal iff they point at the same object.
    template <class TTarget1, class NullType1, class TTarget2, class NullType2>
    inline bool operator==(
            const intrusive_ptr<TTarget1, NullType1>& a,
            const intrusive_ptr<TTarget2, NullType2>& b) noexcept {
        return a.get() == b.get();
    }

    // Defined as the negation of the pointer-equality operator.
    template <class TTarget1, class NullType1, class TTarget2, class NullType2>
    inline bool operator!=(
            const intrusive_ptr<TTarget1, NullType1>& a,
            const intrusive_ptr<TTarget2, NullType2>& b) noexcept {
        return !(a == b);
    }

    // Specialization of MaybeOwnedTraits (primary template declared elsewhere)
    // that lets MaybeOwned<intrusive_ptr<T>> borrow without touching the
    // refcount: a borrow is built with reclaim() (no incref) and dismantled
    // with release() (no decref), so the borrow never owns a reference.
    // NOTE(review): a borrow is only valid while the owned_type it was created
    // from stays alive -- confirm callers uphold this.
    template <typename T>
    struct MaybeOwnedTraits<c10::intrusive_ptr<T>> {
        using owned_type = c10::intrusive_ptr<T>;
        using borrow_type = c10::intrusive_ptr<T>;

        // Wrap the raw pointer without increfing; the borrow aliases `from`.
        static borrow_type createBorrow(const owned_type& from) {
            return borrow_type::reclaim(from.get());
        }

        // Re-point an existing borrow: release the old target first so the
        // assignment never decrefs it.
        static void assignBorrow(borrow_type& lhs, const borrow_type& rhs) {
            lhs.release();
            lhs = borrow_type::reclaim(rhs.get());
        }

        // Drop the alias without decrefing.
        static void destroyBorrow(borrow_type& toDestroy) {
            toDestroy.release();
        }

        static const owned_type& referenceFromBorrow(const borrow_type& borrow) {
            return borrow;
        }

        static const owned_type* pointerFromBorrow(const borrow_type& borrow) {
            return &borrow;
        }

        static bool debugBorrowIsValid(const borrow_type& /*borrow*/) {
            return true;
        }
    };

    // Weak counterpart to intrusive_ptr<TTarget>: holds a weak reference
    // (weakcount_) that keeps the *object allocation* alive but not the
    // object's strong-reference semantics. Use lock() to attempt promotion
    // to a strong intrusive_ptr.
    template <
            typename TTarget,
            class NullType = detail::intrusive_target_default_null_type<TTarget>>
    class weak_intrusive_ptr final {
    private:
        static_assert(
                std::is_base_of<intrusive_ptr_target, TTarget>::value,
                "intrusive_ptr can only be used for classes that inherit from intrusive_ptr_target.");
#ifndef _WIN32
        // This static_assert triggers on MSVC
        //  error C2131: expression did not evaluate to a constant
        static_assert(
                NullType::singleton() == NullType::singleton(),
                "NullType must have a constexpr singleton() method");
#endif
        static_assert(
                std::is_base_of<
                        TTarget,
                        typename std::remove_pointer<decltype(NullType::singleton())>::type>::
                value,
                "NullType::singleton() must return a element_type* pointer");

        // Pointee, or NullType::singleton() when empty.
        TTarget* target_;

        template <class TTarget2, class NullType2>
        friend class weak_intrusive_ptr;

        // Bump the weakcount. Asserts (debug-only) that the count was not
        // resurrected from zero, which would mean the allocation is gone.
        void retain_() {
            if (target_ != NullType::singleton()) {
                size_t new_weakcount =
                        detail::atomic_weakcount_increment(target_->weakcount_);
                TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
                        new_weakcount != 1,
                        "weak_intrusive_ptr: Cannot increase weakcount after it reached zero.");
            }
        }

        // Drop our weak reference; the last weakcount holder frees the
        // allocation itself (the strong side only runs the destructor).
        void reset_() noexcept {
            if (target_ != NullType::singleton() &&
                detail::atomic_weakcount_decrement(target_->weakcount_) == 0) {
                // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDelete)
                delete target_;
            }
            target_ = NullType::singleton();
        }

        // Raw adoption: takes the pointer without touching any counts.
        // Used by reclaim() and the intrusive_ptr-based constructor.
        constexpr explicit weak_intrusive_ptr(TTarget* target) : target_(target) {}

    public:
        using element_type = TTarget;

        // Create a weak reference observing the object owned by `ptr`.
        explicit weak_intrusive_ptr(const intrusive_ptr<TTarget, NullType>& ptr)
                : weak_intrusive_ptr(ptr.get()) {
            retain_();
        }

        // Move: steal the target, leaving rhs empty; no count changes.
        weak_intrusive_ptr(weak_intrusive_ptr&& rhs) noexcept : target_(rhs.target_) {
            rhs.target_ = NullType::singleton();
        }

        // Converting move (From* must be convertible to TTarget*).
        template <class From, class FromNullType>
        /* implicit */ weak_intrusive_ptr(
                weak_intrusive_ptr<From, FromNullType>&& rhs) noexcept
                : target_(
                detail::assign_ptr_<TTarget, NullType, FromNullType>(rhs.target_)) {
            static_assert(
                    std::is_convertible<From*, TTarget*>::value,
                    "Type mismatch. weak_intrusive_ptr move constructor got pointer of wrong type.");
            rhs.target_ = FromNullType::singleton();
        }

        // Copy: share the target and take an additional weak reference.
        weak_intrusive_ptr(const weak_intrusive_ptr& rhs) : target_(rhs.target_) {
            retain_();
        }

        // Converting copy (From* must be convertible to TTarget*).
        template <class From, class FromNullType>
        /* implicit */ weak_intrusive_ptr(
                const weak_intrusive_ptr<From, FromNullType>& rhs)
                : target_(
                detail::assign_ptr_<TTarget, NullType, FromNullType>(rhs.target_)) {
            static_assert(
                    std::is_convertible<From*, TTarget*>::value,
                    "Type mismatch. weak_intrusive_ptr copy constructor got pointer of wrong type.");
            retain_();
        }

        ~weak_intrusive_ptr() noexcept {
            reset_();
        }

        // Delegates to the templated overload; self-move is safe there
        // because it goes through a temporary + swap.
        weak_intrusive_ptr& operator=(weak_intrusive_ptr&& rhs) & noexcept {
            return operator=<TTarget, NullType>(std::move(rhs));
        }

        template <class From, class FromNullType>
        weak_intrusive_ptr& operator=(
                weak_intrusive_ptr<From, FromNullType>&& rhs) & noexcept {
            static_assert(
                    std::is_convertible<From*, TTarget*>::value,
                    "Type mismatch. weak_intrusive_ptr move assignment got pointer of wrong type.");
            weak_intrusive_ptr tmp = std::move(rhs);
            swap(tmp);
            return *this;
        }

        weak_intrusive_ptr& operator=(const weak_intrusive_ptr& rhs) & noexcept {
            return operator=<TTarget, NullType>(rhs);
        }

        // Assign from a strong pointer: copy-construct a weak ref, then swap.
        weak_intrusive_ptr& operator=(
                const intrusive_ptr<TTarget, NullType>& rhs) & noexcept {
            weak_intrusive_ptr tmp(rhs);
            swap(tmp);
            return *this;
        }

        // NOTE(review): the parameter here is weak_intrusive_ptr<From, NullType>
        // (NullType, not FromNullType), and unlike the non-template copy
        // assignment above it is not declared noexcept — verify against
        // upstream whether cross-NullType copy assignment is intended.
        template <class From, class FromNullType>
        weak_intrusive_ptr& operator=(
                const weak_intrusive_ptr<From, NullType>& rhs) & {
            static_assert(
                    std::is_convertible<From*, TTarget*>::value,
                    "Type mismatch. weak_intrusive_ptr copy assignment got pointer of wrong type.");
            weak_intrusive_ptr tmp = rhs;
            swap(tmp);
            return *this;
        }

        // Drop the weak reference and become empty.
        void reset() noexcept {
            reset_();
        }

        void swap(weak_intrusive_ptr& rhs) noexcept {
            TTarget* tmp = target_;
            target_ = rhs.target_;
            rhs.target_ = tmp;
        }

        // NB: This should ONLY be used by the std::hash implementation
        // for weak_intrusive_ptr.  Another way you could do this is
        // friend std::hash<weak_intrusive_ptr>, but this triggers two
        // bugs:
        //
        //  (1) It triggers an nvcc bug, where std::hash in a friend class
        //      declaration gets preprocessed into hash, which then cannot
        //      actually be found.  The error in this case looks like:
        //
        //        error: no template named 'hash'; did you mean 'std::hash'?
        //
        //  (2) On OS X, std::hash is declared as a struct, not a class.
        //      This twings:
        //
        //        error: class 'hash' was previously declared as a struct
        //        [-Werror,-Wmismatched-tags]
        //
        // Both of these are work-aroundable, but on the whole, I decided
        // it would be simpler and easier to make work if we just expose
        // an unsafe getter for target_
        //
        TTarget* _unsafe_get_target() const noexcept {
            return target_;
        }

        // Number of *strong* references currently alive (0 when expired
        // or empty).
        size_t use_count() const noexcept {
            if (target_ == NullType::singleton()) {
                return 0;
            }
            return target_->refcount_.load(
                    std::memory_order_acquire); // refcount, not weakcount!
        }

        size_t weak_use_count() const noexcept {
            if (target_ == NullType::singleton()) {
                return 0;
            }
            return target_->weakcount_.load(std::memory_order_acquire);
        }

        bool expired() const noexcept {
            return use_count() == 0;
        }

        // Try to promote to a strong pointer. Uses a CAS loop so the
        // refcount is only incremented if it is still nonzero, i.e. the
        // object has not been destructed concurrently.
        intrusive_ptr<TTarget, NullType> lock() const noexcept {
            if (expired()) {
                return intrusive_ptr<TTarget, NullType>();
            } else {
                auto refcount = target_->refcount_.load(std::memory_order_seq_cst);
                do {
                    if (refcount == 0) {
                        // Object already destructed, no strong references left anymore.
                        // Return nullptr.
                        return intrusive_ptr<TTarget, NullType>();
                    }
                } while (
                        !target_->refcount_.compare_exchange_weak(refcount, refcount + 1));
                return intrusive_ptr<TTarget, NullType>(
                        target_, raw::DontIncreaseRefcount{});
            }
        }

        /**
         * Returns an owning (but still only weakly referenced) pointer to the
         * underlying object and makes the weak_intrusive_ptr instance invalid.
         * That means the weakcount is not decreased.
         * You *must* put the returned pointer back into a weak_intrusive_ptr using
         * weak_intrusive_ptr::reclaim(ptr) to properly destruct it.
         * This is helpful for C APIs.
         */
        TTarget* release() noexcept {
            TTarget* result = target_;
            target_ = NullType::singleton();
            return result;
        }

        /**
         * Takes an owning (but must be weakly referenced) pointer to TTarget* and
         * creates a weak_intrusive_ptr that takes over ownership.
         * This means that the weakcount is not increased.
         * This is the counter-part to weak_intrusive_ptr::release() and the pointer
         * passed in *must* have been created using weak_intrusive_ptr::release().
         */
        static weak_intrusive_ptr reclaim(TTarget* owning_weak_ptr) {
            // See Note [Stack allocated intrusive_ptr_target safety]
            // if refcount > 0, weakcount must be >1 for weak references to exist.
            // see weak counting explanation at top of this file.
            // if refcount == 0, weakcount only must be >0.
            TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
                    owning_weak_ptr == NullType::singleton() ||
                    owning_weak_ptr->weakcount_.load() > 1 ||
                    (owning_weak_ptr->refcount_.load() == 0 &&
                     owning_weak_ptr->weakcount_.load() > 0),
                    "weak_intrusive_ptr: Can only weak_intrusive_ptr::reclaim() owning pointers that were created using weak_intrusive_ptr::release().");
            return weak_intrusive_ptr(owning_weak_ptr);
        }

        /**
         * Takes a pointer to TTarget* (may be weak or strong) and creates a
         * new weak_intrusive_ptr representing a new weak reference, i.e.
         * the raw pointer retains ownership.
         */
        static weak_intrusive_ptr reclaim_copy(TTarget* owning_ptr) {
            auto ret = reclaim(owning_ptr);
            ret.retain_();
            return ret;
        }

        template <class TTarget1, class NullType1, class TTarget2, class NullType2>
        friend bool operator<(
                const weak_intrusive_ptr<TTarget1, NullType1>& lhs,
                const weak_intrusive_ptr<TTarget2, NullType2>& rhs) noexcept;
        template <class TTarget1, class NullType1, class TTarget2, class NullType2>
        friend bool operator==(
                const weak_intrusive_ptr<TTarget1, NullType1>& lhs,
                const weak_intrusive_ptr<TTarget2, NullType2>& rhs) noexcept;
    };

    // ADL swap for weak_intrusive_ptr; forwards to the noexcept member swap.
    template <class TTarget, class NullType>
    inline void swap(
            weak_intrusive_ptr<TTarget, NullType>& lhs,
            weak_intrusive_ptr<TTarget, NullType>& rhs) noexcept {
        lhs.swap(rhs);
    }

// To allow weak_intrusive_ptr inside std::map or std::set, we need operator<.
// Ordering is by raw target pointer value (identity, not pointee contents).
    template <class TTarget1, class NullType1, class TTarget2, class NullType2>
    inline bool operator<(
            const weak_intrusive_ptr<TTarget1, NullType1>& lhs,
            const weak_intrusive_ptr<TTarget2, NullType2>& rhs) noexcept {
        return lhs.target_ < rhs.target_;
    }

    // Two weak pointers are equal iff they observe the same target object
    // (or are both empty). Does not consider whether the target expired.
    template <class TTarget1, class NullType1, class TTarget2, class NullType2>
    inline bool operator==(
            const weak_intrusive_ptr<TTarget1, NullType1>& lhs,
            const weak_intrusive_ptr<TTarget2, NullType2>& rhs) noexcept {
        return lhs.target_ == rhs.target_;
    }

    // Inequality in terms of the operator== defined above.
    template <class TTarget1, class NullType1, class TTarget2, class NullType2>
    inline bool operator!=(
            const weak_intrusive_ptr<TTarget1, NullType1>& lhs,
            const weak_intrusive_ptr<TTarget2, NullType2>& rhs) noexcept {
        return !(lhs == rhs);
    }

// Alias for documentary purposes, to more easily distinguish
// weak raw intrusive pointers from intrusive pointers.
    using weak_intrusive_ptr_target = intrusive_ptr_target;

// This namespace provides some methods for working with
// raw pointers that subclass intrusive_ptr_target.  They are not provided
// as methods on intrusive_ptr_target, because ideally you would not need these
// methods at all (use smart pointers), but if you are dealing with legacy code
// that still needs to pass around raw pointers, you may find these quite
// useful.
//
// An important usage note: some functions are only valid if you have a
// strong raw pointer to the object, while others are only valid if you
// have a weak raw pointer to the object.  ONLY call intrusive_ptr namespace
// functions on strong pointers, and weak_intrusive_ptr namespace functions
// on weak pointers.  If you mix it up, you may get an assert failure.
    namespace raw {

        namespace intrusive_ptr {

// WARNING: Unlike the reclaim() API, it is NOT valid to pass
// NullType::singleton to this function
            // Manually add a strong reference to a raw pointer (nullptr is
            // tolerated and ignored).
            inline void incref(intrusive_ptr_target* self) {
                if (self) {
                    detail::atomic_refcount_increment(self->refcount_);
                }
            }

// WARNING: Unlike the reclaim() API, it is NOT valid to pass
// NullType::singleton to this function
            // Manually drop a strong reference: reclaim() adopts the strong
            // reference into a temporary intrusive_ptr, whose destructor
            // performs the decref (and destruction, if it was the last one).
            inline void decref(intrusive_ptr_target* self) {
                // Let it die
                c10::intrusive_ptr<intrusive_ptr_target>::reclaim(self);
                // NB: Caller still has 'self' pointer, but it's now invalid.
                // If you want more safety, used the actual c10::intrusive_ptr class
            }

            // From a strong raw pointer, produce a raw *weak* pointer to the
            // same object. The caller's strong reference is preserved: the
            // temporary intrusive_ptr is disarmed with release() before it
            // could decref.
            template <typename T>
            inline T* make_weak(T* self) {
                // NB: 'this' is a strong pointer, but we return a weak pointer
                auto ptr = c10::intrusive_ptr<T>::reclaim(self);
                c10::weak_intrusive_ptr<T> wptr(ptr);
                ptr.release();
                return wptr.release();
            }

            // Read the strong refcount of a strong raw pointer without
            // changing ownership (reclaim + release leaves counts untouched).
            inline size_t use_count(intrusive_ptr_target* self) {
                auto ptr = c10::intrusive_ptr<intrusive_ptr_target>::reclaim(self);
                auto r = ptr.use_count();
                ptr.release();
                return r;
            }

        } // namespace intrusive_ptr

        namespace weak_intrusive_ptr {

            // Manually add a weak reference. NOTE(review): unlike the strong
            // incref above, this does not null-check `self` — callers must
            // not pass nullptr.
            inline void incref(weak_intrusive_ptr_target* self) {
                detail::atomic_weakcount_increment(self->weakcount_);
            }

            // Manually drop a weak reference via a temporary
            // weak_intrusive_ptr whose destructor does the decrement.
            inline void decref(weak_intrusive_ptr_target* self) {
                // Let it die
                c10::weak_intrusive_ptr<intrusive_ptr_target>::reclaim(self);
                // NB: You still "have" the 'self' pointer, but it's now invalid.
                // If you want more safety, used the actual c10::weak_intrusive_ptr class
            }

            // Attempt to promote a weak raw pointer to a strong raw pointer;
            // returns the strong pointer, or the null singleton if expired.
            // The caller's weak reference is preserved (release()).
            template <typename T>
            inline T* lock(T* self) {
                auto wptr = c10::weak_intrusive_ptr<T>::reclaim(self);
                auto ptr = wptr.lock();
                wptr.release();
                return ptr.release();
            }

// This gives the STRONG refcount of a WEAK pointer
            inline size_t use_count(weak_intrusive_ptr_target* self) {
                auto wptr = c10::weak_intrusive_ptr<intrusive_ptr_target>::reclaim(self);
                auto r = wptr.use_count();
                wptr.release();
                return r;
            }

        } // namespace weak_intrusive_ptr

    } // namespace raw

} // namespace c10

namespace std {
// To allow intrusive_ptr and weak_intrusive_ptr inside std::unordered_map or
// std::unordered_set, we need std::hash
    template <class TTarget, class NullType>
    struct hash<c10::intrusive_ptr<TTarget, NullType>> {
        // Hash by the identity of the managed raw pointer.
        size_t operator()(const c10::intrusive_ptr<TTarget, NullType>& x) const {
            TTarget* const raw = x.get();
            return std::hash<TTarget*>{}(raw);
        }
    };
    template <class TTarget, class NullType>
    struct hash<c10::weak_intrusive_ptr<TTarget, NullType>> {
        // Hash by target identity, via the unsafe accessor the class
        // exposes specifically for this specialization.
        size_t operator()(const c10::weak_intrusive_ptr<TTarget, NullType>& x) const {
            TTarget* const raw = x._unsafe_get_target();
            return std::hash<TTarget*>{}(raw);
        }
    };
} // namespace std

namespace c10 {
        // Singleton TensorImpl used as the "undefined tensor" sentinel.
        // It is statically allocated (not refcounted), which is why IValue
        // excludes it from incref/decref.
        struct C10_API UndefinedTensorImpl final : public TensorImpl {
            public:
            // Without this, we get:
            //  error: identifier "at::UndefinedTensorImpl::_singleton" is undefined in
            //  device code
            // (ostensibly because the constexpr tricks MSVC into trying to compile this
            // function for device as well).
#ifdef _WIN32
        static inline TensorImpl* singleton() {
                return &_singleton;
        }
#else
        static constexpr inline TensorImpl* singleton() {
            return &_singleton;
        }
#endif
        IntArrayRef strides() const override;
        int64_t size(int64_t d) const override;
        int64_t stride(int64_t d) const override;
#ifdef DEBUG
        bool has_storage() const override;
#endif
        void set_storage_offset(int64_t offset) override;

        private:
            // Private ctor: the only instance is the static _singleton below.
            UndefinedTensorImpl();
            static UndefinedTensorImpl _singleton;
            const char* tensorimpl_type_name() const override;
        };
};

namespace c10 {

    struct TORCH_API IValue final {
        // Copy constructor: duplicate payload/tag bits, then manually bump
        // the refcount when the payload holds an intrusive pointer. The
        // UndefinedTensorImpl singleton is not refcounted and is excluded.
        IValue(const IValue& rhs)
                : IValue(rhs.payload, rhs.tag, rhs.is_intrusive_ptr) {
            if (is_intrusive_ptr && payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) {
                c10::raw::intrusive_ptr::incref(payload.u.as_intrusive_ptr);
            }
        }

        // Move constructor: steal rhs's state via moveFrom (no refcounting).
        IValue(IValue&& rhs) noexcept : tag(rhs.tag), is_intrusive_ptr(rhs.is_intrusive_ptr) {
            moveFrom(std::move(rhs));
        }


        ~IValue() {
            destroy();
        }

        // Move assignment. The self-move guard matters: destroy() would
        // otherwise invalidate rhs before moveFrom reads it.
        C10_ALWAYS_INLINE IValue& operator=(IValue&& rhs) & noexcept {
            if (&rhs == this) {
                return *this;
            }

            destroy();
            moveFrom(std::move(rhs));
            return *this;
        }

        // Copy assignment via copy-and-move: construct a copy, move it in.
        IValue& operator=(IValue const& rhs) & {
            *this = IValue(rhs);
            return *this;
        }

        void dump() const;


        IValue equals(const IValue& rhs) const;

        TORCH_API friend bool operator==(const IValue& lhs, const IValue& rhs);
        TORCH_API friend bool operator!=(const IValue& lhs, const IValue& rhs);

        bool is(const IValue& rhs) const;

        /// Returns this IValue's hash, wrapped as an Int IValue.
        /// The size_t produced by the static hash(const IValue&) overload is
        /// narrowed to int64_t with a named cast (was a C-style cast).
        IValue hash() const {
            return static_cast<int64_t>(IValue::hash(*this));
        }

        static size_t hash(const IValue& iv);


        TORCH_API friend bool _fastEqualsForContainer(
                const IValue& lhs,
                const IValue& rhs);

    private:
        // Tensor-to-tensor aliasing check. Sparse (COO) and sparse CSR
        // tensors are decomposed into their component tensors recursively;
        // two dense tensors alias iff they share storage.
        static bool isAliasOf(const at::Tensor& a, const at::Tensor& b) {
            if (a.is_sparse()) {
                return isAliasOf(a._values(), b) || isAliasOf(a._indices(), b);
            }
            if (b.is_sparse()) {
                return isAliasOf(a, b._values()) || isAliasOf(a, b._indices());
            }
            if (a.is_sparse_csr()) {
                return isAliasOf(a.values(), b) ||
                       isAliasOf(a.crow_indices(), b) ||
                       isAliasOf(a.col_indices(), b);
            }
            if (b.is_sparse_csr()) {
                return isAliasOf(a, b.values()) ||
                       isAliasOf(a, b.crow_indices()) ||
                       isAliasOf(a, b.col_indices());
            }

            // Opaque tensors such as the ones constructed by the MKL-DNN backend
            // don't have storage so we just compare their TensorImpls.
            // TODO: Find way to expose alias info for opaque tensors.
            if (!a.has_storage() || !b.has_storage()) {
                return a.unsafeGetTensorImpl() == b.unsafeGetTensorImpl();
            }

            return a.is_alias_of(b);
        }

        template <typename T>
        bool isListOf() const;

    public:
        /// @private [doxygen private]
        /// True if this IValue and rhs can observe the same underlying data:
        /// storage-level aliasing for tensors, pointer identity for other
        /// intrusive-pointer payloads, never for primitives.
        bool isAliasOf(const IValue& rhs) const {
            if (this->tag != rhs.tag) {
                // Trivially don't alias if the type is different
                return false;
            }

            // Tensors should be compared based on internal storage
            if (this->isTensor()) {
                return isAliasOf(this->toTensor(), rhs.toTensor());
            }

            if (!this->is_intrusive_ptr) {
                // Primitive types don't alias anything
                return false;
            }

            // Tags match and lhs is intrusive, so rhs must be too.
            AT_ASSERT(rhs.is_intrusive_ptr);

            // Other types can be compared by their ptr value
            return this->payload.u.as_intrusive_ptr == rhs.payload.u.as_intrusive_ptr;
        }

        /// @private [doxygen private]
        /// Strong refcount of the held object: the tensor's count for
        /// tensors, 1 for non-refcounted primitives, 0 for the undefined
        /// tensor sentinel (which is not refcounted at all).
        size_t use_count() const noexcept {
            if (isTensor()) {
                return payload.as_tensor.use_count();
            }

            if (!is_intrusive_ptr) {
                return 1;
            }

            if (payload.u.as_intrusive_ptr == c10::UndefinedTensorImpl::singleton()) {
                return 0;
            }
            return c10::raw::intrusive_ptr::use_count(payload.u.as_intrusive_ptr);
        }

        /// @private [doxygen private]
        /// Swap contents with rhs. Needs care because payload.as_tensor
        /// (a non-trivial at::Tensor) and payload.u (a trivial union) cannot
        /// be std::swap'd against each other directly.
        void swap(IValue& rhs) noexcept {
            if (isTensor() && rhs.isTensor()) {
                // Both live tensors: member-wise swap works.
                std::swap(payload.as_tensor, rhs.payload.as_tensor);
            } else if (isTensor()) {
                // Move our tensor aside, copy rhs's trivial payload over it,
                // then placement-new the tensor into rhs. No explicit
                // destructor call on the moved-from tensor — presumably its
                // destructor is a no-op after the move (TODO confirm vs
                // upstream comment).
                at::Tensor t = std::move(payload.as_tensor);

                payload.u = rhs.payload.u;
                new (&rhs.payload.as_tensor) at::Tensor(std::move(t));
            } else if (rhs.isTensor()) {
                // Mirror case: reuse the branch above with roles reversed
                // (that call also swaps tag/flags, hence the early return).
                rhs.swap(*this);
                return;
            } else {
                // Neither side holds a tensor: swap the trivial union.
                std::swap(payload.u, rhs.payload.u);
            }
            // Executed on every non-delegating path (fixed the misleading
            // indentation that made these look part of the else branch).
            std::swap(is_intrusive_ptr, rhs.is_intrusive_ptr);
            std::swap(tag, rhs.tag);
        }

        // Tensor: stored in-place in payload.as_tensor (not as an
        // intrusive pointer), hence is_intrusive_ptr is false.
        IValue(at::TensorBase t) : tag(Tag::Tensor), is_intrusive_ptr(false) {
            new (&payload.as_tensor) at::Tensor(std::move(t));
        }
        bool isTensor() const {
            return Tag::Tensor == tag;
        }

    private:
        // Outlined error path so that toTensor() can be inlined.
        [[noreturn]] void reportToTensorTypeError() const;

    public:
        at::Tensor toTensor() &&;
        at::Tensor& toTensor() &;
        const at::Tensor& toTensor() const&;
        // Unchecked: reads payload.as_tensor without verifying isTensor();
        // caller must ensure this IValue actually holds a tensor.
        at::TensorImpl* unsafeToTensorImpl() const {
            return payload.as_tensor.unsafeGetTensorImpl();
        }

        // Storage: held as an intrusive pointer; is_intrusive_ptr is false
        // only when the storage is empty (null storage maps to the
        // non-refcounted undefined-tensor sentinel via
        // null_to_undefined_tensor, and incref must not run on it).
        // NOTE(review): the original comment here talked about "tagged as a
        // tensor" — it appears to have been copied from a tensor constructor;
        // verify against upstream.
        IValue(at::Storage s) : tag(Tag::Storage), is_intrusive_ptr(static_cast<bool>(s)) {
            payload.u.as_intrusive_ptr = null_to_undefined_tensor(s.unsafeReleaseStorageImpl());
        }
        bool isStorage() const {
            return Tag::Storage == tag;
        }
        c10::Storage toStorage() &&;
        c10::Storage toStorage() const&;

        // Identity accessors (const and mutable), for generic code that
        // calls toIValue() uniformly on values and wrappers.
        const IValue& toIValue() const {
            return *this;
        }
        IValue& toIValue() {
            return *this;
        }

        /// @private [doxygen private]
        /// Takes over the blob's strong reference (release(), no incref);
        /// a null blob maps to the undefined-tensor sentinel.
        IValue(intrusive_ptr<caffe2::Blob> blob)
        : tag(Tag::Blob), is_intrusive_ptr(true) {
            // TODO (after Tensor merge) If we pass in a Blob holding a Tensor, extract
            // and store it as a Tensor instead.
            payload.u.as_intrusive_ptr = null_to_undefined_tensor(blob.release());
        }

        /// @private [doxygen private]
        bool isBlob() const {
            return Tag::Blob == tag;
        }

        /// @private [doxygen private]
        c10::intrusive_ptr<caffe2::Blob> toBlob() &&;

        /// @private [doxygen private]
        c10::intrusive_ptr<caffe2::Blob> toBlob() const&;

        // Capsule. No new callsites of these APIs should
        // be introduced.
        // Wraps an opaque CustomClassHolder as a Capsule-tagged IValue.
        static inline IValue make_capsule(
                intrusive_ptr<torch::CustomClassHolder> blob);
        bool isCapsule() const {
            return Tag::Capsule == tag;
        }
        c10::intrusive_ptr<torch::CustomClassHolder> toCapsule() &&;
        c10::intrusive_ptr<torch::CustomClassHolder> toCapsule() const&;

        // Custom C++ classes
        template <
                typename T,
                std::enable_if_t<
                        std::is_base_of<torch::CustomClassHolder, T>::value,
                        int> = 0>
        IValue(intrusive_ptr<T> custom_class);
        bool isCustomClass() const;
        template <typename T>
        c10::intrusive_ptr<T> toCustomClass() &&;
        template <typename T>
        c10::intrusive_ptr<T> toCustomClass() const&;

        // Tuple
        IValue(c10::intrusive_ptr<ivalue::Tuple> v);

        template <
                typename... Args,
                std::enable_if_t<
                        !guts::disjunction<
                        std::is_lvalue_reference<Args>...,
                        guts::negation<std::is_constructible<IValue, Args>>...>::value,
        std::nullptr_t> = nullptr>
        IValue(const std::tuple<Args...>& t);
        template <
                typename... Args,
                std::enable_if_t<
                        !guts::disjunction<
                        std::is_lvalue_reference<Args>...,
                        guts::negation<std::is_constructible<IValue, Args>>...>::value,
        std::nullptr_t> = nullptr>
        IValue(std::tuple<Args...>&& t);
        // True iff this IValue holds an ivalue::Tuple.
        bool isTuple() const {
            return Tag::Tuple == tag;
        }
        c10::intrusive_ptr<ivalue::Tuple> toTuple() &&;
        c10::intrusive_ptr<ivalue::Tuple> toTuple() const&;
        C10_NODISCARD ivalue::Tuple& toTupleRef() const;

        // Double: stored inline in the trivial payload union.
        IValue(double d) : tag(Tag::Double), is_intrusive_ptr(false) {
            payload.u.as_double = d;
        }
        bool isDouble() const {
            return Tag::Double == tag;
        }
        double toDouble() const {
            AT_ASSERT(isDouble());
            return payload.u.as_double;
        }

        // ComplexDouble
        template <typename T>
        IValue(c10::complex<T> c);
        bool isComplexDouble() const { return Tag::ComplexDouble == tag; }
        c10::complex<double> toComplexDouble() const;

        // Future: refcounted ivalue::Future payload.
        IValue(c10::intrusive_ptr<ivalue::Future> v);
        bool isFuture() const {
            return Tag::Future == tag;
        }
        c10::intrusive_ptr<ivalue::Future> toFuture() &&;
        c10::intrusive_ptr<ivalue::Future> toFuture() const&;

        // RRef: distributed remote reference payload.
        IValue(c10::intrusive_ptr<c10::RRefInterface> v);
        bool isRRef() const {
            return Tag::RRef == tag;
        }
        c10::intrusive_ptr<c10::RRefInterface> toRRef() &&;
        c10::intrusive_ptr<c10::RRefInterface> toRRef() const&;

        // Quantizer: quantization parameters payload.
        IValue(c10::intrusive_ptr<at::Quantizer> v);
        bool isQuantizer() const {
            return Tag::Quantizer == tag;
        }
        c10::intrusive_ptr<at::Quantizer> toQuantizer() &&;
        c10::intrusive_ptr<at::Quantizer> toQuantizer() const&;

        // Int: stored inline as int64_t.
        IValue(int64_t i) : tag(Tag::Int), is_intrusive_ptr(false) {
            payload.u.as_int = i;
        }

        // allow you to pass literals (3, 4) without ambiguity
        IValue(int32_t i) : IValue(static_cast<int64_t>(i)) {}

        bool isInt() const {
            return Tag::Int == tag;
        }

        int64_t toInt() const {
            AT_ASSERT(isInt());
            return payload.u.as_int;
        }

        // Bool: stored inline; see the valgrind workaround below.
        IValue(bool b) : tag(Tag::Bool), is_intrusive_ptr(false) {
        #if defined(__clang__) && defined(__x86_64__)
            // Initializing entire payload stops valgrind's from reporting
            // "jump or move depends on uninitialised value" in IValue copy constructor
            // See https://github.com/pytorch/pytorch/issues/37117
            payload.u.as_int = b;
        #else
            payload.u.as_bool = b;
        #endif
        }
        bool isBool() const {
            return Tag::Bool == tag;
        }
        bool toBool() const {
            AT_ASSERT(isBool());
            return payload.u.as_bool;
        }

        // IntList
        bool isIntList() const;
        c10::List<int64_t> toIntList() &&;
        c10::List<int64_t> toIntList() const&;
        std::vector<int64_t> toIntVector() const;
        at::DimVector toDimVector() const;

        // ConstantString: string payloads; the char* / string_view
        // constructors delegate through the std::string overload (copying
        // the characters).
        IValue(c10::intrusive_ptr<ivalue::ConstantString> v);
        IValue(std::string v);
        IValue(const char* v) : IValue(std::string(v)) {}
        IValue(c10::string_view v) : IValue(std::string(v)) {};
        bool isString() const {
            return Tag::String == tag;
        }
        c10::intrusive_ptr<ivalue::ConstantString> toString() &&;
        c10::intrusive_ptr<ivalue::ConstantString> toString() const&;
        const std::string& toStringRef() const;
        c10::optional<std::reference_wrapper<const std::string>> toOptionalStringRef()
        const;
        c10::string_view toStringView() const;

        // DoubleList
        bool isDoubleList() const;
        c10::List<double> toDoubleList() &&;
        c10::List<double> toDoubleList() const&;
        std::vector<double> toDoubleVector() const;

        // ComplexDoubleList
        bool isComplexDoubleList() const;
        c10::List<c10::complex<double>> toComplexDoubleList() &&;
        c10::List<c10::complex<double>> toComplexDoubleList() const&;
        std::vector<c10::complex<double>> toComplexDoubleVector() const;

        // BoolList
        bool isBoolList() const;
        c10::List<bool> toBoolList() &&;
        c10::List<bool> toBoolList() const&;

        // TensorList
        bool isTensorList() const;
        c10::List<at::Tensor> toTensorList() &&;
        c10::List<at::Tensor> toTensorList() const&;
        std::vector<at::Tensor> toTensorVector() const;

        // GenericList
        IValue(c10::List<IValue> v);
        // True iff this IValue holds a generic (heterogeneously-typed) list.
        bool isList() const {
            return Tag::GenericList == tag;
        }
        c10::List<IValue> toList() &&;
        c10::List<IValue> toList() const&;
        c10::ArrayRef<IValue> toListRef() const;

        // Some template constructors of IValue call another constructor recursively.
        // This SFINAEs out unless the called constructor exists.
        template <class T>
        using enable_if_ivalue_constructible =
        std::enable_if_t<std::is_constructible<IValue, T>::value, std::nullptr_t>;

        template <class T, enable_if_ivalue_constructible<T> = nullptr>
        IValue(c10::List<T>&& v);
        template <class T, enable_if_ivalue_constructible<T> = nullptr>
        IValue(const c10::List<T>& v);
        template <class T, enable_if_ivalue_constructible<T> = nullptr>
        IValue(at::ArrayRef<T> v);
        template <class T, enable_if_ivalue_constructible<T> = nullptr>
        IValue(const std::vector<T>& v);
        template <class T, size_t N>
        IValue(std::array<T, N> v);

        // GenericDict
        IValue(c10::Dict<IValue, IValue> v);
        // True iff this IValue holds a generic Dict<IValue, IValue>.
        bool isGenericDict() const {
            return Tag::GenericDict == tag;
        }
        c10::Dict<IValue, IValue> toGenericDict() &&;
        c10::Dict<IValue, IValue> toGenericDict() const&;

        template <class Key, class Value>
        IValue(c10::Dict<Key, Value> v);

        template <class Key, class Value>
        /// \cond
        /// DOXYGEN_CANNOT_HANDLE_CONSTRUCTORS_WITH_MACROS_SO_EXCLUDE_THIS_LINE_FROM_DOXYGEN
        C10_DEPRECATED_MESSAGE(
        "IValues based on std::unordered_map<K, V> are slow and deprecated. Please use c10::Dict<K, V> instead.")
        /// \endcond
        IValue(std::unordered_map<Key, Value> v);

        template <class T, enable_if_ivalue_constructible<T> = nullptr>
        IValue(c10::optional<T> v);
        IValue(c10::nullopt_t);

        // ClassType
        IValue(c10::intrusive_ptr<ivalue::Object> v);
        // True iff this IValue holds a TorchScript class instance.
        bool isObject() const {
            return tag == Tag::Object;
        }
        c10::intrusive_ptr<ivalue::Object> toObject() &&;
        c10::intrusive_ptr<ivalue::Object> toObject() const&;
        ivalue::Object& toObjectRef() const;

        torch::jit::Module toModule() const;
        bool isModule() const;

        // PyObject
        IValue(c10::intrusive_ptr<ivalue::PyObjectHolder> v);
        // True iff this IValue holds a Python object (via PyObjectHolder).
        bool isPyObject() const {
            return tag == Tag::PyObject;
        }
        c10::intrusive_ptr<ivalue::PyObjectHolder> toPyObjectHolder() &&;
        c10::intrusive_ptr<ivalue::PyObjectHolder> toPyObjectHolder() const&;
        PyObject* toPyObject() const;

        // Enum
        explicit IValue(c10::intrusive_ptr<ivalue::EnumHolder> v);
        bool isEnum() const {
            return tag == Tag::Enum;
        }
        c10::intrusive_ptr<ivalue::EnumHolder> toEnumHolder() &&;
        c10::intrusive_ptr<ivalue::EnumHolder> toEnumHolder() const&;

        // None
        // Default-constructed IValue is None: no payload, not refcounted.
        IValue() : tag(Tag::None), is_intrusive_ptr(false) {}
        bool isNone() const {
            return Tag::None == tag;
        }
        // Returns the literal string "None"; asserts the tag actually is None.
        std::string toNone() const {
            AT_ASSERT(isNone());
            return "None";
        }

        // Builds an IValue carrying the special Uninitialized tag, used as a
        // placeholder before a real value is assigned.
        static IValue uninitialized() {
            IValue result;
            result.tag = Tag::Uninitialized;
            return result;
        }

        // Scalar, which gets encoded as either an Int, a Double or a ComplexDouble
        IValue(const at::Scalar& s) : IValue() {
            // Dispatch order is deliberate: bool is tested before the integral
            // case. NOTE(review): isIntegral(false) presumably means "do not
            // count bools as integral" — confirm against at::Scalar's API.
            if (s.isFloatingPoint()) {
                *this = s.toDouble();
            } else if (s.isComplex()) {
                *this = s.toComplexDouble();
            } else if (s.isBoolean()) {
                *this = s.toBool();
            } else if (s.isIntegral(false)) {
                *this = s.toLong();
            } else {
                TORCH_CHECK(false, "Unknown type in Scalar");
            }
        }

        // True iff this IValue holds one of the tags representable as an
        // at::Scalar (Double, Int, ComplexDouble, or Bool).
        bool isScalar() const {
            return isDouble() || isInt() || isComplexDouble() || isBool();
        }

        // Converts to at::Scalar; throws std::runtime_error for any other tag.
        at::Scalar toScalar() const {
            if (isDouble()) {
                return toDouble();
            }
            if (isInt()) {
                return toInt();
            }
            if (isComplexDouble()) {
                return toComplexDouble();
            }
            if (isBool()) {
                return toBool();
            }
            throw std::runtime_error("IValue is not a Scalar");
        }

        // Device
        // Devices are stored inline as a (type, index) pair; never refcounted.
        IValue(c10::Device d) : tag(Tag::Device), is_intrusive_ptr(false) {
            payload.u.as_device.index = d.index();
            payload.u.as_device.type = d.type();
        }
        bool isDevice() const {
            return tag == Tag::Device;
        }
        c10::Device toDevice() const {
            AT_ASSERT(isDevice());
            const auto& dev = payload.u.as_device;
            return c10::Device(dev.type, dev.index);
        }

        //Stream
        // Streams are packed into the integer payload via c10::Stream::pack().
        IValue(c10::Stream stream)
        : tag(Tag::Stream), is_intrusive_ptr(false) {
            payload.u.as_int = stream.pack();
        }
        c10::Stream toStream() &&;
        c10::Stream toStream() const &;
        bool isStream() const { return Tag::Stream == tag; }

        // ScalarType
        // The enum-like types below (ScalarType / Layout / MemoryFormat) have
        // no tag of their own: each is stored as an Int holding its underlying
        // value, and recovered with a static_cast on the way out.
        IValue(ScalarType t)
        : IValue(static_cast<std::underlying_type<ScalarType>::type>(t)) {}
        at::ScalarType toScalarType() const {
            return static_cast<at::ScalarType>(toInt());
        }

        // Layout
        IValue(Layout l)
        : IValue(static_cast<std::underlying_type<Layout>::type>(l)) {}
        at::Layout toLayout() const {
            return static_cast<at::Layout>(toInt());
        }

        // MemoryFormat
        IValue(MemoryFormat m)
        : IValue(static_cast<std::underlying_type<MemoryFormat>::type>(m)) {}
        at::MemoryFormat toMemoryFormat() const {
            return static_cast<at::MemoryFormat>(toInt());
        }

        // QScheme
        // Stored directly under the Int tag (no dedicated QScheme tag).
        IValue(at::QScheme qscheme) : tag(Tag::Int), is_intrusive_ptr(false) {
            payload.u.as_int = static_cast<int64_t>(qscheme);
        }

        at::QScheme toQScheme() const {
            return static_cast<at::QScheme>(toInt());
        }

        // Dimname
        // Round-trips through the qualified string form of the dimname symbol.
        IValue(at::Dimname dimname) : IValue(dimname.symbol().toQualString()) {}

        at::Dimname toDimname() const {
            return at::Dimname::fromSymbol(Symbol::fromQualString(toStringRef()));
        }

        // Generator
        // is_intrusive_ptr mirrors whether the generator is defined; undefined
        // generators use the undefined-tensor sentinel payload instead.
        IValue(at::Generator g) : tag(Tag::Generator), is_intrusive_ptr(g.defined()) {
            // Note: the undefined generator is not refcounted, so while it
            // is tagged as a generator, is_intrusive_ptr is set to false.
            // This is not an optional optimization: our incref call
            // *will not* do the right thing when called on an
            // undefined generator.
            payload.u.as_intrusive_ptr = null_to_undefined_tensor(g.unsafeReleaseGeneratorImpl());
        }
        bool isGenerator() const {
            return Tag::Generator == tag;
        }
        at::Generator toGenerator() &&;
        at::Generator toGenerator() const&;

        // for debugging
        // Human-readable tag name; TORCH_FORALL_TAGS expands one case per tag.
        // The fall-through return covers a corrupted/unknown tag value.
        std::string tagKind() const {
            switch (tag) {
        #define DEFINE_CASE(x) \
          case Tag::x:         \
            return #x;
                TORCH_FORALL_TAGS(DEFINE_CASE)
        #undef DEFINE_CASE
            }
            return "InvalidTag(" + c10::guts::to_string(static_cast<int>(tag)) + ")";
        }

        // generic v.to<at::Tensor>() implementations
        // that can be used in special functions like pop/push
        // that use template meta-programming.
        // prefer the directly named methods when you can,
        // since they are simpler to understand

        // Note: if you get linker errors saying one of these is missing,
        // change it to ... && = delete; and you will see better error messages
        // for why. However, we cannot commit this change because some compiler
        // versions barf on it.
        template <typename T>
        T to() &&;
        template <typename T>
        typename c10::detail::ivalue_to_const_ref_overload_return<T>::type to() const&;

        // ToOptional: convert a IValue to the Optional obj that accepts both T and
        // None
        template <typename T>
        optional<T> toOptional();
        template <typename T>
        optional<T> toOptional() const;

        /// @private [doxygen private]
        /// this is a shallow comparison of two IValues to test the object identity
        bool isSameIdentity(const IValue& rhs) const;

        // Computes the "official" string representation of an IValue. This produces a
        // TorchScript expression that can be used to recreate an IValue with the same
        // value (e.g. when we are printing constants in the serializer).
        //
        // Callers can use `customFormatter` to override how `repr()` prints out an
        // IValue. This is useful if you have some other environment where you can
        // look up values, and you want to print a reference to that environment (like
        // the serializer's constant table).
        //
        // repr() is not necessarily defined on all objects!
        std::ostream& repr(
                std::ostream& stream,
                std::function<bool(std::ostream&, const IValue& v)> customFormatter)
        const;

        // Computes an "informal" string representation of an IValue. This should be
        // used for debugging, or servicing `print()`-like functions.
        // This is different from `repr()` in that there is no expectation that we can
        // exactly reconstruct an IValue from the output; feel free to use a
        // concise/pretty form
        // (declared friend so it can read the private tag/payload directly).
        TORCH_API friend std::ostream& operator<<(
                std::ostream& out,
        const IValue& v);

        // True if this IValue is backed by a heap object: either a *defined*
        // tensor or any intrusive_ptr payload. Undefined tensors don't count.
        bool isPtrType() const {
            return (isTensor() && payload.as_tensor.defined()) || is_intrusive_ptr;
        }

        /// @private [doxygen private]
        /// Raw pointer to the underlying heap object, for identity purposes.
        /// The UndefinedTensorImpl singleton sentinel is normalized to nullptr.
        const void* internalToPointer() const {
            TORCH_INTERNAL_ASSERT(
                    isPtrType(), "Can only call internalToPointer() for pointer types");
            if (isTensor()) {
                return payload.as_tensor.unsafeGetTensorImpl();
            } else {
                return payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()
                       ? payload.u.as_intrusive_ptr : nullptr;
            }
        }

        // Type of the stored value (declaration only; defined elsewhere).
        template <typename T = c10::PlatformType>
        TypePtr type() const;

        // Detect aliased tensors.
        struct HashAliasedIValue {
            size_t hashTensor(const at::Tensor& ten) const {
                if (ten.is_sparse()) {
                    // COO sparse tensors have a "values" tensor and an "indices" tensor
                    // so this will detect overlap of sparse tensors that share a values
                    // tensor, but not sparse tensors that share an indices tensor.
                    return hashTensor(ten._values());
                } else if (ten.is_sparse_csr()) {
                    // COO sparse tensors have a "values" tensor and an "indices" tensor
                    // so this will detect overlap of sparse tensors that share a values
                    // tensor, but not sparse tensors that share an indices tensor.
                    return hashTensor(ten.values());
                }  else if (!ten.has_storage()) {
                    // Opaque tensors such as the ones constructed by the MKL-DNN backend
                    // don't have storage so we just use their TensorImpls.
                    // TODO: Find way to expose alias info for opaque tensors.
                    return reinterpret_cast<size_t>(ten.unsafeGetTensorImpl());
                } else {
                    return reinterpret_cast<size_t>(
                            ten.storage().unsafeGetStorageImpl());
                }
            }
            size_t operator()(const IValue& val) const {
                if (val.isTensor()) {
                    return hashTensor(val.toTensor());
                }
                // If it is not a Tensor, then two mutable IValues alias each other only
                // if they are the same pointer.
                return val.payload.u.as_int;
            }
        };

        // Equality predicate for the aliased-IValue containers below: two
        // IValues compare equal iff they may alias each other.
        struct CompAliasedIValues {
            bool operator()(const IValue& lhs, const IValue& rhs) const {
                return lhs.isAliasOf(rhs);
            }
        };

        using HashAliasedIValues =
        std::unordered_set<IValue, HashAliasedIValue, CompAliasedIValues>;
        using HashAliasedIValueMap =
        std::unordered_map<IValue, IValue, HashAliasedIValue, CompAliasedIValues>;

        // Checks if this and rhs have subvalues in common.
        // [t1,t2] and [t2, t3] returns true.
        bool overlaps(const IValue& rhs) const;

        // Inserts all subvalues of this in subValues.
        void getSubValues(HashAliasedIValues& subValues) const;

        // Apply visitor to every subvalue.
        // TODO: There are several places that recurse over IValue. This is fragile.
        // This visitor should be used to recurse over ivalues.
        void visit(const std::function<bool(const IValue&)>& visitor) const;
        IValue deepcopy() const;
        IValue deepcopy(HashAliasedIValueMap& memo) const;

    private:
        // Normalizes a null pointer to the UndefinedTensorImpl singleton so
        // the payload invariant (as_intrusive_ptr is never nullptr) holds.
        static c10::intrusive_ptr_target* null_to_undefined_tensor(c10::intrusive_ptr_target* p) {
            return p ? p : static_cast<c10::intrusive_ptr_target*>(c10::UndefinedTensorImpl::singleton());
        }

        static bool ptrEqual(const IValue& lhs, const IValue& rhs);
        // NOTE: IValue tags are intentionally private. In the future we may encode
        // this value differently (e.g. using NaN boxing), and this would make it more
        // costly to determine the tag for all types vs just determining if something
        // is a particular type. Instead we want clients to use the `isX` methods when
        // possible. If for perf. reasons you really, absolutely, must have a jump
        // table, then we can revisit this.
        enum class Tag : uint32_t {
        #define DEFINE_TAG(x) x,
            TORCH_FORALL_TAGS(DEFINE_TAG)
        #undef DEFINE_TAG
        };

        // Internal conversions to intrusive_ptr (declarations; definitions
        // elsewhere). Presumably moveToIntrusivePtr consumes this IValue while
        // toIntrusivePtr bumps the refcount — confirm against the definitions.
        template <
                class T,
                class NullType = c10::detail::intrusive_target_default_null_type<T>>
        c10::intrusive_ptr<T, NullType> moveToIntrusivePtr();
        template <
                typename T,
                class NullType = c10::detail::intrusive_target_default_null_type<T>>
        c10::intrusive_ptr<T, NullType> toIntrusivePtr() const;

        // Releases any heap payload (tensor impl or intrusive_ptr target) by
        // handing the raw pointer to a temporary intrusive_ptr via reclaim();
        // that temporary's destructor performs the decref.
        void destroy() {
            // We carefully construct this call to both 1) avoid UB by using
            // the "wrong" one of as_tensor and as_intrusive_ptr and 2) enable
            // the compiler to generate the same code for each case. It is
            // surprisingly difficult to get this right.
            if (isTensor() || is_intrusive_ptr) {
                c10::intrusive_ptr_target* p = isTensor() ? payload.as_tensor.unsafeGetTensorImpl() : payload.u.as_intrusive_ptr;
                c10::intrusive_ptr<intrusive_ptr_target, c10::UndefinedTensorImpl>::reclaim(p);
                // No need to make this destructor call!
                // payload.as_tensor.~Tensor();
            }
        }

        // Move-construction helper: adopts rhs's payload without touching any
        // refcounts, then resets rhs to the None state.
        C10_ALWAYS_INLINE void moveFrom(IValue&& rhs) noexcept {
            if (rhs.isTensor()) {
                new (&payload.as_tensor) at::Tensor(std::move(rhs.payload.as_tensor));
                // As far as I can tell, omitting the usual explicit destructor call
                // is not UB in and of itself, and it's a slight perf win. The
                // destructor is a no-op, because the moved-from Tensor is
                // effectively an intrusive_ptr in the null state, so we don't need
                // the behavior for correctness reasons either. Leaving this
                // explanatory comment, including commented-out destructor call, to
                // make this abundantly clear.
                //
                // rhs.payload.as_tensor.~Tensor();
            } else {
                payload.u = rhs.payload.u;
            }
            tag = rhs.tag;
            is_intrusive_ptr = rhs.is_intrusive_ptr;
            rhs.clearToNone();
        }

        // Resets to None WITHOUT releasing any payload; the caller must already
        // have transferred or destroyed ownership (see destroy()/moveFrom()).
        void clearToNone() noexcept {
            payload.u.as_int = 0;
            tag = Tag::None;
            is_intrusive_ptr = false;
        }

        union Payload {
            // We use a nested union here so that we can make the copy easy
            // and efficient in the non-tensor (i.e., trivially copyable)
            // case. Specifically, we do not have to do a switch-on-tag to
            // figure out which union member to assign; we can just use
            // TriviallyCopyablePayload::operator=.
            union TriviallyCopyablePayload {
                TriviallyCopyablePayload() : as_int(0) {}
                int64_t as_int;
                double as_double;
                bool as_bool;
                // Invariant: never nullptr; null state is represented as
                // c10::UndefinedTensorImpl::singleton() for consistency of
                // representation with Tensor.
                c10::intrusive_ptr_target* as_intrusive_ptr;
                struct {
                    DeviceType type;
                    DeviceIndex index;
                } as_device;
            } u;
            // at::Tensor is not trivially copyable, so it sits outside the
            // nested union; its lifetime is managed manually via placement new
            // (see moveFrom) rather than by this union.
            at::Tensor as_tensor;
            Payload() : u() {}
            // Intentionally empty: the active member is torn down by IValue
            // itself (see destroy()), never by this destructor.
            ~Payload() {}
        };

        // Private raw constructor (used e.g. by the friend WeakIValue::lock());
        // trusts the caller that (t, i) correctly describe payload p.
        IValue(const Payload& p, Tag t, bool i) : tag(t), is_intrusive_ptr(i) {
            if (isTensor()) {
                new (&payload.as_tensor) at::Tensor(p.as_tensor);
            } else {
                payload.u = p.u;
            }
        }

        // Per-type tag mapping helper; presumably specialized elsewhere.
        template <typename T>
        struct TagType {};

        friend MaybeOwnedTraits<IValue>;

        Payload payload;
        Tag tag;
        // True when payload.u.as_intrusive_ptr holds an owned reference.
        bool is_intrusive_ptr;
        friend struct WeakIValue;
    };

    // Weak-reference companion to IValue: carries the same payload and tag but
    // holds only a *weak* refcount on intrusive-ptr payloads, so it does not
    // keep the target alive. Use lock() to try to recover a strong IValue.
    struct TORCH_API WeakIValue final {
        WeakIValue() : tag(IValue::Tag::None), is_intrusive_ptr(false) {}

        // Copying takes one additional weak reference — unless the payload is
        // the undefined-tensor sentinel, which is never refcounted.
        WeakIValue(const WeakIValue& rhs)
                : payload(rhs.payload),
                  tag(rhs.tag),
                  is_intrusive_ptr(rhs.is_intrusive_ptr) {
            if (is_intrusive_ptr && payload.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) {
                c10::raw::weak_intrusive_ptr::incref(payload.as_intrusive_ptr);
            }
        }
        // Builds a weak reference from a strong IValue. Tensors are flattened
        // to their TensorImpl and treated as plain intrusive pointers here.
        WeakIValue(const IValue& rhs)
                : tag(rhs.tag),
                  is_intrusive_ptr(rhs.is_intrusive_ptr) {
            if (rhs.isTensor()) {
                payload.as_intrusive_ptr = rhs.unsafeToTensorImpl();
                is_intrusive_ptr = true;
            } else {
                payload = rhs.payload.u;
            }
            if (is_intrusive_ptr) {
                if (payload.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) {
                    c10::raw::weak_intrusive_ptr::incref(payload.as_intrusive_ptr);
                }
            }
        }
        // Move leaves rhs in the default (None) state via swap.
        WeakIValue(WeakIValue&& rhs) noexcept : WeakIValue() {
            swap(rhs);
        }
        ~WeakIValue() {
            if (is_intrusive_ptr && payload.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) {
                c10::raw::weak_intrusive_ptr::decref(payload.as_intrusive_ptr);
            }
        }
        WeakIValue& operator=(WeakIValue&& rhs) & noexcept {
            WeakIValue(std::move(rhs)).swap(*this); // this also sets rhs to None
            return *this;
        }
        WeakIValue& operator=(WeakIValue const& rhs) & {
            WeakIValue(rhs).swap(*this);
            return *this;
        }
        void swap(WeakIValue& rhs) noexcept {
            std::swap(payload, rhs.payload);
            std::swap(is_intrusive_ptr, rhs.is_intrusive_ptr);
            std::swap(tag, rhs.tag);
        }

        // Shallow identity test (same payload bits, tag, and pointer-ness),
        // mirroring IValue::isSameIdentity.
        bool isSameIdentity(const WeakIValue& rhs) const {
            return payload.as_int == rhs.payload.as_int && tag == rhs.tag &&
                   is_intrusive_ptr == rhs.is_intrusive_ptr;
        }

        // Attempts to promote to a strong IValue. Returns None if the target
        // is already gone. Non-pointer payloads are simply copied across.
        IValue lock() const {
            if (!is_intrusive_ptr) {
                IValue::Payload newPayload;
                newPayload.u = payload;
                return IValue(newPayload, tag, false);
            }
            if (IValue::Tag::Tensor == tag) {
                // reclaim()/lock()/release() dance: temporarily adopt our weak
                // reference to attempt the strong incref, then release so our
                // own weak count is left unchanged.
                auto temp = c10::weak_intrusive_ptr<at::TensorImpl, c10::UndefinedTensorImpl>::reclaim(
                        static_cast<at::TensorImpl*>(payload.as_intrusive_ptr));
                c10::intrusive_ptr<at::TensorImpl, c10::UndefinedTensorImpl> ip(temp.lock());
                temp.release();
                if (!ip) {
                    return IValue();
                } else {
                    return IValue(at::Tensor(std::move(ip)));
                }
            } else {
                // Same dance for non-tensor targets; the undefined-tensor
                // sentinel maps to a null weak ptr.
                auto temp = c10::weak_intrusive_ptr<c10::intrusive_ptr_target>::reclaim(
                        payload.as_intrusive_ptr == c10::UndefinedTensorImpl::singleton()
                        ? nullptr
                        : payload.as_intrusive_ptr);
                IValue::Payload pl;
                pl.u.as_intrusive_ptr = temp.lock().release();
                temp.release();
                if (!pl.u.as_intrusive_ptr) {
                    return IValue();
                } else {
                    return IValue(pl, tag, true);
                }
            }
        }

        // Strong refcount of the target; 1 for inline (non-pointer) payloads.
        size_t use_count() const noexcept {
            if (!is_intrusive_ptr) {
                return 1;
            }
            auto temp = c10::weak_intrusive_ptr<c10::intrusive_ptr_target, c10::UndefinedTensorImpl>::reclaim(
                    payload.as_intrusive_ptr);
            size_t result = temp.use_count();
            temp.release();
            return result;
        }

        // Weak refcount of the target; 1 for inline (non-pointer) payloads.
        size_t weak_use_count() const noexcept {
            if (!is_intrusive_ptr) {
                return 1;
            }
            auto temp = c10::weak_intrusive_ptr<c10::intrusive_ptr_target, c10::UndefinedTensorImpl>::reclaim(
                    payload.as_intrusive_ptr);
            size_t result = temp.weak_use_count();
            temp.release();
            return result;
        }
        // Hash on the raw payload bits (matches isSameIdentity's comparison).
        size_t hash() const {
            return payload.as_int;
        }

        private:
            using Payload = IValue::Payload::TriviallyCopyablePayload;
            Payload payload;
            IValue::Tag tag;
            bool is_intrusive_ptr;
    };

};

#endif //GENERAL_OCR_OBJECT_H
