// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
#error This header must be included via macro-assembler.h
#endif

#ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
#define V8_ARM64_MACRO_ASSEMBLER_ARM64_H_

#include <vector>

#include "src/arm64/assembler-arm64.h"
#include "src/bailout-reason.h"
#include "src/base/bits.h"
#include "src/globals.h"

// Simulator-specific helpers.
#if USE_SIMULATOR
// TODO(all): If possible, automatically prepend an indicator like
// UNIMPLEMENTED or LOCATION.
#define ASM_UNIMPLEMENTED(message) \
    __ Debug(message, __LINE__, NO_PARAM)
#define ASM_UNIMPLEMENTED_BREAK(message) \
    __ Debug(message, __LINE__,          \
        FLAG_ignore_asm_unimplemented_break ? NO_PARAM : BREAK)
#if DEBUG
#define ASM_LOCATION(message) __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
#define ASM_LOCATION_IN_ASSEMBLER(message) \
    Debug("LOCATION: " message, __LINE__, NO_PARAM)
#else
#define ASM_LOCATION(message)
#define ASM_LOCATION_IN_ASSEMBLER(message)
#endif
#else
#define ASM_UNIMPLEMENTED(message)
#define ASM_UNIMPLEMENTED_BREAK(message)
#define ASM_LOCATION(message)
#define ASM_LOCATION_IN_ASSEMBLER(message)
#endif

namespace v8 {
namespace internal {

#define LS_MACRO_LIST(V)                                       \
    V(Ldrb, Register&, rt, LDRB_w)                             \
    V(Strb, Register&, rt, STRB_w)                             \
    V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
    V(Ldrh, Register&, rt, LDRH_w)                             \
    V(Strh, Register&, rt, STRH_w)                             \
    V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
    V(Ldr, CPURegister&, rt, LoadOpFor(rt))                    \
    V(Str, CPURegister&, rt, StoreOpFor(rt))                   \
    V(Ldrsw, Register&, rt, LDRSW_x)

#define LSPAIR_MACRO_LIST(V)                               \
    V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2))  \
    V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
    V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)

#define LDA_STL_MACRO_LIST(V) \
    V(Ldarb, ldarb)           \
    V(Ldarh, ldarh)           \
    V(Ldar, ldar)             \
    V(Ldaxrb, ldaxrb)         \
    V(Ldaxrh, ldaxrh)         \
    V(Ldaxr, ldaxr)           \
    V(Stlrb, stlrb)           \
    V(Stlrh, stlrh)           \
    V(Stlr, stlr)

#define STLX_MACRO_LIST(V) \
    V(Stlxrb, stlxrb)      \
    V(Stlxrh, stlxrh)      \
    V(Stlxr, stlxr)

    // ----------------------------------------------------------------------------
    // Static helper functions

    // Generate a MemOperand for loading a field from an object.
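    //
    // A minimal usage sketch (the offset value is hypothetical): heap object
    // pointers are tagged, and the returned operand is expected to compensate
    // for the tag when forming the address, e.g.:
    //
    //   Ldr(x1, FieldMemOperand(x0, 8));  // Load the field at offset 8.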
    inline MemOperand FieldMemOperand(Register object, int offset);

    // ----------------------------------------------------------------------------
    // MacroAssembler

    enum BranchType {
        // Copies of architectural conditions.
        // The associated conditions can be used in place of these; the code
        // will take care of reinterpreting them with the correct type.
        integer_eq = eq,
        integer_ne = ne,
        integer_hs = hs,
        integer_lo = lo,
        integer_mi = mi,
        integer_pl = pl,
        integer_vs = vs,
        integer_vc = vc,
        integer_hi = hi,
        integer_ls = ls,
        integer_ge = ge,
        integer_lt = lt,
        integer_gt = gt,
        integer_le = le,
        integer_al = al,
        integer_nv = nv,

        // These two are *different* from the architectural codes al and nv.
        // 'always' is used to generate unconditional branches.
        // 'never' is used to not generate a branch (generally as the inverse
        // branch type of 'always').
        always,
        never,
        // cbz and cbnz
        reg_zero,
        reg_not_zero,
        // tbz and tbnz
        reg_bit_clear,
        reg_bit_set,

        // Aliases.
        kBranchTypeFirstCondition = eq,
        kBranchTypeLastCondition = nv,
        kBranchTypeFirstUsingReg = reg_zero,
        kBranchTypeFirstUsingBit = reg_bit_clear
    };

    inline BranchType InvertBranchType(BranchType type)
    {
        if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
            return static_cast<BranchType>(
                NegateCondition(static_cast<Condition>(type)));
        } else {
            return static_cast<BranchType>(type ^ 1);
        }
    }
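
    // For example, following directly from the definitions above:
    // InvertBranchType(integer_eq) yields integer_ne via condition negation,
    // and InvertBranchType(reg_zero) yields reg_not_zero; the XOR with 1
    // relies on the enumerators being declared in adjacent pairs.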

    enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
    enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
    enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
    enum TargetAddressStorageMode {
        CAN_INLINE_TARGET_ADDRESS,
        NEVER_INLINE_TARGET_ADDRESS
    };
    enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
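
    // For example (illustrative): with kDiscardForSameWReg, Mov(w0, w0) may be
    // discarded as a no-op, while kDontDiscardForSameWReg keeps it, since
    // writing a W register also clears the upper 32 bits of the corresponding
    // X register.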

    // The macro assembler supports automatically pre-shifting immediates for
    // arithmetic and logical instructions, and then applying a post shift in
    // the instruction to undo the modification, in order to reduce the code
    // emitted for an operation. For example:
    //
    //  Add(x0, x0, 0x1f7de) => movz x16, 0xfbef; add x0, x0, x16, lsl #1.
    //
    // This optimisation can only be partially applied when the stack pointer
    // is an operand or destination, so this enumeration is used to control the
    // shift.
    enum PreShiftImmMode {
        kNoShift, // Don't pre-shift.
        kLimitShiftForSP, // Limit pre-shift for add/sub extend use.
        kAnyShift // Allow any pre-shift.
    };

    class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
    public:
        using TurboAssemblerBase::TurboAssemblerBase;

#if DEBUG
        void set_allow_macro_instructions(bool value)
        {
            allow_macro_instructions_ = value;
        }
        bool allow_macro_instructions() const { return allow_macro_instructions_; }
#endif

        // We should not use near calls or jumps for calls to external references,
        // since the code spaces are not guaranteed to be close to each other.
        bool CanUseNearCallOrJump(RelocInfo::Mode rmode)
        {
            return rmode != RelocInfo::EXTERNAL_REFERENCE;
        }

        static bool IsNearCallOffset(int64_t offset);

        // Activation support.
        void EnterFrame(StackFrame::Type type);
        void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg)
        {
            // Out-of-line constant pool not implemented on arm64.
            UNREACHABLE();
        }
        void LeaveFrame(StackFrame::Type type);

        inline void InitializeRootRegister();

        void Mov(const Register& rd, const Operand& operand,
            DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
        void Mov(const Register& rd, uint64_t imm);
        void Mov(const VRegister& vd, int vd_index, const VRegister& vn,
            int vn_index)
        {
            DCHECK(allow_macro_instructions());
            mov(vd, vd_index, vn, vn_index);
        }
        void Mov(const Register& rd, Smi smi);
        void Mov(const VRegister& vd, const VRegister& vn, int index)
        {
            DCHECK(allow_macro_instructions());
            mov(vd, vn, index);
        }
        void Mov(const VRegister& vd, int vd_index, const Register& rn)
        {
            DCHECK(allow_macro_instructions());
            mov(vd, vd_index, rn);
        }
        void Mov(const Register& rd, const VRegister& vn, int vn_index)
        {
            DCHECK(allow_macro_instructions());
            mov(rd, vn, vn_index);
        }

        // This is required for compatibility with architecture-independent code.
        // Remove if not needed.
        void Move(Register dst, Smi src);

        // Move src0 to dst0 and src1 to dst1, handling possible overlaps.
        void MovePair(Register dst0, Register src0, Register dst1, Register src1);

        // Register swap. Note that the register operands should be distinct.
        void Swap(Register lhs, Register rhs);
        void Swap(VRegister lhs, VRegister rhs);

// NEON by element instructions.
#define NEON_BYELEMENT_MACRO_LIST(V) \
    V(fmla, Fmla)                    \
    V(fmls, Fmls)                    \
    V(fmul, Fmul)                    \
    V(fmulx, Fmulx)                  \
    V(mul, Mul)                      \
    V(mla, Mla)                      \
    V(mls, Mls)                      \
    V(sqdmulh, Sqdmulh)              \
    V(sqrdmulh, Sqrdmulh)            \
    V(sqdmull, Sqdmull)              \
    V(sqdmull2, Sqdmull2)            \
    V(sqdmlal, Sqdmlal)              \
    V(sqdmlal2, Sqdmlal2)            \
    V(sqdmlsl, Sqdmlsl)              \
    V(sqdmlsl2, Sqdmlsl2)            \
    V(smull, Smull)                  \
    V(smull2, Smull2)                \
    V(smlal, Smlal)                  \
    V(smlal2, Smlal2)                \
    V(smlsl, Smlsl)                  \
    V(smlsl2, Smlsl2)                \
    V(umull, Umull)                  \
    V(umull2, Umull2)                \
    V(umlal, Umlal)                  \
    V(umlal2, Umlal2)                \
    V(umlsl, Umlsl)                  \
    V(umlsl2, Umlsl2)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                                     \
    void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm, \
        int vm_index)                                                        \
    {                                                                        \
        DCHECK(allow_macro_instructions());                                  \
        ASM(vd, vn, vm, vm_index);                                           \
    }
        NEON_BYELEMENT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC

// NEON 2 vector register instructions.
#define NEON_2VREG_MACRO_LIST(V) \
    V(abs, Abs)                  \
    V(addp, Addp)                \
    V(addv, Addv)                \
    V(cls, Cls)                  \
    V(clz, Clz)                  \
    V(cnt, Cnt)                  \
    V(faddp, Faddp)              \
    V(fcvtas, Fcvtas)            \
    V(fcvtau, Fcvtau)            \
    V(fcvtms, Fcvtms)            \
    V(fcvtmu, Fcvtmu)            \
    V(fcvtns, Fcvtns)            \
    V(fcvtnu, Fcvtnu)            \
    V(fcvtps, Fcvtps)            \
    V(fcvtpu, Fcvtpu)            \
    V(fmaxnmp, Fmaxnmp)          \
    V(fmaxnmv, Fmaxnmv)          \
    V(fmaxp, Fmaxp)              \
    V(fmaxv, Fmaxv)              \
    V(fminnmp, Fminnmp)          \
    V(fminnmv, Fminnmv)          \
    V(fminp, Fminp)              \
    V(fminv, Fminv)              \
    V(fneg, Fneg)                \
    V(frecpe, Frecpe)            \
    V(frecpx, Frecpx)            \
    V(frinta, Frinta)            \
    V(frinti, Frinti)            \
    V(frintm, Frintm)            \
    V(frintn, Frintn)            \
    V(frintp, Frintp)            \
    V(frintx, Frintx)            \
    V(frintz, Frintz)            \
    V(frsqrte, Frsqrte)          \
    V(fsqrt, Fsqrt)              \
    V(mov, Mov)                  \
    V(mvn, Mvn)                  \
    V(neg, Neg)                  \
    V(not_, Not)                 \
    V(rbit, Rbit)                \
    V(rev16, Rev16)              \
    V(rev32, Rev32)              \
    V(rev64, Rev64)              \
    V(sadalp, Sadalp)            \
    V(saddlp, Saddlp)            \
    V(saddlv, Saddlv)            \
    V(smaxv, Smaxv)              \
    V(sminv, Sminv)              \
    V(sqabs, Sqabs)              \
    V(sqneg, Sqneg)              \
    V(sqxtn2, Sqxtn2)            \
    V(sqxtn, Sqxtn)              \
    V(sqxtun2, Sqxtun2)          \
    V(sqxtun, Sqxtun)            \
    V(suqadd, Suqadd)            \
    V(sxtl2, Sxtl2)              \
    V(sxtl, Sxtl)                \
    V(uadalp, Uadalp)            \
    V(uaddlp, Uaddlp)            \
    V(uaddlv, Uaddlv)            \
    V(umaxv, Umaxv)              \
    V(uminv, Uminv)              \
    V(uqxtn2, Uqxtn2)            \
    V(uqxtn, Uqxtn)              \
    V(urecpe, Urecpe)            \
    V(ursqrte, Ursqrte)          \
    V(usqadd, Usqadd)            \
    V(uxtl2, Uxtl2)              \
    V(uxtl, Uxtl)                \
    V(xtn2, Xtn2)                \
    V(xtn, Xtn)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                \
    void MASM(const VRegister& vd, const VRegister& vn) \
    {                                                   \
        DCHECK(allow_macro_instructions());             \
        ASM(vd, vn);                                    \
    }
        NEON_2VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
#undef NEON_2VREG_MACRO_LIST

// NEON 2 vector register with immediate instructions.
#define NEON_2VREG_FPIMM_MACRO_LIST(V) \
    V(fcmeq, Fcmeq)                    \
    V(fcmge, Fcmge)                    \
    V(fcmgt, Fcmgt)                    \
    V(fcmle, Fcmle)                    \
    V(fcmlt, Fcmlt)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                            \
    void MASM(const VRegister& vd, const VRegister& vn, double imm) \
    {                                                               \
        DCHECK(allow_macro_instructions());                         \
        ASM(vd, vn, imm);                                           \
    }
        NEON_2VREG_FPIMM_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC

// NEON 3 vector register instructions.
#define NEON_3VREG_MACRO_LIST(V) \
    V(add, Add)                  \
    V(addhn2, Addhn2)            \
    V(addhn, Addhn)              \
    V(addp, Addp)                \
    V(and_, And)                 \
    V(bic, Bic)                  \
    V(bif, Bif)                  \
    V(bit, Bit)                  \
    V(bsl, Bsl)                  \
    V(cmeq, Cmeq)                \
    V(cmge, Cmge)                \
    V(cmgt, Cmgt)                \
    V(cmhi, Cmhi)                \
    V(cmhs, Cmhs)                \
    V(cmtst, Cmtst)              \
    V(eor, Eor)                  \
    V(fabd, Fabd)                \
    V(facge, Facge)              \
    V(facgt, Facgt)              \
    V(faddp, Faddp)              \
    V(fcmeq, Fcmeq)              \
    V(fcmge, Fcmge)              \
    V(fcmgt, Fcmgt)              \
    V(fmaxnmp, Fmaxnmp)          \
    V(fmaxp, Fmaxp)              \
    V(fminnmp, Fminnmp)          \
    V(fminp, Fminp)              \
    V(fmla, Fmla)                \
    V(fmls, Fmls)                \
    V(fmulx, Fmulx)              \
    V(frecps, Frecps)            \
    V(frsqrts, Frsqrts)          \
    V(mla, Mla)                  \
    V(mls, Mls)                  \
    V(mul, Mul)                  \
    V(orn, Orn)                  \
    V(orr, Orr)                  \
    V(pmull2, Pmull2)            \
    V(pmull, Pmull)              \
    V(pmul, Pmul)                \
    V(raddhn2, Raddhn2)          \
    V(raddhn, Raddhn)            \
    V(rsubhn2, Rsubhn2)          \
    V(rsubhn, Rsubhn)            \
    V(sabal2, Sabal2)            \
    V(sabal, Sabal)              \
    V(saba, Saba)                \
    V(sabdl2, Sabdl2)            \
    V(sabdl, Sabdl)              \
    V(sabd, Sabd)                \
    V(saddl2, Saddl2)            \
    V(saddl, Saddl)              \
    V(saddw2, Saddw2)            \
    V(saddw, Saddw)              \
    V(shadd, Shadd)              \
    V(shsub, Shsub)              \
    V(smaxp, Smaxp)              \
    V(smax, Smax)                \
    V(sminp, Sminp)              \
    V(smin, Smin)                \
    V(smlal2, Smlal2)            \
    V(smlal, Smlal)              \
    V(smlsl2, Smlsl2)            \
    V(smlsl, Smlsl)              \
    V(smull2, Smull2)            \
    V(smull, Smull)              \
    V(sqadd, Sqadd)              \
    V(sqdmlal2, Sqdmlal2)        \
    V(sqdmlal, Sqdmlal)          \
    V(sqdmlsl2, Sqdmlsl2)        \
    V(sqdmlsl, Sqdmlsl)          \
    V(sqdmulh, Sqdmulh)          \
    V(sqdmull2, Sqdmull2)        \
    V(sqdmull, Sqdmull)          \
    V(sqrdmulh, Sqrdmulh)        \
    V(sqrshl, Sqrshl)            \
    V(sqshl, Sqshl)              \
    V(sqsub, Sqsub)              \
    V(srhadd, Srhadd)            \
    V(srshl, Srshl)              \
    V(sshl, Sshl)                \
    V(ssubl2, Ssubl2)            \
    V(ssubl, Ssubl)              \
    V(ssubw2, Ssubw2)            \
    V(ssubw, Ssubw)              \
    V(subhn2, Subhn2)            \
    V(subhn, Subhn)              \
    V(sub, Sub)                  \
    V(trn1, Trn1)                \
    V(trn2, Trn2)                \
    V(uabal2, Uabal2)            \
    V(uabal, Uabal)              \
    V(uaba, Uaba)                \
    V(uabdl2, Uabdl2)            \
    V(uabdl, Uabdl)              \
    V(uabd, Uabd)                \
    V(uaddl2, Uaddl2)            \
    V(uaddl, Uaddl)              \
    V(uaddw2, Uaddw2)            \
    V(uaddw, Uaddw)              \
    V(uhadd, Uhadd)              \
    V(uhsub, Uhsub)              \
    V(umaxp, Umaxp)              \
    V(umax, Umax)                \
    V(uminp, Uminp)              \
    V(umin, Umin)                \
    V(umlal2, Umlal2)            \
    V(umlal, Umlal)              \
    V(umlsl2, Umlsl2)            \
    V(umlsl, Umlsl)              \
    V(umull2, Umull2)            \
    V(umull, Umull)              \
    V(uqadd, Uqadd)              \
    V(uqrshl, Uqrshl)            \
    V(uqshl, Uqshl)              \
    V(uqsub, Uqsub)              \
    V(urhadd, Urhadd)            \
    V(urshl, Urshl)              \
    V(ushl, Ushl)                \
    V(usubl2, Usubl2)            \
    V(usubl, Usubl)              \
    V(usubw2, Usubw2)            \
    V(usubw, Usubw)              \
    V(uzp1, Uzp1)                \
    V(uzp2, Uzp2)                \
    V(zip1, Zip1)                \
    V(zip2, Zip2)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                                     \
    void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm) \
    {                                                                        \
        DCHECK(allow_macro_instructions());                                  \
        ASM(vd, vn, vm);                                                     \
    }
        NEON_3VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC

        void Bic(const VRegister& vd, const int imm8, const int left_shift = 0)
        {
            DCHECK(allow_macro_instructions());
            bic(vd, imm8, left_shift);
        }

        // This is required for compatibility with architecture-independent code.
        inline void jmp(Label* L);

        void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);
        inline void B(Label* label);
        inline void B(Condition cond, Label* label);
        void B(Label* label, Condition cond);

        void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
        void Tbz(const Register& rt, unsigned bit_pos, Label* label);

        void Cbnz(const Register& rt, Label* label);
        void Cbz(const Register& rt, Label* label);

        inline void Dmb(BarrierDomain domain, BarrierType type);
        inline void Dsb(BarrierDomain domain, BarrierType type);
        inline void Isb();
        inline void Csdb();

        // Call a runtime routine. This expects {centry} to contain a fitting CEntry
        // builtin for the target runtime function and uses an indirect call.
        void CallRuntimeWithCEntry(Runtime::FunctionId fid, Register centry);

        // Removes the current frame and its arguments from the stack, preserving
        // the arguments and a return address pushed to the stack for the next
        // call. Both |callee_args_count| and |caller_args_count_reg| exclude the
        // receiver. |callee_args_count| is not modified; |caller_args_count_reg|
        // is trashed.
        void PrepareForTailCall(const ParameterCount& callee_args_count,
            Register caller_args_count_reg, Register scratch0,
            Register scratch1);

        inline void SmiUntag(Register dst, Register src);
        inline void SmiUntag(Register dst, const MemOperand& src);
        inline void SmiUntag(Register smi);

        // Calls Abort(msg) if the condition cond is not satisfied.
        // Use --debug-code to enable.
        void Assert(Condition cond, AbortReason reason);

        // Like Assert(), but without condition.
        // Use --debug-code to enable.
        void AssertUnreachable(AbortReason reason);

        void AssertSmi(Register object,
            AbortReason reason = AbortReason::kOperandIsNotASmi);

        // Like Assert(), but always enabled.
        void Check(Condition cond, AbortReason reason);

        inline void Debug(const char* message, uint32_t code, Instr params = BREAK);

        // Print a message to stderr and abort execution.
        void Abort(AbortReason reason);

        // Remaining instructions are simple pass-through calls to the assembler.
        inline void Asr(const Register& rd, const Register& rn, unsigned shift);
        inline void Asr(const Register& rd, const Register& rn, const Register& rm);

        // Try to move an immediate into the destination register in a single
        // instruction. Returns true on success, updating the contents of dst;
        // returns false otherwise.
        bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);

        inline void Bind(Label* label);

        static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);

        CPURegList* TmpList() { return &tmp_list_; }
        CPURegList* FPTmpList() { return &fptmp_list_; }

        static CPURegList DefaultTmpList();
        static CPURegList DefaultFPTmpList();

        // Move macros.
        inline void Mvn(const Register& rd, uint64_t imm);
        void Mvn(const Register& rd, const Operand& operand);
        static bool IsImmMovn(uint64_t imm, unsigned reg_size);
        static bool IsImmMovz(uint64_t imm, unsigned reg_size);

        void LogicalMacro(const Register& rd, const Register& rn,
            const Operand& operand, LogicalOp op);
        void AddSubMacro(const Register& rd, const Register& rn,
            const Operand& operand, FlagsUpdate S, AddSubOp op);
        inline void Orr(const Register& rd, const Register& rn,
            const Operand& operand);
        void Orr(const VRegister& vd, const int imm8, const int left_shift = 0)
        {
            DCHECK(allow_macro_instructions());
            orr(vd, imm8, left_shift);
        }
        inline void Orn(const Register& rd, const Register& rn,
            const Operand& operand);
        inline void Eor(const Register& rd, const Register& rn,
            const Operand& operand);
        inline void Eon(const Register& rd, const Register& rn,
            const Operand& operand);
        inline void And(const Register& rd, const Register& rn,
            const Operand& operand);
        inline void Ands(const Register& rd, const Register& rn,
            const Operand& operand);
        inline void Tst(const Register& rn, const Operand& operand);
        inline void Bic(const Register& rd, const Register& rn,
            const Operand& operand);
        inline void Blr(const Register& xn);
        inline void Cmp(const Register& rn, const Operand& operand);
        inline void Subs(const Register& rd, const Register& rn,
            const Operand& operand);
        void Csel(const Register& rd, const Register& rn, const Operand& operand,
            Condition cond);

        // Emits a runtime assert that the stack pointer is aligned.
        void AssertSpAligned();

        // Copy slot_count stack slots from the stack offset specified by src to
        // the stack offset specified by dst. The offsets and count are expressed in
        // slot-sized units. Offset dst must be less than src, or the gap between
        // them must be greater than or equal to slot_count, otherwise the result is
        // unpredictable. The function may corrupt its register arguments. The
        // registers must not alias each other.
        void CopySlots(int dst, Register src, Register slot_count);
        void CopySlots(Register dst, Register src, Register slot_count);

        // Copy count double words from the address in register src to the address
        // in register dst. There are two modes for this function:
        // 1) Address dst must be less than src, or the gap between them must be
        //    greater than or equal to count double words, otherwise the result is
        //    unpredictable. This is the default mode.
        // 2) Address src must be less than dst, or the gap between them must be
        //    greater than or equal to count double words, otherwise the result is
        //    unpredictable. In this mode, src and dst specify the last (highest)
        //    address of the regions to copy from and to.
        // The case where src == dst is not supported.
        // The function may corrupt its register arguments. The registers must not
        // alias each other.
        enum CopyDoubleWordsMode { kDstLessThanSrc, kSrcLessThanDst };
        void CopyDoubleWords(Register dst, Register src, Register count,
            CopyDoubleWordsMode mode = kDstLessThanSrc);

        // Calculate the address of a double word-sized slot at slot_offset from the
        // stack pointer, and write it to dst. Positive slot_offsets are at addresses
        // greater than sp, with slot zero at sp.
        void SlotAddress(Register dst, int slot_offset);
        void SlotAddress(Register dst, Register slot_offset);

        // Load a literal from the inline constant pool.
        inline void Ldr(const CPURegister& rt, const Operand& imm);

        // Claim or drop stack space without actually accessing memory.
        //
        // In debug mode, both of these will write invalid data into the claimed or
        // dropped space.
        //
        // The stack pointer must be aligned to 16 bytes and the size claimed or
        // dropped must be a multiple of 16 bytes.
        //
        // Note that unit_size must be specified in bytes. For variants which take a
        // Register count, the unit size must be a power of two.
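        //
        // A minimal usage sketch, with the default unit size of kXRegSize:
        //
        //   Claim(2);     // sp -= 16 (memory untouched outside debug mode).
        //   Poke(x0, 0);  // Store x0 into the first claimed slot.
        //   Drop(2);      // sp += 16.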
        inline void Claim(int64_t count, uint64_t unit_size = kXRegSize);
        inline void Claim(const Register& count, uint64_t unit_size = kXRegSize);
        inline void Drop(int64_t count, uint64_t unit_size = kXRegSize);
        inline void Drop(const Register& count, uint64_t unit_size = kXRegSize);

        // Drop 'count' arguments from the stack, rounded up to a multiple of two,
        // without actually accessing memory.
        // The arguments are assumed to be pointer-sized.
        // An optional mode argument can be passed to indicate whether the
        // receiver must be explicitly added to the count.
        enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };
        inline void DropArguments(const Register& count,
            ArgumentsCountMode mode = kCountIncludesReceiver);
        inline void DropArguments(int64_t count,
            ArgumentsCountMode mode = kCountIncludesReceiver);
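        // For example (a sketch of the intended semantics): with
        // kCountExcludesReceiver, DropArguments(2) drops the two arguments plus
        // the receiver, rounded up to four slots; with kCountIncludesReceiver
        // it drops two slots.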

        // Drop 'count' slots from stack, rounded up to a multiple of two, without
        // actually accessing memory.
        inline void DropSlots(int64_t count);

        // Push a single argument, with padding, to the stack.
        inline void PushArgument(const Register& arg);

        // Add and sub macros.
        inline void Add(const Register& rd, const Register& rn,
            const Operand& operand);
        inline void Adds(const Register& rd, const Register& rn,
            const Operand& operand);
        inline void Sub(const Register& rd, const Register& rn,
            const Operand& operand);

        // Abort execution if argument is not a positive or zero integer, enabled via
        // --debug-code.
        void AssertPositiveOrZero(Register value);

#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
    inline void FN(const REGTYPE REG, const MemOperand& addr);
        LS_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

        // Push or pop up to 4 registers of the same width to or from the stack.
        //
        // If an argument register is 'NoReg', all further arguments are also assumed
        // to be 'NoReg', and are thus not pushed or popped.
        //
        // Arguments are ordered such that "Push(a, b);" is functionally equivalent
        // to "Push(a); Push(b);".
        //
        // It is valid to push the same register more than once, and there is no
        // restriction on the order in which registers are specified.
        //
        // It is not valid to pop into the same register more than once in one
        // operation, not even into the zero register.
        //
        // The stack pointer must be aligned to 16 bytes on entry and the total size
        // of the specified registers must also be a multiple of 16 bytes.
        //
        // Aside from the registers passed into Pop, the stack pointer and
        // (possibly) the system stack pointer, these methods do not modify any
        // registers.
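        //
        // A minimal usage sketch: "Push(x0, x1);" behaves like
        // "Push(x0); Push(x1);", leaving x0 at the higher address, and can be
        // undone with "Pop(x1, x0);".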
        void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
            const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
        void Push(const CPURegister& src0, const CPURegister& src1,
            const CPURegister& src2, const CPURegister& src3,
            const CPURegister& src4, const CPURegister& src5 = NoReg,
            const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg);
        void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
            const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
        void Pop(const CPURegister& dst0, const CPURegister& dst1,
            const CPURegister& dst2, const CPURegister& dst3,
            const CPURegister& dst4, const CPURegister& dst5 = NoReg,
            const CPURegister& dst6 = NoReg, const CPURegister& dst7 = NoReg);
        void Push(const Register& src0, const VRegister& src1);

        // This is a convenience method for pushing a single Handle<Object>.
        inline void Push(Handle<HeapObject> object);
        inline void Push(Smi smi);

        // Aliases of Push and Pop, required for V8 compatibility.
        inline void push(Register src) { Push(src); }
        inline void pop(Register dst) { Pop(dst); }

        void SaveRegisters(RegList registers);
        void RestoreRegisters(RegList registers);

        void CallRecordWriteStub(Register object, Register address,
            RememberedSetAction remembered_set_action,
            SaveFPRegsMode fp_mode);
        void CallRecordWriteStub(Register object, Register address,
            RememberedSetAction remembered_set_action,
            SaveFPRegsMode fp_mode, Address wasm_target);
        void CallEphemeronKeyBarrier(Register object, Register address,
            SaveFPRegsMode fp_mode);

        // Alternative forms of Push and Pop, taking a RegList or CPURegList that
        // specifies the registers that are to be pushed or popped. Higher-numbered
        // registers are associated with higher memory addresses (as in the A32 push
        // and pop instructions).
        //
        // (Push|Pop)SizeRegList allow you to specify the register size as a
        // parameter. Only kXRegSizeInBits, kWRegSizeInBits, kDRegSizeInBits and
        // kSRegSizeInBits are supported.
        //
        // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
        void PushCPURegList(CPURegList registers);
        void PopCPURegList(CPURegList registers);

        // Calculate how much stack space (in bytes) is required to store
        // caller-saved registers, excluding those specified in the arguments.
        int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
            Register exclusion) const;

        // Push caller-saved registers on the stack, and return the number of
        // bytes by which the stack pointer is adjusted.
        int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion = no_reg);

        // Restore caller-saved registers from the stack, and return the number
        // of bytes by which the stack pointer is adjusted.
        int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion = no_reg);

        // Move an immediate into register dst, and return an Operand object for use
        // with a subsequent instruction that accepts a shift. The value moved into
        // dst is not necessarily equal to imm; it may have had a shifting operation
        // applied to it that will be subsequently undone by the shift applied in the
        // Operand.
        Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm,
            PreShiftImmMode mode);

        void CheckPageFlagSet(const Register& object, const Register& scratch,
            int mask, Label* if_any_set);

        void CheckPageFlagClear(const Register& object, const Register& scratch,
            int mask, Label* if_all_clear);

        // Test the bits of the register defined by bit_pattern, and branch if
        // ANY of those bits are set. May corrupt the status flags.
        inline void TestAndBranchIfAnySet(const Register& reg,
            const uint64_t bit_pattern, Label* label);

        // Test the bits of the register defined by bit_pattern, and branch if
        // ALL of those bits are clear (i.e. not set). May corrupt the status
        // flags.
        inline void TestAndBranchIfAllClear(const Register& reg,
            const uint64_t bit_pattern, Label* label);
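        // For example (a sketch, assuming V8's usual Smi tagging, where heap
        // object pointers have the low bit set):
        //
        //   TestAndBranchIfAnySet(x0, kSmiTagMask, &is_heap_object);
        //   TestAndBranchIfAllClear(x0, kSmiTagMask, &is_smi);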

        inline void Brk(int code);

        inline void JumpIfSmi(Register value, Label* smi_label,
            Label* not_smi_label = nullptr);

        inline void JumpIfEqual(Register x, int32_t y, Label* dest);
        inline void JumpIfLessThan(Register x, int32_t y, Label* dest);

        inline void Fmov(VRegister fd, VRegister fn);
        inline void Fmov(VRegister fd, Register rn);
        // Provide explicit double and float interfaces for FP immediate moves, rather
        // than relying on implicit C++ casts. This allows signalling NaNs to be
        // preserved when the immediate matches the format of fd. Most systems convert
        // signalling NaNs to quiet NaNs when converting between float and double.
        inline void Fmov(VRegister fd, double imm);
        inline void Fmov(VRegister fd, float imm);
        // Provide a template to allow other types to be converted automatically.
        template <typename T>
        void Fmov(VRegister fd, T imm)
        {
            DCHECK(allow_macro_instructions());
            Fmov(fd, static_cast<double>(imm));
        }
        inline void Fmov(Register rd, VRegister fn);

        void Movi(const VRegister& vd, uint64_t imm, Shift shift = LSL,
            int shift_amount = 0);
        void Movi(const VRegister& vd, uint64_t hi, uint64_t lo);

        void LoadFromConstantsTable(Register destination,
            int constant_index) override;
        void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
        void LoadRootRelative(Register destination, int32_t offset) override;

        void Jump(Register target, Condition cond = al);
        void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
        void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);

        void Call(Register target);
        void Call(Address target, RelocInfo::Mode rmode);
        void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
        void Call(ExternalReference target);

        // Generate an indirect call (for when a direct call's range is not adequate).
        void IndirectCall(Address target, RelocInfo::Mode rmode);

        void CallBuiltinPointer(Register builtin_pointer) override;

        void LoadCodeObjectEntry(Register destination, Register code_object) override;
        void CallCodeObject(Register code_object) override;
        void JumpCodeObject(Register code_object) override;

        // Generates an instruction sequence such that the return address points
        // to the instruction following the call.
        // The return address on the stack is used by frame iteration.
        void StoreReturnAddressAndCall(Register target);

        void CallForDeoptimization(Address target, int deopt_id);

        // Calls a C function.
        // The called function is not allowed to trigger a garbage collection,
        // since that might move the code and invalidate the return address
        // (unless this is somehow accounted for by the called function).
        void CallCFunction(ExternalReference function, int num_reg_arguments);
        void CallCFunction(ExternalReference function, int num_reg_arguments,
            int num_double_arguments);
        void CallCFunction(Register function, int num_reg_arguments,
            int num_double_arguments);

        // Performs a truncating conversion of a floating point number as used by
        // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
        // Exits with 'result' holding the answer.
        void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
            DoubleRegister double_input, StubCallMode stub_mode);

        inline void Mul(const Register& rd, const Register& rn, const Register& rm);

        inline void Fcvtzs(const Register& rd, const VRegister& fn);
        void Fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0)
        {
            DCHECK(allow_macro_instructions());
            fcvtzs(vd, vn, fbits);
        }

        inline void Fcvtzu(const Register& rd, const VRegister& fn);
        void Fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0)
        {
            DCHECK(allow_macro_instructions());
            fcvtzu(vd, vn, fbits);
        }

        inline void Madd(const Register& rd, const Register& rn, const Register& rm,
            const Register& ra);
        inline void Mneg(const Register& rd, const Register& rn, const Register& rm);
        inline void Sdiv(const Register& rd, const Register& rn, const Register& rm);
        inline void Udiv(const Register& rd, const Register& rn, const Register& rm);
        inline void Msub(const Register& rd, const Register& rn, const Register& rm,
            const Register& ra);

        inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
        inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
        inline void Umull(const Register& rd, const Register& rn, const Register& rm);
        inline void Smull(const Register& rd, const Register& rn, const Register& rm);

        inline void Sxtb(const Register& rd, const Register& rn);
        inline void Sxth(const Register& rd, const Register& rn);
        inline void Sxtw(const Register& rd, const Register& rn);
        inline void Ubfiz(const Register& rd, const Register& rn, unsigned lsb,
            unsigned width);
        inline void Ubfx(const Register& rd, const Register& rn, unsigned lsb,
            unsigned width);
        inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
        inline void Lsr(const Register& rd, const Register& rn, const Register& rm);
        inline void Ror(const Register& rd, const Register& rs, unsigned shift);
        inline void Ror(const Register& rd, const Register& rn, const Register& rm);
        inline void Cmn(const Register& rn, const Operand& operand);
        inline void Fadd(const VRegister& fd, const VRegister& fn,
            const VRegister& fm);
        inline void Fcmp(const VRegister& fn, const VRegister& fm);
        inline void Fcmp(const VRegister& fn, double value);
        inline void Fabs(const VRegister& fd, const VRegister& fn);
        inline void Fmul(const VRegister& fd, const VRegister& fn,
            const VRegister& fm);
        inline void Fsub(const VRegister& fd, const VRegister& fn,
            const VRegister& fm);
        inline void Fdiv(const VRegister& fd, const VRegister& fn,
            const VRegister& fm);
        inline void Fmax(const VRegister& fd, const VRegister& fn,
            const VRegister& fm);
        inline void Fmin(const VRegister& fd, const VRegister& fn,
            const VRegister& fm);
        inline void Rbit(const Register& rd, const Register& rn);
        inline void Rev(const Register& rd, const Register& rn);

        enum AdrHint {
            // The target must be within the immediate range of adr.
            kAdrNear,
            // The target may be outside of the immediate range of adr. Additional
            // instructions may be emitted.
            kAdrFar
        };
        void Adr(const Register& rd, Label* label, AdrHint = kAdrNear);

        // Add/sub with carry macros.
        inline void Adc(const Register& rd, const Register& rn,
            const Operand& operand);

        // Conditional macros.
        inline void Ccmp(const Register& rn, const Operand& operand, StatusFlags nzcv,
            Condition cond);

        inline void Clz(const Register& rd, const Register& rn);

        // Poke 'src' onto the stack. The offset is in bytes. The stack pointer
        // must be 16-byte aligned.
        void Poke(const CPURegister& src, const Operand& offset);

        // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
        // The stack pointer must be aligned to 16 bytes.
        void Peek(const CPURegister& dst, const Operand& offset);

        // Poke 'src1' and 'src2' onto the stack. The values written will be
        // adjacent with 'src2' at a higher address than 'src1'. The offset is in
        // bytes. The stack pointer must be 16-byte aligned.
        void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);

        inline void Sbfx(const Register& rd, const Register& rn, unsigned lsb,
            unsigned width);

        inline void Bfi(const Register& rd, const Register& rn, unsigned lsb,
            unsigned width);

        inline void Scvtf(const VRegister& fd, const Register& rn,
            unsigned fbits = 0);
        void Scvtf(const VRegister& vd, const VRegister& vn, int fbits = 0)
        {
            DCHECK(allow_macro_instructions());
            scvtf(vd, vn, fbits);
        }
        inline void Ucvtf(const VRegister& fd, const Register& rn,
            unsigned fbits = 0);
        void Ucvtf(const VRegister& vd, const VRegister& vn, int fbits = 0)
        {
            DCHECK(allow_macro_instructions());
            ucvtf(vd, vn, fbits);
        }

        void AssertFPCRState(Register fpcr = NoReg);
        void CanonicalizeNaN(const VRegister& dst, const VRegister& src);
        void CanonicalizeNaN(const VRegister& reg) { CanonicalizeNaN(reg, reg); }

        inline void CmovX(const Register& rd, const Register& rn, Condition cond);
        inline void Cset(const Register& rd, Condition cond);
        inline void Csetm(const Register& rd, Condition cond);
        inline void Fccmp(const VRegister& fn, const VRegister& fm, StatusFlags nzcv,
            Condition cond);
        inline void Csinc(const Register& rd, const Register& rn, const Register& rm,
            Condition cond);

        inline void Fcvt(const VRegister& fd, const VRegister& fn);

        int ActivationFrameAlignment();

        void Ins(const VRegister& vd, int vd_index, const VRegister& vn,
            int vn_index)
        {
            DCHECK(allow_macro_instructions());
            ins(vd, vd_index, vn, vn_index);
        }
        void Ins(const VRegister& vd, int vd_index, const Register& rn)
        {
            DCHECK(allow_macro_instructions());
            ins(vd, vd_index, rn);
        }

        inline void Bl(Label* label);
        inline void Br(const Register& xn);

        inline void Uxtb(const Register& rd, const Register& rn);
        inline void Uxth(const Register& rd, const Register& rn);
        inline void Uxtw(const Register& rd, const Register& rn);

        void Dup(const VRegister& vd, const VRegister& vn, int index)
        {
            DCHECK(allow_macro_instructions());
            dup(vd, vn, index);
        }
        void Dup(const VRegister& vd, const Register& rn)
        {
            DCHECK(allow_macro_instructions());
            dup(vd, rn);
        }

#define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
    inline void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr);
        LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

#define NEON_2VREG_SHIFT_MACRO_LIST(V) \
    V(rshrn, Rshrn)                    \
    V(rshrn2, Rshrn2)                  \
    V(shl, Shl)                        \
    V(shll, Shll)                      \
    V(shll2, Shll2)                    \
    V(shrn, Shrn)                      \
    V(shrn2, Shrn2)                    \
    V(sli, Sli)                        \
    V(sqrshrn, Sqrshrn)                \
    V(sqrshrn2, Sqrshrn2)              \
    V(sqrshrun, Sqrshrun)              \
    V(sqrshrun2, Sqrshrun2)            \
    V(sqshl, Sqshl)                    \
    V(sqshlu, Sqshlu)                  \
    V(sqshrn, Sqshrn)                  \
    V(sqshrn2, Sqshrn2)                \
    V(sqshrun, Sqshrun)                \
    V(sqshrun2, Sqshrun2)              \
    V(sri, Sri)                        \
    V(srshr, Srshr)                    \
    V(srsra, Srsra)                    \
    V(sshll, Sshll)                    \
    V(sshll2, Sshll2)                  \
    V(sshr, Sshr)                      \
    V(ssra, Ssra)                      \
    V(uqrshrn, Uqrshrn)                \
    V(uqrshrn2, Uqrshrn2)              \
    V(uqshl, Uqshl)                    \
    V(uqshrn, Uqshrn)                  \
    V(uqshrn2, Uqshrn2)                \
    V(urshr, Urshr)                    \
    V(ursra, Ursra)                    \
    V(ushll, Ushll)                    \
    V(ushll2, Ushll2)                  \
    V(ushr, Ushr)                      \
    V(usra, Usra)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                           \
    void MASM(const VRegister& vd, const VRegister& vn, int shift) \
    {                                                              \
        DCHECK(allow_macro_instructions());                        \
        ASM(vd, vn, shift);                                        \
    }
        NEON_2VREG_SHIFT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC

        void Umov(const Register& rd, const VRegister& vn, int vn_index)
        {
            DCHECK(allow_macro_instructions());
            umov(rd, vn, vn_index);
        }
        void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vm)
        {
            DCHECK(allow_macro_instructions());
            tbl(vd, vn, vm);
        }
        void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
            const VRegister& vm)
        {
            DCHECK(allow_macro_instructions());
            tbl(vd, vn, vn2, vm);
        }
        void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
            const VRegister& vn3, const VRegister& vm)
        {
            DCHECK(allow_macro_instructions());
            tbl(vd, vn, vn2, vn3, vm);
        }
        void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
            const VRegister& vn3, const VRegister& vn4, const VRegister& vm)
        {
            DCHECK(allow_macro_instructions());
            tbl(vd, vn, vn2, vn3, vn4, vm);
        }
        void Ext(const VRegister& vd, const VRegister& vn, const VRegister& vm,
            int index)
        {
            DCHECK(allow_macro_instructions());
            ext(vd, vn, vm, index);
        }

        void Smov(const Register& rd, const VRegister& vn, int vn_index)
        {
            DCHECK(allow_macro_instructions());
            smov(rd, vn, vn_index);
        }

// Load-acquire/store-release macros.
#define DECLARE_FUNCTION(FN, OP) \
    inline void FN(const Register& rt, const Register& rn);
        LDA_STL_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

        // Load an object from the root table.
        void LoadRoot(Register destination, RootIndex index) override;

        inline void Ret(const Register& xn = lr);

        // Perform a conversion from a double to a signed int64. If the input fits
        // in the range of the 64-bit result, execution branches to done. Otherwise,
        // execution falls through, and the sign of the result can be used to
        // determine if overflow was towards positive or negative infinity.
        //
        // On successful conversion, the least significant 32 bits of the result are
        // equivalent to the ECMA-262 operation "ToInt32".
        void TryConvertDoubleToInt64(Register result, DoubleRegister input,
            Label* done);

        inline void Mrs(const Register& rt, SystemRegister sysreg);

        // Generates function prologue code.
        void Prologue();

        void Cmgt(const VRegister& vd, const VRegister& vn, int imm)
        {
            DCHECK(allow_macro_instructions());
            cmgt(vd, vn, imm);
        }
        void Cmge(const VRegister& vd, const VRegister& vn, int imm)
        {
            DCHECK(allow_macro_instructions());
            cmge(vd, vn, imm);
        }
        void Cmeq(const VRegister& vd, const VRegister& vn, int imm)
        {
            DCHECK(allow_macro_instructions());
            cmeq(vd, vn, imm);
        }

        inline void Neg(const Register& rd, const Operand& operand);
        inline void Negs(const Register& rd, const Operand& operand);

        // Compute rd = abs(rm).
        // This function clobbers the condition flags. On output the overflow flag is
        // set iff the negation overflowed.
        //
        // If rm is the minimum representable value, the result is not representable.
        // Handlers for each case can be specified using the relevant labels.
        void Abs(const Register& rd, const Register& rm,
            Label* is_not_representable = nullptr,
            Label* is_representable = nullptr);
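        // For example (a sketch): a W-sized input of kWMinInt cannot be negated
        // in 32 bits, so
        //
        //   Abs(w0, w1, &not_representable, &representable);
        //
        // branches to not_representable when w1 holds kWMinInt.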

        inline void Cls(const Register& rd, const Register& rn);
        inline void Cneg(const Register& rd, const Register& rn, Condition cond);
        inline void Rev16(const Register& rd, const Register& rn);
        inline void Rev32(const Register& rd, const Register& rn);
        inline void Fcvtns(const Register& rd, const VRegister& fn);
        inline void Fcvtnu(const Register& rd, const VRegister& fn);
        inline void Fcvtms(const Register& rd, const VRegister& fn);
        inline void Fcvtmu(const Register& rd, const VRegister& fn);
        inline void Fcvtas(const Register& rd, const VRegister& fn);
        inline void Fcvtau(const Register& rd, const VRegister& fn);

        // Compute the start of the generated instruction stream from the current PC.
        // This is an alternative to embedding the {CodeObject} handle as a reference.
        void ComputeCodeStartAddress(const Register& rd);

        void ResetSpeculationPoisonRegister();

        // ---------------------------------------------------------------------------
        // Pointer compression support

        // Loads a field containing a HeapObject and decompresses it if pointer
        // compression is enabled.
        void LoadTaggedPointerField(const Register& destination,
            const MemOperand& field_operand);

        // Loads a field containing any tagged value and decompresses it if necessary.
        void LoadAnyTaggedField(const Register& destination,
            const MemOperand& field_operand);

        // Loads a field containing a Smi value and untags it.
        void SmiUntagField(Register dst, const MemOperand& src);

        // Compresses and stores a tagged value to the given on-heap location.
        void StoreTaggedField(const Register& value,
            const MemOperand& dst_field_operand);

        void DecompressTaggedSigned(const Register& destination,
            const MemOperand& field_operand);
        void DecompressTaggedPointer(const Register& destination,
            const MemOperand& field_operand);
        void DecompressAnyTagged(const Register& destination,
            const MemOperand& field_operand);

    protected:
        // The actual Push and Pop implementations. These don't generate any code
        // other than that required for the push or pop. This allows
        // (Push|Pop)CPURegList to bundle together run-time assertions for a large
        // block of registers.
        //
        // Note that size is per register, and is specified in bytes.
        void PushHelper(int count, int size, const CPURegister& src0,
            const CPURegister& src1, const CPURegister& src2,
            const CPURegister& src3);
        void PopHelper(int count, int size, const CPURegister& dst0,
            const CPURegister& dst1, const CPURegister& dst2,
            const CPURegister& dst3);

        void ConditionalCompareMacro(const Register& rn, const Operand& operand,
            StatusFlags nzcv, Condition cond,
            ConditionalCompareOp op);

        void AddSubWithCarryMacro(const Register& rd, const Register& rn,
            const Operand& operand, FlagsUpdate S,
            AddSubWithCarryOp op);

        // Call Printf. On a native build, a simple call will be generated, but if the
        // simulator is being used then a suitable pseudo-instruction is used. The
        // arguments and stack must be prepared by the caller as for a normal AAPCS64
        // call to 'printf'.
        //
        // The 'args' argument should point to an array of variable arguments in their
        // proper PCS registers (and in calling order). The argument registers can
        // have mixed types. The format string (x0) should not be included.
        void CallPrintf(int arg_count = 0, const CPURegister* args = nullptr);

    private:
#if DEBUG
        // Tells whether macro instructions can be used. When false, the
        // MacroAssembler will assert if a method which can emit a variable number
        // of instructions is called.
        bool allow_macro_instructions_ = true;
#endif

        // Scratch registers available for use by the MacroAssembler.
        CPURegList tmp_list_ = DefaultTmpList();
        CPURegList fptmp_list_ = DefaultFPTmpList();

        // Helps resolve branching to labels potentially out of range.
        // If the label is not bound, it registers the information necessary to later
        // be able to emit a veneer for this branch if necessary.
        // If the label is bound, it returns true if the label (or the previous link
        // in the label chain) is out of range. In that case the caller is responsible
        // for generating appropriate code.
        // Otherwise it returns false.
        // This function also checks whether veneers need to be emitted.
        bool NeedExtraInstructionsOrRegisterBranch(Label* label,
            ImmBranchType branch_type);

        void Movi16bitHelper(const VRegister& vd, uint64_t imm);
        void Movi32bitHelper(const VRegister& vd, uint64_t imm);
        void Movi64bitHelper(const VRegister& vd, uint64_t imm);

        void LoadStoreMacro(const CPURegister& rt, const MemOperand& addr,
            LoadStoreOp op);

        void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
            const MemOperand& addr, LoadStorePairOp op);

        void JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond = al);

        void CallRecordWriteStub(Register object, Register address,
            RememberedSetAction remembered_set_action,
            SaveFPRegsMode fp_mode, Handle<Code> code_target,
            Address wasm_target);
    };

    class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
    public:
        using TurboAssembler::TurboAssembler;

        // Instruction set functions ------------------------------------------------
        // Logical macros.
        inline void Bics(const Register& rd, const Register& rn,
            const Operand& operand);

        inline void Adcs(const Register& rd, const Register& rn,
            const Operand& operand);
        inline void Sbc(const Register& rd, const Register& rn,
            const Operand& operand);
        inline void Sbcs(const Register& rd, const Register& rn,
            const Operand& operand);
        inline void Ngc(const Register& rd, const Operand& operand);
        inline void Ngcs(const Register& rd, const Operand& operand);

        inline void Ccmn(const Register& rn, const Operand& operand, StatusFlags nzcv,
            Condition cond);

#define DECLARE_FUNCTION(FN, OP) \
    inline void FN(const Register& rs, const Register& rt, const Register& rn);
        STLX_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

        // Branch type inversion relies on these relations.
        STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) && (reg_bit_clear == (reg_bit_set ^ 1)) && (always == (never ^ 1)));

        inline void Bfxil(const Register& rd, const Register& rn, unsigned lsb,
            unsigned width);
        inline void Cinc(const Register& rd, const Register& rn, Condition cond);
        inline void Cinv(const Register& rd, const Register& rn, Condition cond);
        inline void CzeroX(const Register& rd, Condition cond);
        inline void Csinv(const Register& rd, const Register& rn, const Register& rm,
            Condition cond);
        inline void Csneg(const Register& rd, const Register& rn, const Register& rm,
            Condition cond);
        inline void Extr(const Register& rd, const Register& rn, const Register& rm,
            unsigned lsb);
        inline void Fcsel(const VRegister& fd, const VRegister& fn,
            const VRegister& fm, Condition cond);
        void Fcvtl(const VRegister& vd, const VRegister& vn)
        {
            DCHECK(allow_macro_instructions());
            fcvtl(vd, vn);
        }
        void Fcvtl2(const VRegister& vd, const VRegister& vn)
        {
            DCHECK(allow_macro_instructions());
            fcvtl2(vd, vn);
        }
        void Fcvtn(const VRegister& vd, const VRegister& vn)
        {
            DCHECK(allow_macro_instructions());
            fcvtn(vd, vn);
        }
        void Fcvtn2(const VRegister& vd, const VRegister& vn)
        {
            DCHECK(allow_macro_instructions());
            fcvtn2(vd, vn);
        }
        void Fcvtxn(const VRegister& vd, const VRegister& vn)
        {
            DCHECK(allow_macro_instructions());
            fcvtxn(vd, vn);
        }
        void Fcvtxn2(const VRegister& vd, const VRegister& vn)
        {
            DCHECK(allow_macro_instructions());
            fcvtxn2(vd, vn);
        }
        inline void Fmadd(const VRegister& fd, const VRegister& fn,
            const VRegister& fm, const VRegister& fa);
        inline void Fmaxnm(const VRegister& fd, const VRegister& fn,
            const VRegister& fm);
        inline void Fminnm(const VRegister& fd, const VRegister& fn,
            const VRegister& fm);
        inline void Fmsub(const VRegister& fd, const VRegister& fn,
            const VRegister& fm, const VRegister& fa);
        inline void Fnmadd(const VRegister& fd, const VRegister& fn,
            const VRegister& fm, const VRegister& fa);
        inline void Fnmsub(const VRegister& fd, const VRegister& fn,
            const VRegister& fm, const VRegister& fa);
        inline void Hint(SystemHint code);
        inline void Hlt(int code);
        inline void Ldnp(const CPURegister& rt, const CPURegister& rt2,
            const MemOperand& src);
        inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
        inline void Msr(SystemRegister sysreg, const Register& rt);
        inline void Nop() { nop(); }
        void Mvni(const VRegister& vd, const int imm8, Shift shift = LSL,
            const int shift_amount = 0)
        {
            DCHECK(allow_macro_instructions());
            mvni(vd, imm8, shift, shift_amount);
        }
        inline void Rev(const Register& rd, const Register& rn);
        inline void Sbfiz(const Register& rd, const Register& rn, unsigned lsb,
            unsigned width);
        inline void Smaddl(const Register& rd, const Register& rn, const Register& rm,
            const Register& ra);
        inline void Smsubl(const Register& rd, const Register& rn, const Register& rm,
            const Register& ra);
        inline void Smulh(const Register& rd, const Register& rn, const Register& rm);
        inline void Stnp(const CPURegister& rt, const CPURegister& rt2,
            const MemOperand& dst);
        inline void Umaddl(const Register& rd, const Register& rn, const Register& rm,
            const Register& ra);
        inline void Umsubl(const Register& rd, const Register& rn, const Register& rm,
            const Register& ra);

        void Cmle(const VRegister& vd, const VRegister& vn, int imm)
        {
            DCHECK(allow_macro_instructions());
            cmle(vd, vn, imm);
        }
        void Cmlt(const VRegister& vd, const VRegister& vn, int imm)
        {
            DCHECK(allow_macro_instructions());
            cmlt(vd, vn, imm);
        }

        void Ld1(const VRegister& vt, const MemOperand& src)
        {
            DCHECK(allow_macro_instructions());
            ld1(vt, src);
        }
        void Ld1(const VRegister& vt, const VRegister& vt2, const MemOperand& src)
        {
            DCHECK(allow_macro_instructions());
            ld1(vt, vt2, src);
        }
        void Ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
            const MemOperand& src)
        {
            DCHECK(allow_macro_instructions());
            ld1(vt, vt2, vt3, src);
        }
        void Ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
            const VRegister& vt4, const MemOperand& src)
        {
            DCHECK(allow_macro_instructions());
            ld1(vt, vt2, vt3, vt4, src);
        }
        void Ld1(const VRegister& vt, int lane, const MemOperand& src)
        {
            DCHECK(allow_macro_instructions());
            ld1(vt, lane, src);
        }
        void Ld1r(const VRegister& vt, const MemOperand& src)
        {
            DCHECK(allow_macro_instructions());
            ld1r(vt, src);
        }
        void Ld2(const VRegister& vt, const VRegister& vt2, const MemOperand& src)
        {
            DCHECK(allow_macro_instructions());
            ld2(vt, vt2, src);
        }
        void Ld2(const VRegister& vt, const VRegister& vt2, int lane,
            const MemOperand& src)
        {
            DCHECK(allow_macro_instructions());
            ld2(vt, vt2, lane, src);
        }
        void Ld2r(const VRegister& vt, const VRegister& vt2, const MemOperand& src)
        {
            DCHECK(allow_macro_instructions());
            ld2r(vt, vt2, src);
        }
        void Ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
            const MemOperand& src)
        {
            DCHECK(allow_macro_instructions());
            ld3(vt, vt2, vt3, src);
        }
        void Ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
            int lane, const MemOperand& src)
        {
            DCHECK(allow_macro_instructions());
            ld3(vt, vt2, vt3, lane, src);
        }
        void Ld3r(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
            const MemOperand& src)
        {
            DCHECK(allow_macro_instructions());
            ld3r(vt, vt2, vt3, src);
        }
        void Ld4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
            const VRegister& vt4, const MemOperand& src)
        {
            DCHECK(allow_macro_instructions());
            ld4(vt, vt2, vt3, vt4, src);
        }
        void Ld4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
            const VRegister& vt4, int lane, const MemOperand& src)
        {
            DCHECK(allow_macro_instructions());
            ld4(vt, vt2, vt3, vt4, lane, src);
        }
        void Ld4r(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
            const VRegister& vt4, const MemOperand& src)
        {
            DCHECK(allow_macro_instructions());
            ld4r(vt, vt2, vt3, vt4, src);
        }
        void St1(const VRegister& vt, const MemOperand& dst)
        {
            DCHECK(allow_macro_instructions());
            st1(vt, dst);
        }
        void St1(const VRegister& vt, const VRegister& vt2, const MemOperand& dst)
        {
            DCHECK(allow_macro_instructions());
            st1(vt, vt2, dst);
        }
        void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
            const MemOperand& dst)
        {
            DCHECK(allow_macro_instructions());
            st1(vt, vt2, vt3, dst);
        }
        void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
            const VRegister& vt4, const MemOperand& dst)
        {
            DCHECK(allow_macro_instructions());
            st1(vt, vt2, vt3, vt4, dst);
        }
        void St1(const VRegister& vt, int lane, const MemOperand& dst)
        {
            DCHECK(allow_macro_instructions());
            st1(vt, lane, dst);
        }
        void St2(const VRegister& vt, const VRegister& vt2, const MemOperand& dst)
        {
            DCHECK(allow_macro_instructions());
            st2(vt, vt2, dst);
        }
        void St3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
            const MemOperand& dst)
        {
            DCHECK(allow_macro_instructions());
            st3(vt, vt2, vt3, dst);
        }
        void St4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
            const VRegister& vt4, const MemOperand& dst)
        {
            DCHECK(allow_macro_instructions());
            st4(vt, vt2, vt3, vt4, dst);
        }
        void St2(const VRegister& vt, const VRegister& vt2, int lane,
            const MemOperand& dst)
        {
            DCHECK(allow_macro_instructions());
            st2(vt, vt2, lane, dst);
        }
        void St3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
            int lane, const MemOperand& dst)
        {
            DCHECK(allow_macro_instructions());
            st3(vt, vt2, vt3, lane, dst);
        }
        void St4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
            const VRegister& vt4, int lane, const MemOperand& dst)
        {
            DCHECK(allow_macro_instructions());
            st4(vt, vt2, vt3, vt4, lane, dst);
        }
        void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vm)
        {
            DCHECK(allow_macro_instructions());
            tbx(vd, vn, vm);
        }
        void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
            const VRegister& vm)
        {
            DCHECK(allow_macro_instructions());
            tbx(vd, vn, vn2, vm);
        }
        void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
            const VRegister& vn3, const VRegister& vm)
        {
            DCHECK(allow_macro_instructions());
            tbx(vd, vn, vn2, vn3, vm);
        }
        void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
            const VRegister& vn3, const VRegister& vn4, const VRegister& vm)
        {
            DCHECK(allow_macro_instructions());
            tbx(vd, vn, vn2, vn3, vn4, vm);
        }

        void LoadObject(Register result, Handle<Object> object);

        inline void PushSizeRegList(RegList registers, unsigned reg_size,
            CPURegister::RegisterType type = CPURegister::kRegister)
        {
            PushCPURegList(CPURegList(type, reg_size, registers));
        }
        inline void PopSizeRegList(RegList registers, unsigned reg_size,
            CPURegister::RegisterType type = CPURegister::kRegister)
        {
            PopCPURegList(CPURegList(type, reg_size, registers));
        }
        inline void PushXRegList(RegList regs)
        {
            PushSizeRegList(regs, kXRegSizeInBits);
        }
        inline void PopXRegList(RegList regs)
        {
            PopSizeRegList(regs, kXRegSizeInBits);
        }
        inline void PushWRegList(RegList regs)
        {
            PushSizeRegList(regs, kWRegSizeInBits);
        }
        inline void PopWRegList(RegList regs)
        {
            PopSizeRegList(regs, kWRegSizeInBits);
        }
        inline void PushDRegList(RegList regs)
        {
            PushSizeRegList(regs, kDRegSizeInBits, CPURegister::kVRegister);
        }
        inline void PopDRegList(RegList regs)
        {
            PopSizeRegList(regs, kDRegSizeInBits, CPURegister::kVRegister);
        }
        inline void PushSRegList(RegList regs)
        {
            PushSizeRegList(regs, kSRegSizeInBits, CPURegister::kVRegister);
        }
        inline void PopSRegList(RegList regs)
        {
            PopSizeRegList(regs, kSRegSizeInBits, CPURegister::kVRegister);
        }
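
        // Hypothetical usage sketch for the RegList push/pop helpers above
        // (assuming Register::bit() builds the required bit mask):
        //   PushXRegList(x0.bit() | x1.bit() | x2.bit());
        //   ...
        //   PopXRegList(x0.bit() | x1.bit() | x2.bit());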

        // Push the specified register 'count' times.
        void PushMultipleTimes(CPURegister src, Register count);

        // Sometimes callers need to push or pop multiple registers in a way that is
        // difficult to structure efficiently for fixed Push or Pop calls. This scope
        // allows push requests to be queued up, then flushed at once. The
        // MacroAssembler will try to generate the most efficient sequence required.
        //
        // Unlike the other Push and Pop macros, PushPopQueue can handle mixed sets of
        // register sizes and types.
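        //
        // Hypothetical usage sketch (assuming 'masm' is a MacroAssembler*):
        //   PushPopQueue queue(masm);
        //   queue.Queue(x10);
        //   queue.Queue(d0);     // Mixed register sizes and types are allowed.
        //   queue.Queue(w3);
        //   queue.PushQueued();  // Flushes the queue as one efficient sequence.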
        class PushPopQueue {
        public:
            explicit PushPopQueue(MacroAssembler* masm)
                : masm_(masm)
                , size_(0)
            {
            }

            ~PushPopQueue()
            {
                DCHECK(queued_.empty());
            }

            void Queue(const CPURegister& rt)
            {
                size_ += rt.SizeInBytes();
                queued_.push_back(rt);
            }

            void PushQueued();
            void PopQueued();

        private:
            MacroAssembler* masm_;
            int size_;
            std::vector<CPURegister> queued_;
        };

        // Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
        // values peeked will be adjacent, with the value in 'dst2' being from a
        // higher address than 'dst1'. The offset is in bytes. The stack pointer must
        // be aligned to 16 bytes.
        void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);

        // Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
        // register.
        inline void ClaimBySMI(const Register& count_smi,
            uint64_t unit_size = kXRegSize);
        inline void DropBySMI(const Register& count_smi,
            uint64_t unit_size = kXRegSize);

        // Compare a register with an operand, and branch to label depending on the
        // condition. May corrupt the status flags.
        inline void CompareAndBranch(const Register& lhs,
            const Operand& rhs,
            Condition cond,
            Label* label);

        // Insert one or more instructions into the instruction stream that encode
        // some caller-defined data. The instructions used will be executable with no
        // side effects.
        inline void InlineData(uint64_t data);

        // Insert an instrumentation enable marker into the instruction stream.
        inline void EnableInstrumentation();

        // Insert an instrumentation disable marker into the instruction stream.
        inline void DisableInstrumentation();

        // Insert an instrumentation event marker into the instruction stream. These
        // will be picked up by the instrumentation system to annotate an instruction
        // profile. The argument marker_name must be a printable two-character string;
        // it will be encoded in the event marker.
        inline void AnnotateInstrumentation(const char* marker_name);

        // Preserve the callee-saved registers (as defined by AAPCS64).
        //
        // Higher-numbered registers are pushed before lower-numbered registers, and
        // thus get higher addresses.
        // Floating-point registers are pushed before general-purpose registers, and
        // thus get higher addresses.
        //
        // Note that registers are not checked for invalid values. Use this method
        // only if you know that the GC won't try to examine the values on the stack.
        void PushCalleeSavedRegisters();

        // Restore the callee-saved registers (as defined by AAPCS64).
        //
        // Higher-numbered registers are popped after lower-numbered registers, and
        // thus come from higher addresses.
        // Floating-point registers are popped after general-purpose registers, and
        // thus come from higher addresses.
        void PopCalleeSavedRegisters();

        // Helpers ------------------------------------------------------------------

        static int SafepointRegisterStackIndex(int reg_code);

        template <typename Field>
        void DecodeField(Register dst, Register src)
        {
            static const int shift = Field::kShift;
            static const int setbits = CountSetBits(Field::kMask, 32);
            Ubfx(dst, src, shift, setbits);
        }

        template <typename Field>
        void DecodeField(Register reg)
        {
            DecodeField<Field>(reg, reg);
        }
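
        // Hypothetical usage sketch for DecodeField (assuming 'MyField' is a
        // BitField<...> type providing kShift and kMask):
        //   DecodeField<MyField>(x1, x0);  // Extract the field from x0 into x1.
        //   DecodeField<MyField>(x2);      // Decode x2 in place.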

        // ---- SMI and Number Utilities ----

        inline void SmiTag(Register dst, Register src);
        inline void SmiTag(Register smi);

        inline void JumpIfNotSmi(Register value, Label* not_smi_label);
        inline void JumpIfBothSmi(Register value1, Register value2,
            Label* both_smi_label,
            Label* not_smi_label = nullptr);
        inline void JumpIfEitherSmi(Register value1, Register value2,
            Label* either_smi_label,
            Label* not_smi_label = nullptr);
        inline void JumpIfEitherNotSmi(Register value1,
            Register value2,
            Label* not_smi_label);
        inline void JumpIfBothNotSmi(Register value1,
            Register value2,
            Label* not_smi_label);

        // Abort execution if argument is a smi, enabled via --debug-code.
        void AssertNotSmi(Register object,
            AbortReason reason = AbortReason::kOperandIsASmi);

        inline void ObjectTag(Register tagged_obj, Register obj);
        inline void ObjectUntag(Register untagged_obj, Register obj);

        // Abort execution if argument is not a Constructor, enabled via --debug-code.
        void AssertConstructor(Register object);

        // Abort execution if argument is not a JSFunction, enabled via --debug-code.
        void AssertFunction(Register object);

        // Abort execution if argument is not a JSGeneratorObject (or subclass),
        // enabled via --debug-code.
        void AssertGeneratorObject(Register object);

        // Abort execution if argument is not a JSBoundFunction,
        // enabled via --debug-code.
        void AssertBoundFunction(Register object);

        // Abort execution if argument is not undefined or an AllocationSite, enabled
        // via --debug-code.
        void AssertUndefinedOrAllocationSite(Register object);

        // Try to represent a double as a signed 64-bit int.
        // This succeeds if the result compares equal to the input, so inputs of -0.0
        // are represented as 0 and handled as a success.
        //
        // On output the Z flag is set if the operation was successful.
        void TryRepresentDoubleAsInt64(Register as_int, VRegister value,
            VRegister scratch_d,
            Label* on_successful_conversion = nullptr,
            Label* on_failed_conversion = nullptr)
        {
            DCHECK(as_int.Is64Bits());
            TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
                on_failed_conversion);
        }

        // ---- Calling / Jumping helpers ----

        void CallRuntime(const Runtime::Function* f,
            int num_arguments,
            SaveFPRegsMode save_doubles = kDontSaveFPRegs);

        // Convenience function: Same as above, but takes the fid instead.
        void CallRuntime(Runtime::FunctionId fid, int num_arguments,
            SaveFPRegsMode save_doubles = kDontSaveFPRegs)
        {
            CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
        }

        // Convenience function: Same as above, but takes the fid instead.
        void CallRuntime(Runtime::FunctionId fid,
            SaveFPRegsMode save_doubles = kDontSaveFPRegs)
        {
            const Runtime::Function* function = Runtime::FunctionForId(fid);
            CallRuntime(function, function->nargs, save_doubles);
        }

        void TailCallRuntime(Runtime::FunctionId fid);

        // Jump to a runtime routine.
        void JumpToExternalReference(const ExternalReference& builtin,
            bool builtin_exit_frame = false);

        // Generates a trampoline to jump to the off-heap instruction stream.
        void JumpToInstructionStream(Address entry);

        // Registers used through the invocation chain are hard-coded.
        // We force passing the parameters to ensure the contracts are correctly
        // honoured by the caller.
        // 'function' must be x1.
        // 'actual' must use an immediate or x0.
        // 'expected' must use an immediate or x2.
        // 'call_kind' must be x5.
        void InvokePrologue(const ParameterCount& expected,
            const ParameterCount& actual, Label* done,
            InvokeFlag flag, bool* definitely_mismatches);

        // On function call, call into the debugger if necessary.
        void CheckDebugHook(Register fun, Register new_target,
            const ParameterCount& expected,
            const ParameterCount& actual);
        void InvokeFunctionCode(Register function, Register new_target,
            const ParameterCount& expected,
            const ParameterCount& actual, InvokeFlag flag);
        // Invoke the JavaScript function in the given register.
        // Changes the current context to the context in the function before invoking.
        void InvokeFunction(Register function, Register new_target,
            const ParameterCount& actual, InvokeFlag flag);
        void InvokeFunction(Register function, const ParameterCount& expected,
            const ParameterCount& actual, InvokeFlag flag);

        // ---- Code generation helpers ----

        // Frame restart support
        void MaybeDropFrames();

        // ---------------------------------------------------------------------------
        // Support functions.

        // Compare object type for heap object.  heap_object contains a non-Smi
        // whose object type should be compared with the given type.  This both
        // sets the flags and leaves the object type in the type_reg register.
        // It leaves the map in the map register (unless the type_reg and map register
        // are the same register).  It leaves the heap object in the heap_object
        // register unless the heap_object register is the same register as one of the
        // other registers.
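        //
        // Hypothetical usage sketch: branch away unless x0 holds a JSFunction
        // (x1 receives the map and x2 the instance type):
        //   Label not_a_function;
        //   CompareObjectType(x0, x1, x2, JS_FUNCTION_TYPE);
        //   B(ne, &not_a_function);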
        void CompareObjectType(Register heap_object,
            Register map,
            Register type_reg,
            InstanceType type);

        // Compare object type for heap object, and branch if equal (or not).
        // heap_object contains a non-Smi whose object type should be compared with
        // the given type.  This both sets the flags and leaves the object type in
        // the type_reg register. It leaves the map in the map register (unless the
        // type_reg and map register are the same register).  It leaves the heap
        // object in the heap_object register unless the heap_object register is the
        // same register as one of the other registers.
        void JumpIfObjectType(Register object,
            Register map,
            Register type_reg,
            InstanceType type,
            Label* if_cond_pass,
            Condition cond = eq);

        // Compare instance type in a map.  map contains a valid map object whose
        // object type should be compared with the given type.  This both
        // sets the flags and leaves the object type in the type_reg register.
        void CompareInstanceType(Register map,
            Register type_reg,
            InstanceType type);

        // Load the elements kind field from a map, and return it in the result
        // register.
        void LoadElementsKindFromMap(Register result, Register map);

        // Compare the object in a register to a value from the root list.
        void CompareRoot(const Register& obj, RootIndex index);

        // Compare the object in a register to a value and jump if they are equal.
        void JumpIfRoot(const Register& obj, RootIndex index, Label* if_equal);

        // Compare the object in a register to a value and jump if they are not equal.
        void JumpIfNotRoot(const Register& obj, RootIndex index, Label* if_not_equal);

        // Checks if value is in range [lower_limit, higher_limit] using a single
        // comparison.
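        // One way this can be achieved (illustrative sketch only, not
        // necessarily the actual implementation) is to bias the value and use
        // one unsigned comparison:
        //   Sub(scratch, value, lower_limit);
        //   Cmp(scratch, higher_limit - lower_limit);
        //   B(ls, on_in_range);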
        void JumpIfIsInRange(const Register& value, unsigned lower_limit,
            unsigned higher_limit, Label* on_in_range);

        // Compare the contents of a register with an operand, and branch to true,
        // false or fall through, depending on condition.
        void CompareAndSplit(const Register& lhs,
            const Operand& rhs,
            Condition cond,
            Label* if_true,
            Label* if_false,
            Label* fall_through);

        // Test the bits of register defined by bit_pattern, and branch to
        // if_any_set, if_all_clear or fall_through accordingly.
        void TestAndSplit(const Register& reg,
            uint64_t bit_pattern,
            Label* if_all_clear,
            Label* if_any_set,
            Label* fall_through);

        // ---------------------------------------------------------------------------
        // Frames.

        void ExitFramePreserveFPRegs();
        void ExitFrameRestoreFPRegs();

        // Enter exit frame. Exit frames are used when calling C code from generated
        // (JavaScript) code.
        //
        // The only registers modified by this function are the provided scratch
        // register, the frame pointer and the stack pointer.
        //
        // The 'extra_space' argument can be used to allocate some space in the exit
        // frame that will be ignored by the GC. This space will be reserved in the
        // bottom of the frame immediately above the return address slot.
        //
        // Set up a stack frame and registers as follows:
        //         fp[8]: CallerPC (lr)
        //   fp -> fp[0]: CallerFP (old fp)
        //         fp[-8]: SPOffset (new sp)
        //         fp[-16]: CodeObject()
        //         fp[-16 - fp_size]: Saved doubles, if save_doubles is true.
        //         sp[8]: Memory reserved for the caller if extra_space != 0.
        //                 Alignment padding, if necessary.
        //   sp -> sp[0]: Space reserved for the return address.
        //
        // This function also stores the new frame information in the top frame, so
        // that the new frame becomes the current frame.
        void EnterExitFrame(bool save_doubles, const Register& scratch,
            int extra_space = 0,
            StackFrame::Type frame_type = StackFrame::EXIT);

        // Leave the current exit frame, after a C function has returned to generated
        // (JavaScript) code.
        //
        // This effectively unwinds the operation of EnterExitFrame:
        //  * Preserved doubles are restored (if save_doubles is true).
        //  * The frame information is removed from the top frame.
        //  * The exit frame is dropped.
        void LeaveExitFrame(bool save_doubles, const Register& scratch,
            const Register& scratch2);

        // Load the global proxy from the current context.
        void LoadGlobalProxy(Register dst);

        // ---------------------------------------------------------------------------
        // In-place weak references.
        void LoadWeakValue(Register out, Register in, Label* target_if_cleared);

        // ---------------------------------------------------------------------------
        // StatsCounter support

        void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
            Register scratch2);
        void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
            Register scratch2);

        // ---------------------------------------------------------------------------
        // Garbage collector support (GC).

        // Push and pop the registers that can hold pointers, as defined by the
        // RegList constant kSafepointSavedRegisters.
        void PushSafepointRegisters();
        void PopSafepointRegisters();

        void CheckPageFlag(const Register& object, const Register& scratch, int mask,
            Condition cc, Label* condition_met);

        // Notify the garbage collector that we wrote a pointer into an object.
        // |object| is the object being stored into, |value| is the object being
        // stored.  value and scratch registers are clobbered by the operation.
        // The offset is the offset from the start of the object, not the offset from
        // the tagged HeapObject pointer.  For use with FieldMemOperand(reg, off).
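        //
        // Hypothetical usage sketch: record a write of x2 into a field of the
        // object held in x0, using x3 as scratch (the offset and flag values
        // here are examples only):
        //   RecordWriteField(x0, JSObject::kElementsOffset, x2, x3,
        //       kLRHasNotBeenSaved, kDontSaveFPRegs);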
        void RecordWriteField(
            Register object, int offset, Register value, Register scratch,
            LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
            RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
            SmiCheck smi_check = INLINE_SMI_CHECK);

        // For a given |object| notify the garbage collector that the slot |address|
        // has been written.  |value| is the object being stored. The value and
        // address registers are clobbered by the operation.
        void RecordWrite(
            Register object, Register address, Register value,
            LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
            RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
            SmiCheck smi_check = INLINE_SMI_CHECK);

        // ---------------------------------------------------------------------------
        // Debugging.

        void AssertRegisterIsRoot(
            Register reg, RootIndex index,
            AbortReason reason = AbortReason::kRegisterDidNotMatchExpectedRoot);

        // Abort if the specified register contains the invalid color bit pattern.
        // The pattern must be in bits [1:0] of the 'reg' register.
        //
        // If emit_debug_code() is false, this emits no code.
        void AssertHasValidColor(const Register& reg);

        void LoadNativeContextSlot(int index, Register dst);

        // Like printf, but print at run-time from generated code.
        //
        // The caller must ensure that arguments for floating-point placeholders
        // (such as %e, %f or %g) are VRegisters, and that arguments for integer
        // placeholders are Registers.
        //
        // Format placeholders that refer to more than one argument, or to a specific
        // argument, are not supported. This includes formats like "%1$d" or "%.*d".
        //
        // This function automatically preserves caller-saved registers so that
        // calling code can use Printf at any point without having to worry about
        // corruption. The preservation mechanism generates a lot of code. If this is
        // a problem, preserve the important registers manually and then call
        // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
        // implicitly preserved.
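        //
        // Hypothetical usage sketch (integer arguments use W/X registers and
        // floating-point arguments use V registers):
        //   Printf("counter: %d, ratio: %f\n", w0, d1);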
        void Printf(const char* format,
            CPURegister arg0 = NoCPUReg,
            CPURegister arg1 = NoCPUReg,
            CPURegister arg2 = NoCPUReg,
            CPURegister arg3 = NoCPUReg);

        // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
        //
        // The return code from the system printf call will be returned in x0.
        void PrintfNoPreserve(const char* format,
            const CPURegister& arg0 = NoCPUReg,
            const CPURegister& arg1 = NoCPUReg,
            const CPURegister& arg2 = NoCPUReg,
            const CPURegister& arg3 = NoCPUReg);

    private:
        // Try to represent a double as an int so that integer fast-paths may be
        // used. Not every valid integer value is guaranteed to be caught.
        // It supports both 32-bit and 64-bit integers depending on whether 'as_int'
        // is a W or X register.
        //
        // This does not distinguish between +0 and -0, so if this distinction is
        // important it must be checked separately.
        //
        // On output the Z flag is set if the operation was successful.
        void TryRepresentDoubleAsInt(Register as_int, VRegister value,
            VRegister scratch_d,
            Label* on_successful_conversion = nullptr,
            Label* on_failed_conversion = nullptr);

    public:
        // Far branches resolving.
        //
        // The various classes of branch instructions with immediate offsets have
        // different ranges. While the Assembler will fail to assemble a branch
        // exceeding its range, the MacroAssembler offers a mechanism to resolve
        // branches to too distant targets, either by tweaking the generated code to
        // use branch instructions with wider ranges or generating veneers.
        //
        // Currently branches to distant targets are resolved using unconditional
        // branch instructions with a range of +-128MB. If that becomes too little
        // (!), the mechanism can be extended to generate special veneers for really
        // far targets.

        DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
    };

    // Use this scope when you need a one-to-one mapping between methods and
    // instructions. This scope prevents the MacroAssembler from being called and
    // literal pools from being emitted. It also asserts the number of instructions
    // emitted is what you specified when creating the scope.
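    //
    // Hypothetical usage sketch (assuming 'tasm' is a TurboAssembler* and
    // exactly two instructions are emitted inside the scope):
    //   {
    //     InstructionAccurateScope scope(tasm, 2);
    //     tasm->ldr(x0, MemOperand(x1));
    //     tasm->add(x0, x0, Operand(1));
    //   }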
    class InstructionAccurateScope {
    public:
        explicit InstructionAccurateScope(TurboAssembler* tasm, size_t count = 0)
            : tasm_(tasm)
#ifdef DEBUG
            , size_(count * kInstrSize)
#endif
        {
            // Before blocking the const pool, see if it needs to be emitted.
            tasm_->CheckConstPool(false, true);
            tasm_->CheckVeneerPool(false, true);

            tasm_->StartBlockPools();
#ifdef DEBUG
            if (count != 0) {
                tasm_->bind(&start_);
            }
            previous_allow_macro_instructions_ = tasm_->allow_macro_instructions();
            tasm_->set_allow_macro_instructions(false);
#endif
        }

        ~InstructionAccurateScope()
        {
            tasm_->EndBlockPools();
#ifdef DEBUG
            if (start_.is_bound()) {
                DCHECK(tasm_->SizeOfCodeGeneratedSince(&start_) == size_);
            }
            tasm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
#endif
        }

    private:
        TurboAssembler* tasm_;
#ifdef DEBUG
        size_t size_;
        Label start_;
        bool previous_allow_macro_instructions_;
#endif
    };

    // This scope utility allows scratch registers to be managed safely. The
    // TurboAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
    // registers. These registers can be allocated on demand, and will be returned
    // at the end of the scope.
    //
    // When the scope ends, the MacroAssembler's lists will be restored to their
    // original state, even if the lists were modified by some other means. Note
    // that this scope can be nested but the destructors need to run in the reverse
    // order to the constructors. We do not have assertions for this.
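    //
    // Hypothetical usage sketch (assuming 'tasm' is a TurboAssembler*):
    //   {
    //     UseScratchRegisterScope temps(tasm);
    //     Register scratch = temps.AcquireX();      // 64-bit scratch register.
    //     VRegister fp_scratch = temps.AcquireD();  // Double-precision scratch.
    //     // ... use the scratch registers ...
    //   }  // The registers are returned to the pools here.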
    class V8_EXPORT_PRIVATE UseScratchRegisterScope {
    public:
        explicit UseScratchRegisterScope(TurboAssembler* tasm)
            : available_(tasm->TmpList())
            , availablefp_(tasm->FPTmpList())
            , old_available_(available_->list())
            , old_availablefp_(availablefp_->list())
        {
            DCHECK_EQ(available_->type(), CPURegister::kRegister);
            DCHECK_EQ(availablefp_->type(), CPURegister::kVRegister);
        }

        V8_EXPORT_PRIVATE ~UseScratchRegisterScope();

        // Take a register from the appropriate temps list. It will be returned
        // automatically when the scope ends.
        Register AcquireW() { return AcquireNextAvailable(available_).W(); }
        Register AcquireX() { return AcquireNextAvailable(available_).X(); }
        VRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
        VRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
        VRegister AcquireQ() { return AcquireNextAvailable(availablefp_).Q(); }
        VRegister AcquireV(VectorFormat format)
        {
            return VRegister::Create(AcquireNextAvailable(availablefp_).code(), format);
        }

        Register AcquireSameSizeAs(const Register& reg);
        VRegister AcquireSameSizeAs(const VRegister& reg);

    private:
        V8_EXPORT_PRIVATE static CPURegister AcquireNextAvailable(
            CPURegList* available);

        // Available scratch registers.
        CPURegList* available_; // kRegister
        CPURegList* availablefp_; // kVRegister

        // The state of the available lists at the start of this scope.
        RegList old_available_; // kRegister
        RegList old_availablefp_; // kVRegister
    };

    MemOperand ContextMemOperand(Register context, int index = 0);
    MemOperand NativeContextMemOperand();

    // Encode and decode information about patchable inline SMI checks.
    class InlineSmiCheckInfo {
    public:
        explicit InlineSmiCheckInfo(Address info);

        bool HasSmiCheck() const { return smi_check_ != nullptr; }

        const Register& SmiRegister() const
        {
            return reg_;
        }

        Instruction* SmiCheck() const
        {
            return smi_check_;
        }

        int SmiCheckDelta() const { return smi_check_delta_; }

        // Use MacroAssembler::InlineData to emit information about patchable inline
        // SMI checks. The caller may specify 'reg' as NoReg and an unbound
        // 'smi_check' label to indicate that there is no inline SMI check. Note that
        // 'reg' cannot be sp.
        //
        // The generated patch information can be read using the InlineSmiCheckInfo
        // class.
        static void Emit(MacroAssembler* masm, const Register& reg,
            const Label* smi_check);

        // Emit information to indicate that there is no inline SMI check.
        static void EmitNotInlined(MacroAssembler* masm)
        {
            Label unbound;
            Emit(masm, NoReg, &unbound);
        }

    private:
        Register reg_;
        int smi_check_delta_;
        Instruction* smi_check_;

        // Fields in the data encoded by InlineData.

        // A width of 5 (Rd_width) for the SMI register precludes the use of sp,
        // since kSPRegInternalCode is 63. However, sp should never hold a SMI or be
        // used in a patchable check. The Emit() method checks this.
        //
        // Note that the total size of the fields is restricted by the underlying
        // storage size handled by the BitField class, which is a uint32_t.
        class RegisterBits : public BitField<unsigned, 0, 5> {
        };
        class DeltaBits : public BitField<uint32_t, 5, 32 - 5> {
        };
    };

} // namespace internal
} // namespace v8

#define ACCESS_MASM(masm) masm->

#endif // V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
