// Copyright 2013 the V8 project authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#if V8_TARGET_ARCH_ARM64

#include "src/arm64/assembler-arm64.h"

#include "src/arm64/assembler-arm64-inl.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/frame-constants.h"
#include "src/register-configuration.h"
#include "src/string-constants.h"

namespace v8 {
namespace internal {

    // -----------------------------------------------------------------------------
    // CpuFeatures implementation.

    void CpuFeatures::ProbeImpl(bool cross_compile)
    {
        // AArch64 has no configuration options, so no further probing is
        // required.
        supported_ = 0;

        // Only use statically determined features for cross compile (snapshot).
        if (cross_compile)
            return;

        // We used to probe for coherent cache support, but on older CPUs it
        // caused crashes (crbug.com/524337), and newer CPUs no longer have
        // the feature.
    }

    void CpuFeatures::PrintTarget() { }
    void CpuFeatures::PrintFeatures() { }

    // -----------------------------------------------------------------------------
    // CPURegList utilities.

    CPURegister CPURegList::PopLowestIndex()
    {
        DCHECK(IsValid());
        if (IsEmpty()) {
            return NoCPUReg;
        }
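        // The lowest set bit of list_ identifies the register with the lowest
        // code; e.g. a list holding {x2, x5} has list_ == 0b100100, so this
        // pops and returns x2.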
        int index = CountTrailingZeros(list_, kRegListSizeInBits);
        DCHECK((1LL << index) & list_);
        Remove(index);
        return CPURegister::Create(index, size_, type_);
    }

    CPURegister CPURegList::PopHighestIndex()
    {
        DCHECK(IsValid());
        if (IsEmpty()) {
            return NoCPUReg;
        }
        int index = CountLeadingZeros(list_, kRegListSizeInBits);
        index = kRegListSizeInBits - 1 - index;
        DCHECK((1LL << index) & list_);
        Remove(index);
        return CPURegister::Create(index, size_, type_);
    }

    void CPURegList::RemoveCalleeSaved()
    {
        if (type() == CPURegister::kRegister) {
            Remove(GetCalleeSaved(RegisterSizeInBits()));
        } else if (type() == CPURegister::kVRegister) {
            Remove(GetCalleeSavedV(RegisterSizeInBits()));
        } else {
            DCHECK_EQ(type(), CPURegister::kNoRegister);
            DCHECK(IsEmpty());
            // The list must already be empty, so do nothing.
        }
    }

    CPURegList CPURegList::GetCalleeSaved(int size)
    {
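        // Registers x19-x29 are callee-saved in the AAPCS64 calling
        // convention.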
        return CPURegList(CPURegister::kRegister, size, 19, 29);
    }

    CPURegList CPURegList::GetCalleeSavedV(int size)
    {
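        // Under AAPCS64 only the low 64 bits of v8-v15 (i.e. d8-d15) are
        // callee-saved.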
        return CPURegList(CPURegister::kVRegister, size, 8, 15);
    }

    CPURegList CPURegList::GetCallerSaved(int size)
    {
        // x18 is the platform register and is reserved for the use of platform ABIs.
        // Registers x0-x17 and lr (x30) are caller-saved.
        CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 17);
        list.Combine(lr);
        return list;
    }

    CPURegList CPURegList::GetCallerSavedV(int size)
    {
        // Registers d0-d7 and d16-d31 are caller-saved.
        CPURegList list = CPURegList(CPURegister::kVRegister, size, 0, 7);
        list.Combine(CPURegList(CPURegister::kVRegister, size, 16, 31));
        return list;
    }

    // This function defines the list of registers which are associated with a
    // safepoint slot. Safepoint register slots are saved contiguously on the stack.
    // MacroAssembler::SafepointRegisterStackIndex handles mapping from register
    // code to index in the safepoint register slots. Any change here can affect
    // this mapping.
    CPURegList CPURegList::GetSafepointSavedRegisters()
    {
        CPURegList list = CPURegList::GetCalleeSaved();
        list.Combine(
            CPURegList(CPURegister::kRegister, kXRegSizeInBits, kJSCallerSaved));

        // Note that unfortunately we can't use symbolic names for registers and have
        // to directly use register codes. This is because this function is used to
        // initialize some static variables and we can't rely on register variables
        // to be initialized due to static initialization order issues in C++.

        // Drop ip0 and ip1 (i.e. x16 and x17), as they should not be expected to be
        // preserved outside of the macro assembler.
        list.Remove(16);
        list.Remove(17);

        // x18 is the platform register and is reserved for the use of platform ABIs.

        // Add the link register (x30) to the safepoint list.
        list.Combine(30);

        return list;
    }

    // -----------------------------------------------------------------------------
    // Implementation of RelocInfo

    const int RelocInfo::kApplyMask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) | RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) | RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE);

    bool RelocInfo::IsCodedSpecially()
    {
        // The deserializer needs to know whether a pointer is specially coded. Being
        // specially coded on ARM64 means that it is an immediate branch.
        Instruction* instr = reinterpret_cast<Instruction*>(pc_);
        if (instr->IsLdrLiteralX()) {
            return false;
        } else {
            DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
            return true;
        }
    }

    bool RelocInfo::IsInConstantPool()
    {
        Instruction* instr = reinterpret_cast<Instruction*>(pc_);
        return instr->IsLdrLiteralX();
    }

    uint32_t RelocInfo::wasm_call_tag() const
    {
        DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
        Instruction* instr = reinterpret_cast<Instruction*>(pc_);
        if (instr->IsLdrLiteralX()) {
            return static_cast<uint32_t>(
                Memory<Address>(Assembler::target_pointer_address_at(pc_)));
        } else {
            DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
            return static_cast<uint32_t>(instr->ImmPCOffset() / kInstrSize);
        }
    }

    bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
        const CPURegister& reg3, const CPURegister& reg4,
        const CPURegister& reg5, const CPURegister& reg6,
        const CPURegister& reg7, const CPURegister& reg8)
    {
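        // Registers alias when they share a code within the same bank; e.g.
        // AreAliased(x0, w0) is true, while AreAliased(x0, d0) is false
        // because core and vector registers are tracked separately.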
        int number_of_valid_regs = 0;
        int number_of_valid_fpregs = 0;

        RegList unique_regs = 0;
        RegList unique_fpregs = 0;

        const CPURegister regs[] = { reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8 };

        for (unsigned i = 0; i < arraysize(regs); i++) {
            if (regs[i].IsRegister()) {
                number_of_valid_regs++;
                unique_regs |= regs[i].bit();
            } else if (regs[i].IsVRegister()) {
                number_of_valid_fpregs++;
                unique_fpregs |= regs[i].bit();
            } else {
                DCHECK(!regs[i].IsValid());
            }
        }

        int number_of_unique_regs = CountSetBits(unique_regs, sizeof(unique_regs) * kBitsPerByte);
        int number_of_unique_fpregs = CountSetBits(unique_fpregs, sizeof(unique_fpregs) * kBitsPerByte);

        DCHECK(number_of_valid_regs >= number_of_unique_regs);
        DCHECK(number_of_valid_fpregs >= number_of_unique_fpregs);

        return (number_of_valid_regs != number_of_unique_regs) || (number_of_valid_fpregs != number_of_unique_fpregs);
    }

    bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
        const CPURegister& reg3, const CPURegister& reg4,
        const CPURegister& reg5, const CPURegister& reg6,
        const CPURegister& reg7, const CPURegister& reg8)
    {
        DCHECK(reg1.IsValid());
        bool match = true;
        match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
        match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
        match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1);
        match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1);
        match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1);
        match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1);
        match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1);
        return match;
    }

    bool AreSameFormat(const VRegister& reg1, const VRegister& reg2,
        const VRegister& reg3, const VRegister& reg4)
    {
        DCHECK(reg1.IsValid());
        return (!reg2.IsValid() || reg2.IsSameFormat(reg1)) && (!reg3.IsValid() || reg3.IsSameFormat(reg1)) && (!reg4.IsValid() || reg4.IsSameFormat(reg1));
    }

    bool AreConsecutive(const VRegister& reg1, const VRegister& reg2,
        const VRegister& reg3, const VRegister& reg4)
    {
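        // Register codes wrap around, so (v31, v0) counts as a consecutive
        // pair.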
        DCHECK(reg1.IsValid());
        if (!reg2.IsValid()) {
            DCHECK(!reg3.IsValid() && !reg4.IsValid());
            return true;
        } else if (reg2.code() != ((reg1.code() + 1) % kNumberOfVRegisters)) {
            return false;
        }

        if (!reg3.IsValid()) {
            DCHECK(!reg4.IsValid());
            return true;
        } else if (reg3.code() != ((reg2.code() + 1) % kNumberOfVRegisters)) {
            return false;
        }

        if (!reg4.IsValid()) {
            return true;
        } else if (reg4.code() != ((reg3.code() + 1) % kNumberOfVRegisters)) {
            return false;
        }

        return true;
    }

    void Immediate::InitializeHandle(Handle<HeapObject> handle)
    {
        value_ = static_cast<intptr_t>(handle.address());
        rmode_ = RelocInfo::EMBEDDED_OBJECT;
    }

    bool Operand::NeedsRelocation(const Assembler* assembler) const
    {
        RelocInfo::Mode rmode = immediate_.rmode();

        if (RelocInfo::IsOnlyForSerializer(rmode)) {
            return assembler->options().record_reloc_info_for_serialization;
        }

        return !RelocInfo::IsNone(rmode);
    }

    bool ConstPool::AddSharedEntry(SharedEntryMap& entry_map, uint64_t data,
        int offset)
    {
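        // Returns true if a new entry was created (so relocation info still
        // needs to be written), or false if 'offset' was appended to an
        // existing shared entry.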
        auto existing = entry_map.find(data);
        if (existing == entry_map.end()) {
            entry_map[data] = static_cast<int>(entries_.size());
            entries_.push_back(std::make_pair(data, std::vector<int>(1, offset)));
            return true;
        }
        int index = existing->second;
        entries_[index].second.push_back(offset);
        return false;
    }

    // Constant Pool.
    bool ConstPool::RecordEntry(intptr_t data, RelocInfo::Mode mode)
    {
        DCHECK(mode != RelocInfo::CONST_POOL && mode != RelocInfo::VENEER_POOL && mode != RelocInfo::DEOPT_SCRIPT_OFFSET && mode != RelocInfo::DEOPT_INLINING_ID && mode != RelocInfo::DEOPT_REASON && mode != RelocInfo::DEOPT_ID);

        bool write_reloc_info = true;

        uint64_t raw_data = static_cast<uint64_t>(data);
        int offset = assm_->pc_offset();
        if (IsEmpty()) {
            first_use_ = offset;
        }

        if (RelocInfo::IsShareableRelocMode(mode)) {
            write_reloc_info = AddSharedEntry(shared_entries_, raw_data, offset);
        } else if (mode == RelocInfo::CODE_TARGET && raw_data != 0) {
            // A zero data value is a placeholder and must not be shared.
            write_reloc_info = AddSharedEntry(handle_to_index_map_, raw_data, offset);
        } else {
            entries_.push_back(std::make_pair(raw_data, std::vector<int>(1, offset)));
        }

        if (EntryCount() > Assembler::kApproxMaxPoolEntryCount) {
            // Request constant pool emission after the next instruction.
            assm_->SetNextConstPoolCheckIn(1);
        }

        return write_reloc_info;
    }

    int ConstPool::DistanceToFirstUse()
    {
        DCHECK_GE(first_use_, 0);
        return assm_->pc_offset() - first_use_;
    }

    int ConstPool::MaxPcOffset()
    {
        // If there are no pending entries in the pool, we can never get out
        // of range.
        if (IsEmpty())
            return kMaxInt;

        // Entries are not necessarily emitted in the order they are added,
        // so in the worst case the first constant pool use will be accessing
        // the last entry.
        return first_use_ + kMaxLoadLiteralRange - WorstCaseSize();
    }

    int ConstPool::WorstCaseSize()
    {
        if (IsEmpty())
            return 0;

        // Max size prologue:
        //   b   over
        //   ldr xzr, #pool_size
        //   blr xzr
        //   nop
        // All entries are 64-bit for now.
        return 4 * kInstrSize + EntryCount() * kSystemPointerSize;
    }

    int ConstPool::SizeIfEmittedAtCurrentPc(bool require_jump)
    {
        if (IsEmpty())
            return 0;

        // Prologue is:
        //   b   over  ;; if require_jump
        //   ldr xzr, #pool_size
        //   blr xzr
        //   nop       ;; if not 64-bit aligned
        int prologue_size = require_jump ? kInstrSize : 0;
        prologue_size += 2 * kInstrSize;
        prologue_size += IsAligned(assm_->pc_offset() + prologue_size, 8) ? 0 : kInstrSize;

        // All entries are 64-bit for now.
        return prologue_size + EntryCount() * kSystemPointerSize;
    }

    void ConstPool::Emit(bool require_jump)
    {
        DCHECK(!assm_->is_const_pool_blocked());
        // Prevent recursive pool emission and protect from veneer pools.
        Assembler::BlockPoolsScope block_pools(assm_);

        int size = SizeIfEmittedAtCurrentPc(require_jump);
        Label size_check;
        assm_->bind(&size_check);

        assm_->RecordConstPool(size);
        // Emit the constant pool. It is preceded by an optional branch if
        // require_jump and a header which will:
        //  1) Encode the size of the constant pool, for use by the disassembler.
        //  2) Terminate the program, to try to prevent execution from accidentally
        //     flowing into the constant pool.
        //  3) Align the pool entries to 64 bits.
        // The header is therefore made of up to three ARM64 instructions:
        //   ldr xzr, #<size of the constant pool in 32-bit words>
        //   blr xzr
        //   nop
        //
        // If executed, the header will likely segfault and lr will point to the
        // instruction following the offending blr.
        // TODO(all): Make the alignment part less fragile. Currently code is
        // allocated as a byte array so there are no guarantees the alignment will
        // be preserved on compaction. Currently it works as allocation seems to be
        // 64-bit aligned.

        // Emit branch if required
        Label after_pool;
        if (require_jump) {
            assm_->b(&after_pool);
        }

        // Emit the header.
        assm_->RecordComment("[ Constant Pool");
        EmitMarker();
        EmitGuard();
        assm_->Align(8);

        // Emit constant pool entries.
        // TODO(all): currently each relocated constant is 64 bits, consider adding
        // support for 32-bit entries.
        EmitEntries();
        assm_->RecordComment("]");

        if (after_pool.is_linked()) {
            assm_->bind(&after_pool);
        }

        DCHECK(assm_->SizeOfCodeGeneratedSince(&size_check) == static_cast<unsigned>(size));
    }

    void ConstPool::Clear()
    {
        shared_entries_.clear();
        handle_to_index_map_.clear();
        entries_.clear();
        first_use_ = -1;
    }

    void ConstPool::EmitMarker()
    {
        // A constant pool's size is expressed as a number of 32-bit words.
        // Currently all entries are 64-bit.
        // + 1 is for the crash guard.
        // + 0/1 for alignment.
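        // For example, a pool holding three entries at an unaligned pc
        // encodes 3 * 2 + 1 + 1 = 8 words.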
        int word_count = EntryCount() * 2 + 1 + (IsAligned(assm_->pc_offset(), 8) ? 0 : 1);
        assm_->Emit(LDR_x_lit | Assembler::ImmLLiteral(word_count) | Assembler::Rt(xzr));
    }

    MemOperand::PairResult MemOperand::AreConsistentForPair(
        const MemOperand& operandA,
        const MemOperand& operandB,
        int access_size_log2)
    {
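        // For example, with access_size_log2 == 3, operands [x0, #8] and
        // [x0, #16] are kPairAB, and the same operands swapped are kPairBA.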
        DCHECK_GE(access_size_log2, 0);
        DCHECK_LE(access_size_log2, 3);
        // Step one: check that the operands share the same base register,
        // that the addressing mode is Offset, and that operandA's offset is
        // a multiple of the access size.
        if (!operandA.base().Is(operandB.base()) || (operandA.addrmode() != Offset) || (operandB.addrmode() != Offset) || ((operandA.offset() & ((1 << access_size_log2) - 1)) != 0)) {
            return kNotPair;
        }
        // Step two: check that the offsets are contiguous and that the range
        // is OK for ldp/stp.
        if ((operandB.offset() == operandA.offset() + (1LL << access_size_log2)) && is_int7(operandA.offset() >> access_size_log2)) {
            return kPairAB;
        }
        if ((operandA.offset() == operandB.offset() + (1LL << access_size_log2)) && is_int7(operandB.offset() >> access_size_log2)) {
            return kPairBA;
        }
        return kNotPair;
    }

    void ConstPool::EmitGuard()
    {
#ifdef DEBUG
        Instruction* instr = reinterpret_cast<Instruction*>(assm_->pc());
        DCHECK(instr->preceding()->IsLdrLiteralX() && instr->preceding()->Rt() == xzr.code());
#endif
        assm_->EmitPoolGuard();
    }

    void ConstPool::EmitEntries()
    {
        DCHECK(IsAligned(assm_->pc_offset(), 8));

        // Emit entries.
        for (const auto& entry : entries_) {
            for (const auto& pc : entry.second) {
                Instruction* instr = assm_->InstructionAt(pc);

                // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
                DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
                instr->SetImmPCOffsetTarget(assm_->options(), assm_->pc());
            }

            assm_->dc64(entry.first);
        }
        Clear();
    }

    // Assembler
    Assembler::Assembler(const AssemblerOptions& options,
        std::unique_ptr<AssemblerBuffer> buffer)
        : AssemblerBase(options, std::move(buffer))
        , constpool_(this)
        , unresolved_branches_()
    {
        const_pool_blocked_nesting_ = 0;
        veneer_pool_blocked_nesting_ = 0;
        Reset();
    }

    Assembler::~Assembler()
    {
        DCHECK(constpool_.IsEmpty());
        DCHECK_EQ(const_pool_blocked_nesting_, 0);
        DCHECK_EQ(veneer_pool_blocked_nesting_, 0);
    }

    void Assembler::AbortedCodeGeneration() { constpool_.Clear(); }

    void Assembler::Reset()
    {
#ifdef DEBUG
        DCHECK((pc_ >= buffer_start_) && (pc_ < buffer_start_ + buffer_->size()));
        DCHECK_EQ(const_pool_blocked_nesting_, 0);
        DCHECK_EQ(veneer_pool_blocked_nesting_, 0);
        DCHECK(unresolved_branches_.empty());
        memset(buffer_start_, 0, pc_ - buffer_start_);
#endif
        pc_ = buffer_start_;
        reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
        constpool_.Clear();
        next_constant_pool_check_ = 0;
        next_veneer_pool_check_ = kMaxInt;
        no_const_pool_before_ = 0;
    }

    void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate)
    {
        DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
        for (auto& request : heap_object_requests_) {
            Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
            switch (request.kind()) {
            case HeapObjectRequest::kHeapNumber: {
                Handle<HeapObject> object = isolate->factory()->NewHeapNumber(
                    request.heap_number(), AllocationType::kOld);
                set_target_address_at(pc, 0 /* unused */, object.address());
                break;
            }
            case HeapObjectRequest::kStringConstant: {
                const StringConstantBase* str = request.string();
                CHECK_NOT_NULL(str);
                set_target_address_at(pc, 0 /* unused */,
                    str->AllocateStringConstant(isolate).address());
                break;
            }
            }
        }
    }

    void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
        SafepointTableBuilder* safepoint_table_builder,
        int handler_table_offset)
    {
        // Emit constant pool if necessary.
        CheckConstPool(true, false);
        DCHECK(constpool_.IsEmpty());

        int code_comments_size = WriteCodeComments();

        AllocateAndInstallRequestedHeapObjects(isolate);

        // Set up code descriptor.
        // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to
        // this point to make CodeDesc initialization less fiddly.

        static constexpr int kConstantPoolSize = 0;
        const int instruction_size = pc_offset();
        const int code_comments_offset = instruction_size - code_comments_size;
        const int constant_pool_offset = code_comments_offset - kConstantPoolSize;
        const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
            ? constant_pool_offset
            : handler_table_offset;
        const int safepoint_table_offset = (safepoint_table_builder == kNoSafepointTable)
            ? handler_table_offset2
            : safepoint_table_builder->GetCodeOffset();
        const int reloc_info_offset = static_cast<int>(reloc_info_writer.pos() - buffer_->start());
        CodeDesc::Initialize(desc, this, safepoint_table_offset,
            handler_table_offset2, constant_pool_offset,
            code_comments_offset, reloc_info_offset);
    }

    void Assembler::Align(int m)
    {
        DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
        while ((pc_offset() & (m - 1)) != 0) {
            nop();
        }
    }

    void Assembler::CodeTargetAlign()
    {
        // Preferred alignment of jump targets on some ARM chips.
        Align(8);
    }

    void Assembler::CheckLabelLinkChain(Label const* label)
    {
#ifdef DEBUG
        if (label->is_linked()) {
            static const int kMaxLinksToCheck = 64; // Avoid O(n^2) behaviour.
            int links_checked = 0;
            int64_t linkoffset = label->pos();
            bool end_of_chain = false;
            while (!end_of_chain) {
                if (++links_checked > kMaxLinksToCheck)
                    break;
                Instruction* link = InstructionAt(linkoffset);
                int64_t linkpcoffset = link->ImmPCOffset();
                int64_t prevlinkoffset = linkoffset + linkpcoffset;

                end_of_chain = (linkoffset == prevlinkoffset);
                linkoffset = linkoffset + linkpcoffset;
            }
        }
#endif
    }

    void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
        Label* label,
        Instruction* label_veneer)
    {
        DCHECK(label->is_linked());

        CheckLabelLinkChain(label);

        Instruction* link = InstructionAt(label->pos());
        Instruction* prev_link = link;
        Instruction* next_link;
        bool end_of_chain = false;

        while (link != branch && !end_of_chain) {
            next_link = link->ImmPCOffsetTarget();
            end_of_chain = (link == next_link);
            prev_link = link;
            link = next_link;
        }

        DCHECK(branch == link);
        next_link = branch->ImmPCOffsetTarget();

        if (branch == prev_link) {
            // The branch is the first instruction in the chain.
            if (branch == next_link) {
                // It is also the last instruction in the chain, so it is the only branch
                // currently referring to this label.
                label->Unuse();
            } else {
                label->link_to(
                    static_cast<int>(reinterpret_cast<byte*>(next_link) - buffer_start_));
            }

        } else if (branch == next_link) {
            // The branch is the last (but not also the first) instruction in the chain.
            prev_link->SetImmPCOffsetTarget(options(), prev_link);

        } else {
            // The branch is in the middle of the chain.
            if (prev_link->IsTargetInImmPCOffsetRange(next_link)) {
                prev_link->SetImmPCOffsetTarget(options(), next_link);
            } else if (label_veneer != nullptr) {
                // Use the veneer for all previous links in the chain.
                prev_link->SetImmPCOffsetTarget(options(), prev_link);

                end_of_chain = false;
                link = next_link;
                while (!end_of_chain) {
                    next_link = link->ImmPCOffsetTarget();
                    end_of_chain = (link == next_link);
                    link->SetImmPCOffsetTarget(options(), label_veneer);
                    link = next_link;
                }
            } else {
                // The CHECK below will fail.
                // Some other work could be attempted to fix up the chain, but it
                // would be rather complicated. If we crash here, we may want to
                // consider using a mechanism other than a chain of branches.
                //
                // Note that this situation currently should not happen, as we always call
                // this function with a veneer to the target label.
                // However this could happen with a MacroAssembler in the following state:
                //    [previous code]
                //    B(label);
                //    [20KB code]
                //    Tbz(label);   // First tbz. Pointing to unconditional branch.
                //    [20KB code]
                //    Tbz(label);   // Second tbz. Pointing to the first tbz.
                //    [more code]
                // and this function is called to remove the first tbz from the label link
                // chain. Since tbz has a range of +-32KB, the second tbz cannot point to
                // the unconditional branch.
                CHECK(prev_link->IsTargetInImmPCOffsetRange(next_link));
                UNREACHABLE();
            }
        }

        CheckLabelLinkChain(label);
    }

    void Assembler::bind(Label* label)
    {
        // Bind label to the address at pc_. All instructions (most likely branches)
        // that are linked to this label will be updated to point to the newly-bound
        // label.

        DCHECK(!label->is_near_linked());
        DCHECK(!label->is_bound());

        DeleteUnresolvedBranchInfoForLabel(label);

        // If the label is linked, the link chain looks something like this:
        //
        // |--I----I-------I-------L
        // |---------------------->| pc_offset
        // |-------------->|         linkoffset = label->pos()
        //         |<------|         link->ImmPCOffset()
        // |------>|                 prevlinkoffset = linkoffset + link->ImmPCOffset()
        //
        // On each iteration, the last link is updated and then removed from the
        // chain until only one remains. At that point, the label is bound.
        //
        // If the label is not linked, no preparation is required before binding.
        while (label->is_linked()) {
            int linkoffset = label->pos();
            Instruction* link = InstructionAt(linkoffset);
            int prevlinkoffset = linkoffset + static_cast<int>(link->ImmPCOffset());

            CheckLabelLinkChain(label);

            DCHECK_GE(linkoffset, 0);
            DCHECK(linkoffset < pc_offset());
            DCHECK((linkoffset > prevlinkoffset) || (linkoffset - prevlinkoffset == kStartOfLabelLinkChain));
            DCHECK_GE(prevlinkoffset, 0);

            // Update the link to point to the label.
            if (link->IsUnresolvedInternalReference()) {
                // Internal references do not get patched to an instruction but directly
                // to an address.
                internal_reference_positions_.push_back(linkoffset);
                PatchingAssembler patcher(options(), reinterpret_cast<byte*>(link), 2);
                patcher.dc64(reinterpret_cast<uintptr_t>(pc_));
            } else {
                link->SetImmPCOffsetTarget(options(),
                    reinterpret_cast<Instruction*>(pc_));
            }

            // Link the label to the previous link in the chain.
            if (linkoffset - prevlinkoffset == kStartOfLabelLinkChain) {
                // We hit kStartOfLabelLinkChain, so the chain is fully processed.
                label->Unuse();
            } else {
                // Update the label for the next iteration.
                label->link_to(prevlinkoffset);
            }
        }
        label->bind_to(pc_offset());

        DCHECK(label->is_bound());
        DCHECK(!label->is_linked());
    }

    int Assembler::LinkAndGetByteOffsetTo(Label* label)
    {
        DCHECK_EQ(sizeof(*pc_), 1);
        CheckLabelLinkChain(label);

        int offset;
        if (label->is_bound()) {
            // The label is bound, so it does not need to be updated. Referring
            // instructions must link directly to the label as they will not be
            // updated.
            //
            // In this case, label->pos() returns the offset of the label from the
            // start of the buffer.
            //
            // Note that offset can be zero for self-referential instructions. (This
            // could be useful for ADR, for example.)
            offset = label->pos() - pc_offset();
            DCHECK_LE(offset, 0);
        } else {
            if (label->is_linked()) {
                // The label is linked, so the referring instruction should be added onto
                // the end of the label's link chain.
                //
                // In this case, label->pos() returns the offset of the last linked
                // instruction from the start of the buffer.
                offset = label->pos() - pc_offset();
                DCHECK_NE(offset, kStartOfLabelLinkChain);
                // Note that the offset here needs to be PC-relative only so that the
                // first instruction in a buffer can link to an unbound label. Otherwise,
                // the offset would be 0 for this case, and 0 is reserved for
                // kStartOfLabelLinkChain.
            } else {
                // The label is unused, so it now becomes linked and the referring
                // instruction is at the start of the new link chain.
                offset = kStartOfLabelLinkChain;
            }
            // The instruction at pc is now the last link in the label's chain.
            label->link_to(pc_offset());
        }

        return offset;
    }

    void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label)
    {
        DCHECK(label->is_linked());
        CheckLabelLinkChain(label);

        int link_offset = label->pos();
        int link_pcoffset;
        bool end_of_chain = false;

        while (!end_of_chain) {
            Instruction* link = InstructionAt(link_offset);
            link_pcoffset = static_cast<int>(link->ImmPCOffset());

            // ADR instructions are not handled by veneers.
            if (link->IsImmBranch()) {
                int max_reachable_pc = static_cast<int>(InstructionOffset(link) + Instruction::ImmBranchRange(link->BranchType()));
                typedef std::multimap<int, FarBranchInfo>::iterator unresolved_info_it;
                std::pair<unresolved_info_it, unresolved_info_it> range;
                range = unresolved_branches_.equal_range(max_reachable_pc);
                unresolved_info_it it;
                for (it = range.first; it != range.second; ++it) {
                    if (it->second.pc_offset_ == link_offset) {
                        unresolved_branches_.erase(it);
                        break;
                    }
                }
            }

            end_of_chain = (link_pcoffset == 0);
            link_offset = link_offset + link_pcoffset;
        }
    }

    void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label)
    {
        if (unresolved_branches_.empty()) {
            DCHECK_EQ(next_veneer_pool_check_, kMaxInt);
            return;
        }

        if (label->is_linked()) {
            // Branches to this label will be resolved when the label is bound, normally
            // just after all the associated info has been deleted.
            DeleteUnresolvedBranchInfoForLabelTraverse(label);
        }
        if (unresolved_branches_.empty()) {
            next_veneer_pool_check_ = kMaxInt;
        } else {
            next_veneer_pool_check_ = unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
        }
    }

    void Assembler::StartBlockConstPool()
    {
        if (const_pool_blocked_nesting_++ == 0) {
            // Prevent constant pool checks happening by setting the next check to
            // the biggest possible offset.
            next_constant_pool_check_ = kMaxInt;
        }
    }

    void Assembler::EndBlockConstPool()
    {
        if (--const_pool_blocked_nesting_ == 0) {
            // Check the constant pool hasn't been blocked for too long.
            DCHECK(pc_offset() < constpool_.MaxPcOffset());
            // Two cases:
            //  * no_const_pool_before_ >= next_constant_pool_check_ and the emission is
            //    still blocked
            //  * no_const_pool_before_ < next_constant_pool_check_ and the next emit
            //    will trigger a check.
            next_constant_pool_check_ = no_const_pool_before_;
        }
    }

    bool Assembler::is_const_pool_blocked() const
    {
        return (const_pool_blocked_nesting_ > 0) || (pc_offset() < no_const_pool_before_);
    }

    bool Assembler::IsConstantPoolAt(Instruction* instr)
    {
        // The constant pool marker is made of two instructions. These instructions
        // will never be emitted by the JIT, so checking for the first one is enough:
        // 0: ldr xzr, #<size of pool>
        bool result = instr->IsLdrLiteralX() && (instr->Rt() == kZeroRegCode);

        // It is still worth asserting the marker is complete.
        // 4: blr xzr
        DCHECK(!result || (instr->following()->IsBranchAndLinkToRegister() && instr->following()->Rn() == kZeroRegCode));

        return result;
    }

    int Assembler::ConstantPoolSizeAt(Instruction* instr)
    {
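        // Returns the size to skip in 32-bit words, or -1 if 'instr' does not
        // point at a constant pool marker.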
#ifdef USE_SIMULATOR
        // Assembler::debug() embeds constants directly into the instruction stream.
        // Although this is not a genuine constant pool, treat it like one to avoid
        // disassembling the constants.
        if ((instr->Mask(ExceptionMask) == HLT) && (instr->ImmException() == kImmExceptionIsDebug)) {
            const char* message = reinterpret_cast<const char*>(
                instr->InstructionAtOffset(kDebugMessageOffset));
            int size = static_cast<int>(kDebugMessageOffset + strlen(message) + 1);
            return RoundUp(size, kInstrSize) / kInstrSize;
        }
        // Same for printf support, see MacroAssembler::CallPrintf().
        if ((instr->Mask(ExceptionMask) == HLT) && (instr->ImmException() == kImmExceptionIsPrintf)) {
            return kPrintfLength / kInstrSize;
        }
#endif
        if (IsConstantPoolAt(instr)) {
            return instr->ImmLLiteral();
        } else {
            return -1;
        }
    }

    void Assembler::EmitPoolGuard()
    {
        // We must generate only one instruction as this is used in scopes that
        // control the size of the code generated.
        Emit(BLR | Rn(xzr));
    }

    void Assembler::StartBlockVeneerPool()
    {
        ++veneer_pool_blocked_nesting_;
    }

    void Assembler::EndBlockVeneerPool()
    {
        if (--veneer_pool_blocked_nesting_ == 0) {
            // Check the veneer pool hasn't been blocked for too long.
            DCHECK(unresolved_branches_.empty() || (pc_offset() < unresolved_branches_first_limit()));
        }
    }

    void Assembler::br(const Register& xn)
    {
        DCHECK(xn.Is64Bits());
        Emit(BR | Rn(xn));
    }

    void Assembler::blr(const Register& xn)
    {
        DCHECK(xn.Is64Bits());
        // The pattern 'blr xzr' is used as a guard to detect when execution falls
        // through the constant pool. It should not be emitted.
        DCHECK(!xn.Is(xzr));
        Emit(BLR | Rn(xn));
    }

    void Assembler::ret(const Register& xn)
    {
        DCHECK(xn.Is64Bits());
        Emit(RET | Rn(xn));
    }

    void Assembler::b(int imm26)
    {
        Emit(B | ImmUncondBranch(imm26));
    }

    void Assembler::b(Label* label)
    {
        b(LinkAndGetInstructionOffsetTo(label));
    }

    void Assembler::b(int imm19, Condition cond)
    {
        Emit(B_cond | ImmCondBranch(imm19) | cond);
    }

    void Assembler::b(Label* label, Condition cond)
    {
        b(LinkAndGetInstructionOffsetTo(label), cond);
    }

    void Assembler::bl(int imm26)
    {
        Emit(BL | ImmUncondBranch(imm26));
    }

    void Assembler::bl(Label* label)
    {
        bl(LinkAndGetInstructionOffsetTo(label));
    }

    void Assembler::cbz(const Register& rt,
        int imm19)
    {
        Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
    }

    void Assembler::cbz(const Register& rt,
        Label* label)
    {
        cbz(rt, LinkAndGetInstructionOffsetTo(label));
    }

    void Assembler::cbnz(const Register& rt,
        int imm19)
    {
        Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
    }

    void Assembler::cbnz(const Register& rt,
        Label* label)
    {
        cbnz(rt, LinkAndGetInstructionOffsetTo(label));
    }

    void Assembler::tbz(const Register& rt,
        unsigned bit_pos,
        int imm14)
    {
        DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
        Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
    }

    void Assembler::tbz(const Register& rt,
        unsigned bit_pos,
        Label* label)
    {
        tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
    }

    void Assembler::tbnz(const Register& rt,
        unsigned bit_pos,
        int imm14)
    {
        DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
        Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
    }

    void Assembler::tbnz(const Register& rt,
        unsigned bit_pos,
        Label* label)
    {
        tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
    }

    void Assembler::adr(const Register& rd, int imm21)
    {
        DCHECK(rd.Is64Bits());
        Emit(ADR | ImmPCRelAddress(imm21) | Rd(rd));
    }

    void Assembler::adr(const Register& rd, Label* label)
    {
        adr(rd, LinkAndGetByteOffsetTo(label));
    }

    void Assembler::nop(NopMarkerTypes n)
    {
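        // Marker nops are encoded as 'mov xn, xn', which is architecturally a
        // no-op but lets tools such as the simulator recognize the marker n.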
        DCHECK((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
        mov(Register::XRegFromCode(n), Register::XRegFromCode(n));
    }

    void Assembler::add(const Register& rd,
        const Register& rn,
        const Operand& operand)
    {
        AddSub(rd, rn, operand, LeaveFlags, ADD);
    }

    void Assembler::adds(const Register& rd,
        const Register& rn,
        const Operand& operand)
    {
        AddSub(rd, rn, operand, SetFlags, ADD);
    }

    void Assembler::cmn(const Register& rn,
        const Operand& operand)
    {
        Register zr = AppropriateZeroRegFor(rn);
        adds(zr, rn, operand);
    }

    void Assembler::sub(const Register& rd,
        const Register& rn,
        const Operand& operand)
    {
        AddSub(rd, rn, operand, LeaveFlags, SUB);
    }

    void Assembler::subs(const Register& rd,
        const Register& rn,
        const Operand& operand)
    {
        AddSub(rd, rn, operand, SetFlags, SUB);
    }

    void Assembler::cmp(const Register& rn, const Operand& operand)
    {
        Register zr = AppropriateZeroRegFor(rn);
        subs(zr, rn, operand);
    }

    void Assembler::neg(const Register& rd, const Operand& operand)
    {
        Register zr = AppropriateZeroRegFor(rd);
        sub(rd, zr, operand);
    }

    void Assembler::negs(const Register& rd, const Operand& operand)
    {
        Register zr = AppropriateZeroRegFor(rd);
        subs(rd, zr, operand);
    }

    void Assembler::adc(const Register& rd,
        const Register& rn,
        const Operand& operand)
    {
        AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC);
    }

    void Assembler::adcs(const Register& rd,
        const Register& rn,
        const Operand& operand)
    {
        AddSubWithCarry(rd, rn, operand, SetFlags, ADC);
    }

    void Assembler::sbc(const Register& rd,
        const Register& rn,
        const Operand& operand)
    {
        AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC);
    }

    void Assembler::sbcs(const Register& rd,
        const Register& rn,
        const Operand& operand)
    {
        AddSubWithCarry(rd, rn, operand, SetFlags, SBC);
    }

    void Assembler::ngc(const Register& rd, const Operand& operand)
    {
        Register zr = AppropriateZeroRegFor(rd);
        sbc(rd, zr, operand);
    }

    void Assembler::ngcs(const Register& rd, const Operand& operand)
    {
        Register zr = AppropriateZeroRegFor(rd);
        sbcs(rd, zr, operand);
    }

    // Logical instructions.
    void Assembler::and_(const Register& rd,
        const Register& rn,
        const Operand& operand)
    {
        Logical(rd, rn, operand, AND);
    }

    void Assembler::ands(const Register& rd,
        const Register& rn,
        const Operand& operand)
    {
        Logical(rd, rn, operand, ANDS);
    }

    void Assembler::tst(const Register& rn,
        const Operand& operand)
    {
        ands(AppropriateZeroRegFor(rn), rn, operand);
    }

    void Assembler::bic(const Register& rd,
        const Register& rn,
        const Operand& operand)
    {
        Logical(rd, rn, operand, BIC);
    }

    void Assembler::bics(const Register& rd,
        const Register& rn,
        const Operand& operand)
    {
        Logical(rd, rn, operand, BICS);
    }

    void Assembler::orr(const Register& rd,
        const Register& rn,
        const Operand& operand)
    {
        Logical(rd, rn, operand, ORR);
    }

    void Assembler::orn(const Register& rd,
        const Register& rn,
        const Operand& operand)
    {
        Logical(rd, rn, operand, ORN);
    }

    void Assembler::eor(const Register& rd,
        const Register& rn,
        const Operand& operand)
    {
        Logical(rd, rn, operand, EOR);
    }

    void Assembler::eon(const Register& rd,
        const Register& rn,
        const Operand& operand)
    {
        Logical(rd, rn, operand, EON);
    }

    void Assembler::lslv(const Register& rd,
        const Register& rn,
        const Register& rm)
    {
        DCHECK(rd.SizeInBits() == rn.SizeInBits());
        DCHECK(rd.SizeInBits() == rm.SizeInBits());
        Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
    }

    void Assembler::lsrv(const Register& rd,
        const Register& rn,
        const Register& rm)
    {
        DCHECK(rd.SizeInBits() == rn.SizeInBits());
        DCHECK(rd.SizeInBits() == rm.SizeInBits());
        Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
    }

    void Assembler::asrv(const Register& rd,
        const Register& rn,
        const Register& rm)
    {
        DCHECK(rd.SizeInBits() == rn.SizeInBits());
        DCHECK(rd.SizeInBits() == rm.SizeInBits());
        Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
    }

    void Assembler::rorv(const Register& rd,
        const Register& rn,
        const Register& rm)
    {
        DCHECK(rd.SizeInBits() == rn.SizeInBits());
        DCHECK(rd.SizeInBits() == rm.SizeInBits());
        Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
    }

    // Bitfield operations.
    void Assembler::bfm(const Register& rd, const Register& rn, int immr,
        int imms)
    {
        DCHECK(rd.SizeInBits() == rn.SizeInBits());
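        // The N bit of the bitfield encoding must match the sf bit: 1 for X
        // registers, 0 for W registers. Derive it directly from SF(rd).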
        Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
        Emit(SF(rd) | BFM | N | ImmR(immr, rd.SizeInBits()) | ImmS(imms, rn.SizeInBits()) | Rn(rn) | Rd(rd));
    }

    void Assembler::sbfm(const Register& rd, const Register& rn, int immr,
        int imms)
    {
        DCHECK(rd.Is64Bits() || rn.Is32Bits());
        Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
        Emit(SF(rd) | SBFM | N | ImmR(immr, rd.SizeInBits()) | ImmS(imms, rn.SizeInBits()) | Rn(rn) | Rd(rd));
    }

    void Assembler::ubfm(const Register& rd, const Register& rn, int immr,
        int imms)
    {
        DCHECK(rd.SizeInBits() == rn.SizeInBits());
        Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
        Emit(SF(rd) | UBFM | N | ImmR(immr, rd.SizeInBits()) | ImmS(imms, rn.SizeInBits()) | Rn(rn) | Rd(rd));
    }

    void Assembler::extr(const Register& rd, const Register& rn, const Register& rm,
        int lsb)
    {
        DCHECK(rd.SizeInBits() == rn.SizeInBits());
        DCHECK(rd.SizeInBits() == rm.SizeInBits());
        Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
        Emit(SF(rd) | EXTR | N | Rm(rm) | ImmS(lsb, rn.SizeInBits()) | Rn(rn) | Rd(rd));
    }

    void Assembler::csel(const Register& rd,
        const Register& rn,
        const Register& rm,
        Condition cond)
    {
        ConditionalSelect(rd, rn, rm, cond, CSEL);
    }

    void Assembler::csinc(const Register& rd,
        const Register& rn,
        const Register& rm,
        Condition cond)
    {
        ConditionalSelect(rd, rn, rm, cond, CSINC);
    }

    void Assembler::csinv(const Register& rd,
        const Register& rn,
        const Register& rm,
        Condition cond)
    {
        ConditionalSelect(rd, rn, rm, cond, CSINV);
    }

    void Assembler::csneg(const Register& rd,
        const Register& rn,
        const Register& rm,
        Condition cond)
    {
        ConditionalSelect(rd, rn, rm, cond, CSNEG);
    }

    void Assembler::cset(const Register& rd, Condition cond)
    {
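        // cset rd, cond is the alias of csinc rd, zr, zr, !cond: rd becomes 1
        // if 'cond' holds and 0 otherwise.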
        DCHECK((cond != al) && (cond != nv));
        Register zr = AppropriateZeroRegFor(rd);
        csinc(rd, zr, zr, NegateCondition(cond));
    }

    void Assembler::csetm(const Register& rd, Condition cond)
    {
        DCHECK((cond != al) && (cond != nv));
        Register zr = AppropriateZeroRegFor(rd);
        csinv(rd, zr, zr, NegateCondition(cond));
    }

    void Assembler::cinc(const Register& rd, const Register& rn, Condition cond)
    {
        DCHECK((cond != al) && (cond != nv));
        csinc(rd, rn, rn, NegateCondition(cond));
    }

    void Assembler::cinv(const Register& rd, const Register& rn, Condition cond)
    {
        DCHECK((cond != al) && (cond != nv));
        csinv(rd, rn, rn, NegateCondition(cond));
    }

    void Assembler::cneg(const Register& rd, const Register& rn, Condition cond)
    {
        DCHECK((cond != al) && (cond != nv));
        csneg(rd, rn, rn, NegateCondition(cond));
    }

    void Assembler::ConditionalSelect(const Register& rd,
        const Register& rn,
        const Register& rm,
        Condition cond,
        ConditionalSelectOp op)
    {
        DCHECK(rd.SizeInBits() == rn.SizeInBits());
        DCHECK(rd.SizeInBits() == rm.SizeInBits());
        Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
    }

    void Assembler::ccmn(const Register& rn,
        const Operand& operand,
        StatusFlags nzcv,
        Condition cond)
    {
        ConditionalCompare(rn, operand, nzcv, cond, CCMN);
    }

    void Assembler::ccmp(const Register& rn,
        const Operand& operand,
        StatusFlags nzcv,
        Condition cond)
    {
        ConditionalCompare(rn, operand, nzcv, cond, CCMP);
    }

    void Assembler::DataProcessing3Source(const Register& rd,
        const Register& rn,
        const Register& rm,
        const Register& ra,
        DataProcessing3SourceOp op)
    {
        Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
    }

    void Assembler::mul(const Register& rd,
        const Register& rn,
        const Register& rm)
    {
        DCHECK(AreSameSizeAndType(rd, rn, rm));
        Register zr = AppropriateZeroRegFor(rn);
        DataProcessing3Source(rd, rn, rm, zr, MADD);
    }

    void Assembler::madd(const Register& rd,
        const Register& rn,
        const Register& rm,
        const Register& ra)
    {
        DCHECK(AreSameSizeAndType(rd, rn, rm, ra));
        DataProcessing3Source(rd, rn, rm, ra, MADD);
    }

    void Assembler::mneg(const Register& rd,
        const Register& rn,
        const Register& rm)
    {
        DCHECK(AreSameSizeAndType(rd, rn, rm));
        Register zr = AppropriateZeroRegFor(rn);
        DataProcessing3Source(rd, rn, rm, zr, MSUB);
    }

    void Assembler::msub(const Register& rd,
        const Register& rn,
        const Register& rm,
        const Register& ra)
    {
        DCHECK(AreSameSizeAndType(rd, rn, rm, ra));
        DataProcessing3Source(rd, rn, rm, ra, MSUB);
    }

    void Assembler::smaddl(const Register& rd,
        const Register& rn,
        const Register& rm,
        const Register& ra)
    {
        DCHECK(rd.Is64Bits() && ra.Is64Bits());
        DCHECK(rn.Is32Bits() && rm.Is32Bits());
        DataProcessing3Source(rd, rn, rm, ra, SMADDL_x);
    }

    void Assembler::smsubl(const Register& rd,
        const Register& rn,
        const Register& rm,
        const Register& ra)
    {
        DCHECK(rd.Is64Bits() && ra.Is64Bits());
        DCHECK(rn.Is32Bits() && rm.Is32Bits());
        DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x);
    }

    void Assembler::umaddl(const Register& rd,
        const Register& rn,
        const Register& rm,
        const Register& ra)
    {
        DCHECK(rd.Is64Bits() && ra.Is64Bits());
        DCHECK(rn.Is32Bits() && rm.Is32Bits());
        DataProcessing3Source(rd, rn, rm, ra, UMADDL_x);
    }

    void Assembler::umsubl(const Register& rd,
        const Register& rn,
        const Register& rm,
        const Register& ra)
    {
        DCHECK(rd.Is64Bits() && ra.Is64Bits());
        DCHECK(rn.Is32Bits() && rm.Is32Bits());
        DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x);
    }

    void Assembler::smull(const Register& rd,
        const Register& rn,
        const Register& rm)
    {
        DCHECK(rd.Is64Bits());
        DCHECK(rn.Is32Bits() && rm.Is32Bits());
        DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x);
    }

    void Assembler::smulh(const Register& rd,
        const Register& rn,
        const Register& rm)
    {
        DCHECK(AreSameSizeAndType(rd, rn, rm));
        DataProcessing3Source(rd, rn, rm, xzr, SMULH_x);
    }

    void Assembler::sdiv(const Register& rd,
        const Register& rn,
        const Register& rm)
    {
        DCHECK(rd.SizeInBits() == rn.SizeInBits());
        DCHECK(rd.SizeInBits() == rm.SizeInBits());
        Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
    }

    void Assembler::udiv(const Register& rd,
        const Register& rn,
        const Register& rm)
    {
        DCHECK(rd.SizeInBits() == rn.SizeInBits());
        DCHECK(rd.SizeInBits() == rm.SizeInBits());
        Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
    }

    void Assembler::rbit(const Register& rd,
        const Register& rn)
    {
        DataProcessing1Source(rd, rn, RBIT);
    }

    void Assembler::rev16(const Register& rd,
        const Register& rn)
    {
        DataProcessing1Source(rd, rn, REV16);
    }

    void Assembler::rev32(const Register& rd,
        const Register& rn)
    {
        DCHECK(rd.Is64Bits());
        DataProcessing1Source(rd, rn, REV);
    }

    void Assembler::rev(const Register& rd,
        const Register& rn)
    {
        DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);
    }

    void Assembler::clz(const Register& rd,
        const Register& rn)
    {
        DataProcessing1Source(rd, rn, CLZ);
    }

    void Assembler::cls(const Register& rd,
        const Register& rn)
    {
        DataProcessing1Source(rd, rn, CLS);
    }

    void Assembler::ldp(const CPURegister& rt,
        const CPURegister& rt2,
        const MemOperand& src)
    {
        LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
    }

    void Assembler::stp(const CPURegister& rt,
        const CPURegister& rt2,
        const MemOperand& dst)
    {
        LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
    }

    void Assembler::ldpsw(const Register& rt,
        const Register& rt2,
        const MemOperand& src)
    {
        DCHECK(rt.Is64Bits());
        LoadStorePair(rt, rt2, src, LDPSW_x);
    }

    void Assembler::LoadStorePair(const CPURegister& rt,
        const CPURegister& rt2,
        const MemOperand& addr,
        LoadStorePairOp op)
    {
        // 'rt' and 'rt2' can only be aliased for stores.
        DCHECK(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
        DCHECK(AreSameSizeAndType(rt, rt2));
        DCHECK(IsImmLSPair(addr.offset(), CalcLSPairDataSize(op)));
        int offset = static_cast<int>(addr.offset());

        Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) | ImmLSPair(offset, CalcLSPairDataSize(op));

        Instr addrmodeop;
        if (addr.IsImmediateOffset()) {
            addrmodeop = LoadStorePairOffsetFixed;
        } else {
            // Pre-index and post-index modes.
            DCHECK(!rt.Is(addr.base()));
            DCHECK(!rt2.Is(addr.base()));
            DCHECK_NE(addr.offset(), 0);
            if (addr.IsPreIndex()) {
                addrmodeop = LoadStorePairPreIndexFixed;
            } else {
                DCHECK(addr.IsPostIndex());
                addrmodeop = LoadStorePairPostIndexFixed;
            }
        }
        Emit(addrmodeop | memop);
    }

    // Memory instructions.
    void Assembler::ldrb(const Register& rt, const MemOperand& src)
    {
        LoadStore(rt, src, LDRB_w);
    }

    void Assembler::strb(const Register& rt, const MemOperand& dst)
    {
        LoadStore(rt, dst, STRB_w);
    }

    void Assembler::ldrsb(const Register& rt, const MemOperand& src)
    {
        LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w);
    }

    void Assembler::ldrh(const Register& rt, const MemOperand& src)
    {
        LoadStore(rt, src, LDRH_w);
    }

    void Assembler::strh(const Register& rt, const MemOperand& dst)
    {
        LoadStore(rt, dst, STRH_w);
    }

    void Assembler::ldrsh(const Register& rt, const MemOperand& src)
    {
        LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w);
    }

    void Assembler::ldr(const CPURegister& rt, const MemOperand& src)
    {
        LoadStore(rt, src, LoadOpFor(rt));
    }

    void Assembler::str(const CPURegister& rt, const MemOperand& src)
    {
        LoadStore(rt, src, StoreOpFor(rt));
    }

    void Assembler::ldrsw(const Register& rt, const MemOperand& src)
    {
        DCHECK(rt.Is64Bits());
        LoadStore(rt, src, LDRSW_x);
    }

    void Assembler::ldr_pcrel(const CPURegister& rt, int imm19)
    {
        // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a
        // constant pool. It should not be emitted.
        DCHECK(!rt.IsZero());
        Emit(LoadLiteralOpFor(rt) | ImmLLiteral(imm19) | Rt(rt));
    }

    Operand Operand::EmbeddedNumber(double number)
    {
        int32_t smi;
        if (DoubleToSmiInteger(number, &smi)) {
            return Operand(Immediate(Smi::FromInt(smi)));
        }
        Operand result(0, RelocInfo::EMBEDDED_OBJECT);
        result.heap_object_request_.emplace(number);
        DCHECK(result.IsHeapObjectRequest());
        return result;
    }

    Operand Operand::EmbeddedStringConstant(const StringConstantBase* str)
    {
        Operand result(0, RelocInfo::EMBEDDED_OBJECT);
        result.heap_object_request_.emplace(str);
        DCHECK(result.IsHeapObjectRequest());
        return result;
    }

    void Assembler::ldr(const CPURegister& rt, const Operand& operand)
    {
        if (operand.IsHeapObjectRequest()) {
            RequestHeapObject(operand.heap_object_request());
            ldr(rt, operand.immediate_for_heap_object_request());
        } else {
            ldr(rt, operand.immediate());
        }
    }

    void Assembler::ldr(const CPURegister& rt, const Immediate& imm)
    {
        // Currently we only support 64-bit literals.
        DCHECK(rt.Is64Bits());

        RecordRelocInfo(imm.rmode(), imm.value());
        BlockConstPoolFor(1);
        // The load will be patched when the constant pool is emitted; the
        // patching code expects a load literal with offset 0.
        ldr_pcrel(rt, 0);
    }
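
    // Sketch of how these pieces fit together: ldr(x0, Operand::EmbeddedNumber(1.5))
    // records a heap-object request plus relocation via the Operand overload
    // above, emits "ldr x0, pc+0" through ldr_pcrel(), and the constant pool
    // emitter later patches the literal offset.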

    void Assembler::ldar(const Register& rt, const Register& rn)
    {
        DCHECK(rn.Is64Bits());
        LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? LDAR_w : LDAR_x;
        Emit(op | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
    }

    void Assembler::ldaxr(const Register& rt, const Register& rn)
    {
        DCHECK(rn.Is64Bits());
        LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? LDAXR_w : LDAXR_x;
        Emit(op | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
    }

    void Assembler::stlr(const Register& rt, const Register& rn)
    {
        DCHECK(rn.Is64Bits());
        LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? STLR_w : STLR_x;
        Emit(op | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
    }

    void Assembler::stlxr(const Register& rs, const Register& rt,
        const Register& rn)
    {
        DCHECK(rn.Is64Bits());
        DCHECK(!rs.Is(rt) && !rs.Is(rn));
        LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? STLXR_w : STLXR_x;
        Emit(op | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
    }
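
    // The exclusive accessors above are typically paired in a load-linked /
    // store-conditional retry loop; a sketch (assuming the "__" shorthand):
    //   Label retry;
    //   __ bind(&retry);
    //   __ ldaxr(w0, x1);      // load-acquire exclusive from [x1]
    //   __ add(w0, w0, Operand(1));
    //   __ stlxr(w2, w0, x1);  // store-release exclusive; w2 = status (0 = ok)
    //   __ cbnz(w2, &retry);   // retry if the exclusive monitor was lost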

    void Assembler::ldarb(const Register& rt, const Register& rn)
    {
        DCHECK(rt.Is32Bits());
        DCHECK(rn.Is64Bits());
        Emit(LDAR_b | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
    }

    void Assembler::ldaxrb(const Register& rt, const Register& rn)
    {
        DCHECK(rt.Is32Bits());
        DCHECK(rn.Is64Bits());
        Emit(LDAXR_b | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
    }

    void Assembler::stlrb(const Register& rt, const Register& rn)
    {
        DCHECK(rt.Is32Bits());
        DCHECK(rn.Is64Bits());
        Emit(STLR_b | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
    }

    void Assembler::stlxrb(const Register& rs, const Register& rt,
        const Register& rn)
    {
        DCHECK(rs.Is32Bits());
        DCHECK(rt.Is32Bits());
        DCHECK(rn.Is64Bits());
        DCHECK(!rs.Is(rt) && !rs.Is(rn));
        Emit(STLXR_b | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
    }

    void Assembler::ldarh(const Register& rt, const Register& rn)
    {
        DCHECK(rt.Is32Bits());
        DCHECK(rn.Is64Bits());
        Emit(LDAR_h | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
    }

    void Assembler::ldaxrh(const Register& rt, const Register& rn)
    {
        DCHECK(rt.Is32Bits());
        DCHECK(rn.Is64Bits());
        Emit(LDAXR_h | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
    }

    void Assembler::stlrh(const Register& rt, const Register& rn)
    {
        DCHECK(rt.Is32Bits());
        DCHECK(rn.Is64Bits());
        Emit(STLR_h | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt));
    }

    void Assembler::stlxrh(const Register& rs, const Register& rt,
        const Register& rn)
    {
        DCHECK(rs.Is32Bits());
        DCHECK(rt.Is32Bits());
        DCHECK(rn.Is64Bits());
        DCHECK(!rs.Is(rt) && !rs.Is(rn));
        Emit(STLXR_h | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
    }

    void Assembler::NEON3DifferentL(const VRegister& vd, const VRegister& vn,
        const VRegister& vm, NEON3DifferentOp vop)
    {
        DCHECK(AreSameFormat(vn, vm));
        DCHECK((vn.Is1H() && vd.Is1S()) || (vn.Is1S() && vd.Is1D()) ||
               (vn.Is8B() && vd.Is8H()) || (vn.Is4H() && vd.Is4S()) ||
               (vn.Is2S() && vd.Is2D()) || (vn.Is16B() && vd.Is8H()) ||
               (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D()));
        Instr format, op = vop;
        if (vd.IsScalar()) {
            op |= NEON_Q | NEONScalar;
            format = SFormat(vn);
        } else {
            format = VFormat(vn);
        }
        Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
    }

    void Assembler::NEON3DifferentW(const VRegister& vd, const VRegister& vn,
        const VRegister& vm, NEON3DifferentOp vop)
    {
        DCHECK(AreSameFormat(vd, vn));
        DCHECK((vm.Is8B() && vd.Is8H()) || (vm.Is4H() && vd.Is4S()) ||
               (vm.Is2S() && vd.Is2D()) || (vm.Is16B() && vd.Is8H()) ||
               (vm.Is8H() && vd.Is4S()) || (vm.Is4S() && vd.Is2D()));
        Emit(VFormat(vm) | vop | Rm(vm) | Rn(vn) | Rd(vd));
    }

    void Assembler::NEON3DifferentHN(const VRegister& vd, const VRegister& vn,
        const VRegister& vm, NEON3DifferentOp vop)
    {
        DCHECK(AreSameFormat(vm, vn));
        DCHECK((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) ||
               (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) ||
               (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D()));
        Emit(VFormat(vd) | vop | Rm(vm) | Rn(vn) | Rd(vd));
    }

#define NEON_3DIFF_LONG_LIST(V)                                                  \
    V(pmull, NEON_PMULL, vn.IsVector() && vn.Is8B())                             \
    V(pmull2, NEON_PMULL2, vn.IsVector() && vn.Is16B())                          \
    V(saddl, NEON_SADDL, vn.IsVector() && vn.IsD())                              \
    V(saddl2, NEON_SADDL2, vn.IsVector() && vn.IsQ())                            \
    V(sabal, NEON_SABAL, vn.IsVector() && vn.IsD())                              \
    V(sabal2, NEON_SABAL2, vn.IsVector() && vn.IsQ())                            \
    V(uabal, NEON_UABAL, vn.IsVector() && vn.IsD())                              \
    V(uabal2, NEON_UABAL2, vn.IsVector() && vn.IsQ())                            \
    V(sabdl, NEON_SABDL, vn.IsVector() && vn.IsD())                              \
    V(sabdl2, NEON_SABDL2, vn.IsVector() && vn.IsQ())                            \
    V(uabdl, NEON_UABDL, vn.IsVector() && vn.IsD())                              \
    V(uabdl2, NEON_UABDL2, vn.IsVector() && vn.IsQ())                            \
    V(smlal, NEON_SMLAL, vn.IsVector() && vn.IsD())                              \
    V(smlal2, NEON_SMLAL2, vn.IsVector() && vn.IsQ())                            \
    V(umlal, NEON_UMLAL, vn.IsVector() && vn.IsD())                              \
    V(umlal2, NEON_UMLAL2, vn.IsVector() && vn.IsQ())                            \
    V(smlsl, NEON_SMLSL, vn.IsVector() && vn.IsD())                              \
    V(smlsl2, NEON_SMLSL2, vn.IsVector() && vn.IsQ())                            \
    V(umlsl, NEON_UMLSL, vn.IsVector() && vn.IsD())                              \
    V(umlsl2, NEON_UMLSL2, vn.IsVector() && vn.IsQ())                            \
    V(smull, NEON_SMULL, vn.IsVector() && vn.IsD())                              \
    V(smull2, NEON_SMULL2, vn.IsVector() && vn.IsQ())                            \
    V(umull, NEON_UMULL, vn.IsVector() && vn.IsD())                              \
    V(umull2, NEON_UMULL2, vn.IsVector() && vn.IsQ())                            \
    V(ssubl, NEON_SSUBL, vn.IsVector() && vn.IsD())                              \
    V(ssubl2, NEON_SSUBL2, vn.IsVector() && vn.IsQ())                            \
    V(uaddl, NEON_UADDL, vn.IsVector() && vn.IsD())                              \
    V(uaddl2, NEON_UADDL2, vn.IsVector() && vn.IsQ())                            \
    V(usubl, NEON_USUBL, vn.IsVector() && vn.IsD())                              \
    V(usubl2, NEON_USUBL2, vn.IsVector() && vn.IsQ())                            \
    V(sqdmlal, NEON_SQDMLAL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S())   \
    V(sqdmlal2, NEON_SQDMLAL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \
    V(sqdmlsl, NEON_SQDMLSL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S())   \
    V(sqdmlsl2, NEON_SQDMLSL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \
    V(sqdmull, NEON_SQDMULL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S())   \
    V(sqdmull2, NEON_SQDMULL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S())

#define DEFINE_ASM_FUNC(FN, OP, AS)                              \
    void Assembler::FN(const VRegister& vd, const VRegister& vn, \
        const VRegister& vm)                                     \
    {                                                            \
        DCHECK(AS);                                              \
        NEON3DifferentL(vd, vn, vm, OP);                         \
    }
    NEON_3DIFF_LONG_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC
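
    // For reference, each NEON_3DIFF_LONG_LIST entry expands through
    // DEFINE_ASM_FUNC to a checked wrapper; e.g. the smull line produces:
    //   void Assembler::smull(const VRegister& vd, const VRegister& vn,
    //       const VRegister& vm)
    //   {
    //       DCHECK(vn.IsVector() && vn.IsD());
    //       NEON3DifferentL(vd, vn, vm, NEON_SMULL);
    //   }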

#define NEON_3DIFF_HN_LIST(V)          \
    V(addhn, NEON_ADDHN, vd.IsD())     \
    V(addhn2, NEON_ADDHN2, vd.IsQ())   \
    V(raddhn, NEON_RADDHN, vd.IsD())   \
    V(raddhn2, NEON_RADDHN2, vd.IsQ()) \
    V(subhn, NEON_SUBHN, vd.IsD())     \
    V(subhn2, NEON_SUBHN2, vd.IsQ())   \
    V(rsubhn, NEON_RSUBHN, vd.IsD())   \
    V(rsubhn2, NEON_RSUBHN2, vd.IsQ())

#define DEFINE_ASM_FUNC(FN, OP, AS)                              \
    void Assembler::FN(const VRegister& vd, const VRegister& vn, \
        const VRegister& vm)                                     \
    {                                                            \
        DCHECK(AS);                                              \
        NEON3DifferentHN(vd, vn, vm, OP);                        \
    }
    NEON_3DIFF_HN_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC

    void Assembler::NEONPerm(const VRegister& vd, const VRegister& vn,
        const VRegister& vm, NEONPermOp op)
    {
        DCHECK(AreSameFormat(vd, vn, vm));
        DCHECK(!vd.Is1D());
        Emit(VFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
    }

    void Assembler::trn1(const VRegister& vd, const VRegister& vn,
        const VRegister& vm)
    {
        NEONPerm(vd, vn, vm, NEON_TRN1);
    }

    void Assembler::trn2(const VRegister& vd, const VRegister& vn,
        const VRegister& vm)
    {
        NEONPerm(vd, vn, vm, NEON_TRN2);
    }

    void Assembler::uzp1(const VRegister& vd, const VRegister& vn,
        const VRegister& vm)
    {
        NEONPerm(vd, vn, vm, NEON_UZP1);
    }

    void Assembler::uzp2(const VRegister& vd, const VRegister& vn,
        const VRegister& vm)
    {
        NEONPerm(vd, vn, vm, NEON_UZP2);
    }

    void Assembler::zip1(const VRegister& vd, const VRegister& vn,
        const VRegister& vm)
    {
        NEONPerm(vd, vn, vm, NEON_ZIP1);
    }

    void Assembler::zip2(const VRegister& vd, const VRegister& vn,
        const VRegister& vm)
    {
        NEONPerm(vd, vn, vm, NEON_ZIP2);
    }

    void Assembler::NEONShiftImmediate(const VRegister& vd, const VRegister& vn,
        NEONShiftImmediateOp op, int immh_immb)
    {
        DCHECK(AreSameFormat(vd, vn));
        Instr q, scalar;
        if (vn.IsScalar()) {
            q = NEON_Q;
            scalar = NEONScalar;
        } else {
            q = vd.IsD() ? 0 : NEON_Q;
            scalar = 0;
        }
        Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd));
    }

    void Assembler::NEONShiftLeftImmediate(const VRegister& vd, const VRegister& vn,
        int shift, NEONShiftImmediateOp op)
    {
        int laneSizeInBits = vn.LaneSizeInBits();
        DCHECK((shift >= 0) && (shift < laneSizeInBits));
        NEONShiftImmediate(vd, vn, op, (laneSizeInBits + shift) << 16);
    }

    void Assembler::NEONShiftRightImmediate(const VRegister& vd,
        const VRegister& vn, int shift,
        NEONShiftImmediateOp op)
    {
        int laneSizeInBits = vn.LaneSizeInBits();
        DCHECK((shift >= 1) && (shift <= laneSizeInBits));
        NEONShiftImmediate(vd, vn, op, ((2 * laneSizeInBits) - shift) << 16);
    }
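
    // The value shifted into place above is the combined immh:immb field
    // (bits 22:16), which encodes both the lane size and the shift amount.
    // For example, with 16-bit lanes (laneSizeInBits == 16):
    //   left shift by 3:   16 + 3 = 19      -> immh:immb = 0b0010011
    //   right shift by 3:  2 * 16 - 3 = 29  -> immh:immb = 0b0011101
    // The position of the leading set bit selects the lane size; the
    // remaining low bits carry the shift.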

    void Assembler::NEONShiftImmediateL(const VRegister& vd, const VRegister& vn,
        int shift, NEONShiftImmediateOp op)
    {
        int laneSizeInBits = vn.LaneSizeInBits();
        DCHECK((shift >= 0) && (shift < laneSizeInBits));
        int immh_immb = (laneSizeInBits + shift) << 16;

        DCHECK((vn.Is8B() && vd.Is8H()) || (vn.Is4H() && vd.Is4S()) ||
               (vn.Is2S() && vd.Is2D()) || (vn.Is16B() && vd.Is8H()) ||
               (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D()));
        Instr q;
        q = vn.IsD() ? 0 : NEON_Q;
        Emit(q | op | immh_immb | Rn(vn) | Rd(vd));
    }

    void Assembler::NEONShiftImmediateN(const VRegister& vd, const VRegister& vn,
        int shift, NEONShiftImmediateOp op)
    {
        Instr q, scalar;
        int laneSizeInBits = vd.LaneSizeInBits();
        DCHECK((shift >= 1) && (shift <= laneSizeInBits));
        int immh_immb = (2 * laneSizeInBits - shift) << 16;

        if (vn.IsScalar()) {
            DCHECK((vd.Is1B() && vn.Is1H()) || (vd.Is1H() && vn.Is1S()) ||
                   (vd.Is1S() && vn.Is1D()));
            q = NEON_Q;
            scalar = NEONScalar;
        } else {
            DCHECK((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) ||
                   (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) ||
                   (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D()));
            scalar = 0;
            q = vd.IsD() ? 0 : NEON_Q;
        }
        Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd));
    }

    void Assembler::shl(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vd.IsVector() || vd.Is1D());
        NEONShiftLeftImmediate(vd, vn, shift, NEON_SHL);
    }

    void Assembler::sli(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vd.IsVector() || vd.Is1D());
        NEONShiftLeftImmediate(vd, vn, shift, NEON_SLI);
    }

    void Assembler::sqshl(const VRegister& vd, const VRegister& vn, int shift)
    {
        NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHL_imm);
    }

    void Assembler::sqshlu(const VRegister& vd, const VRegister& vn, int shift)
    {
        NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHLU);
    }

    void Assembler::uqshl(const VRegister& vd, const VRegister& vn, int shift)
    {
        NEONShiftLeftImmediate(vd, vn, shift, NEON_UQSHL_imm);
    }

    void Assembler::sshll(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vn.IsD());
        NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL);
    }

    void Assembler::sshll2(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vn.IsQ());
        NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL);
    }

    void Assembler::sxtl(const VRegister& vd, const VRegister& vn)
    {
        sshll(vd, vn, 0);
    }

    void Assembler::sxtl2(const VRegister& vd, const VRegister& vn)
    {
        sshll2(vd, vn, 0);
    }

    void Assembler::ushll(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vn.IsD());
        NEONShiftImmediateL(vd, vn, shift, NEON_USHLL);
    }

    void Assembler::ushll2(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vn.IsQ());
        NEONShiftImmediateL(vd, vn, shift, NEON_USHLL);
    }

    void Assembler::uxtl(const VRegister& vd, const VRegister& vn)
    {
        ushll(vd, vn, 0);
    }

    void Assembler::uxtl2(const VRegister& vd, const VRegister& vn)
    {
        ushll2(vd, vn, 0);
    }

    void Assembler::sri(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vd.IsVector() || vd.Is1D());
        NEONShiftRightImmediate(vd, vn, shift, NEON_SRI);
    }

    void Assembler::sshr(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vd.IsVector() || vd.Is1D());
        NEONShiftRightImmediate(vd, vn, shift, NEON_SSHR);
    }

    void Assembler::ushr(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vd.IsVector() || vd.Is1D());
        NEONShiftRightImmediate(vd, vn, shift, NEON_USHR);
    }

    void Assembler::srshr(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vd.IsVector() || vd.Is1D());
        NEONShiftRightImmediate(vd, vn, shift, NEON_SRSHR);
    }

    void Assembler::urshr(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vd.IsVector() || vd.Is1D());
        NEONShiftRightImmediate(vd, vn, shift, NEON_URSHR);
    }

    void Assembler::ssra(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vd.IsVector() || vd.Is1D());
        NEONShiftRightImmediate(vd, vn, shift, NEON_SSRA);
    }

    void Assembler::usra(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vd.IsVector() || vd.Is1D());
        NEONShiftRightImmediate(vd, vn, shift, NEON_USRA);
    }

    void Assembler::srsra(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vd.IsVector() || vd.Is1D());
        NEONShiftRightImmediate(vd, vn, shift, NEON_SRSRA);
    }

    void Assembler::ursra(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vd.IsVector() || vd.Is1D());
        NEONShiftRightImmediate(vd, vn, shift, NEON_URSRA);
    }

    void Assembler::shrn(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vn.IsVector() && vd.IsD());
        NEONShiftImmediateN(vd, vn, shift, NEON_SHRN);
    }

    void Assembler::shrn2(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vn.IsVector() && vd.IsQ());
        NEONShiftImmediateN(vd, vn, shift, NEON_SHRN);
    }

    void Assembler::rshrn(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vn.IsVector() && vd.IsD());
        NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN);
    }

    void Assembler::rshrn2(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vn.IsVector() && vd.IsQ());
        NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN);
    }

    void Assembler::sqshrn(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
        NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN);
    }

    void Assembler::sqshrn2(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vn.IsVector() && vd.IsQ());
        NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN);
    }

    void Assembler::sqrshrn(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
        NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN);
    }

    void Assembler::sqrshrn2(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vn.IsVector() && vd.IsQ());
        NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN);
    }

    void Assembler::sqshrun(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
        NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN);
    }

    void Assembler::sqshrun2(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vn.IsVector() && vd.IsQ());
        NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN);
    }

    void Assembler::sqrshrun(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
        NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN);
    }

    void Assembler::sqrshrun2(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vn.IsVector() && vd.IsQ());
        NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN);
    }

    void Assembler::uqshrn(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
        NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN);
    }

    void Assembler::uqshrn2(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vn.IsVector() && vd.IsQ());
        NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN);
    }

    void Assembler::uqrshrn(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
        NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN);
    }

    void Assembler::uqrshrn2(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK(vn.IsVector() && vd.IsQ());
        NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN);
    }

    void Assembler::uaddw(const VRegister& vd, const VRegister& vn,
        const VRegister& vm)
    {
        DCHECK(vm.IsD());
        NEON3DifferentW(vd, vn, vm, NEON_UADDW);
    }

    void Assembler::uaddw2(const VRegister& vd, const VRegister& vn,
        const VRegister& vm)
    {
        DCHECK(vm.IsQ());
        NEON3DifferentW(vd, vn, vm, NEON_UADDW2);
    }

    void Assembler::saddw(const VRegister& vd, const VRegister& vn,
        const VRegister& vm)
    {
        DCHECK(vm.IsD());
        NEON3DifferentW(vd, vn, vm, NEON_SADDW);
    }

    void Assembler::saddw2(const VRegister& vd, const VRegister& vn,
        const VRegister& vm)
    {
        DCHECK(vm.IsQ());
        NEON3DifferentW(vd, vn, vm, NEON_SADDW2);
    }

    void Assembler::usubw(const VRegister& vd, const VRegister& vn,
        const VRegister& vm)
    {
        DCHECK(vm.IsD());
        NEON3DifferentW(vd, vn, vm, NEON_USUBW);
    }

    void Assembler::usubw2(const VRegister& vd, const VRegister& vn,
        const VRegister& vm)
    {
        DCHECK(vm.IsQ());
        NEON3DifferentW(vd, vn, vm, NEON_USUBW2);
    }

    void Assembler::ssubw(const VRegister& vd, const VRegister& vn,
        const VRegister& vm)
    {
        DCHECK(vm.IsD());
        NEON3DifferentW(vd, vn, vm, NEON_SSUBW);
    }

    void Assembler::ssubw2(const VRegister& vd, const VRegister& vn,
        const VRegister& vm)
    {
        DCHECK(vm.IsQ());
        NEON3DifferentW(vd, vn, vm, NEON_SSUBW2);
    }

    void Assembler::mov(const Register& rd, const Register& rm)
    {
        // Moves involving the stack pointer are encoded as an add immediate with
        // a second operand of zero. Otherwise, an orr with the zero register as
        // the first operand is used.
        if (rd.IsSP() || rm.IsSP()) {
            add(rd, rm, 0);
        } else {
            orr(rd, AppropriateZeroRegFor(rd), rm);
        }
    }
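
    // For example, mov(sp, x0) emits "add sp, x0, #0" because orr cannot
    // address the stack pointer, while mov(x0, x1) emits the canonical
    // "orr x0, xzr, x1".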

    void Assembler::ins(const VRegister& vd, int vd_index, const Register& rn)
    {
        // We support vd arguments of the form vd.VxT() or vd.T(), where x is the
        // number of lanes, and T is b, h, s or d.
        int lane_size = vd.LaneSizeInBytes();
        NEONFormatField format;
        switch (lane_size) {
        case 1:
            format = NEON_16B;
            DCHECK(rn.IsW());
            break;
        case 2:
            format = NEON_8H;
            DCHECK(rn.IsW());
            break;
        case 4:
            format = NEON_4S;
            DCHECK(rn.IsW());
            break;
        default:
            DCHECK_EQ(lane_size, 8);
            DCHECK(rn.IsX());
            format = NEON_2D;
            break;
        }

        DCHECK((0 <= vd_index) && (vd_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
        Emit(NEON_INS_GENERAL | ImmNEON5(format, vd_index) | Rn(rn) | Rd(vd));
    }
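
    // Illustrative uses (the lane size is taken from the vd format, and the
    // index is checked against the lane count):
    //   __ ins(v0.V16B(), 7, w1);  // byte lane 7 <- low 8 bits of w1
    //   __ ins(v0.V2D(), 1, x2);   // doubleword lane 1 <- x2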

    void Assembler::mov(const Register& rd, const VRegister& vn, int vn_index)
    {
        DCHECK_GE(vn.SizeInBytes(), 4);
        umov(rd, vn, vn_index);
    }

    void Assembler::smov(const Register& rd, const VRegister& vn, int vn_index)
    {
        // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
        // number of lanes, and T is b, h or s.
        int lane_size = vn.LaneSizeInBytes();
        NEONFormatField format;
        Instr q = 0;
        switch (lane_size) {
        case 1:
            format = NEON_16B;
            break;
        case 2:
            format = NEON_8H;
            break;
        default:
            DCHECK_EQ(lane_size, 4);
            DCHECK(rd.IsX());
            format = NEON_4S;
            break;
        }
        q = rd.IsW() ? 0 : NEON_Q;
        DCHECK((0 <= vn_index) && (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
        Emit(q | NEON_SMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd));
    }
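
    // For example, smov(x0, v1.V8H(), 2) sign-extends half-word lane 2 of v1
    // into x0. The zero-extending counterpart is umov() below, for which
    // mov(rd, vn, vn_index) above is an alias.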

    void Assembler::cls(const VRegister& vd, const VRegister& vn)
    {
        DCHECK(AreSameFormat(vd, vn));
        DCHECK(!vd.Is1D() && !vd.Is2D());
        Emit(VFormat(vn) | NEON_CLS | Rn(vn) | Rd(vd));
    }

    void Assembler::clz(const VRegister& vd, const VRegister& vn)
    {
        DCHECK(AreSameFormat(vd, vn));
        DCHECK(!vd.Is1D() && !vd.Is2D());
        Emit(VFormat(vn) | NEON_CLZ | Rn(vn) | Rd(vd));
    }

    void Assembler::cnt(const VRegister& vd, const VRegister& vn)
    {
        DCHECK(AreSameFormat(vd, vn));
        DCHECK(vd.Is8B() || vd.Is16B());
        Emit(VFormat(vn) | NEON_CNT | Rn(vn) | Rd(vd));
    }

    void Assembler::rev16(const VRegister& vd, const VRegister& vn)
    {
        DCHECK(AreSameFormat(vd, vn));
        DCHECK(vd.Is8B() || vd.Is16B());
        Emit(VFormat(vn) | NEON_REV16 | Rn(vn) | Rd(vd));
    }

    void Assembler::rev32(const VRegister& vd, const VRegister& vn)
    {
        DCHECK(AreSameFormat(vd, vn));
        DCHECK(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H());
        Emit(VFormat(vn) | NEON_REV32 | Rn(vn) | Rd(vd));
    }

    void Assembler::rev64(const VRegister& vd, const VRegister& vn)
    {
        DCHECK(AreSameFormat(vd, vn));
        DCHECK(!vd.Is1D() && !vd.Is2D());
        Emit(VFormat(vn) | NEON_REV64 | Rn(vn) | Rd(vd));
    }

    void Assembler::ursqrte(const VRegister& vd, const VRegister& vn)
    {
        DCHECK(AreSameFormat(vd, vn));
        DCHECK(vd.Is2S() || vd.Is4S());
        Emit(VFormat(vn) | NEON_URSQRTE | Rn(vn) | Rd(vd));
    }

    void Assembler::urecpe(const VRegister& vd, const VRegister& vn)
    {
        DCHECK(AreSameFormat(vd, vn));
        DCHECK(vd.Is2S() || vd.Is4S());
        Emit(VFormat(vn) | NEON_URECPE | Rn(vn) | Rd(vd));
    }

    void Assembler::NEONAddlp(const VRegister& vd, const VRegister& vn,
        NEON2RegMiscOp op)
    {
        DCHECK((op == NEON_SADDLP) || (op == NEON_UADDLP) ||
               (op == NEON_SADALP) || (op == NEON_UADALP));

        DCHECK((vn.Is8B() && vd.Is4H()) || (vn.Is4H() && vd.Is2S()) ||
               (vn.Is2S() && vd.Is1D()) || (vn.Is16B() && vd.Is8H()) ||
               (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D()));
        Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
    }

    void Assembler::saddlp(const VRegister& vd, const VRegister& vn)
    {
        NEONAddlp(vd, vn, NEON_SADDLP);
    }

    void Assembler::uaddlp(const VRegister& vd, const VRegister& vn)
    {
        NEONAddlp(vd, vn, NEON_UADDLP);
    }

    void Assembler::sadalp(const VRegister& vd, const VRegister& vn)
    {
        NEONAddlp(vd, vn, NEON_SADALP);
    }

    void Assembler::uadalp(const VRegister& vd, const VRegister& vn)
    {
        NEONAddlp(vd, vn, NEON_UADALP);
    }

    void Assembler::NEONAcrossLanesL(const VRegister& vd, const VRegister& vn,
        NEONAcrossLanesOp op)
    {
        DCHECK((vn.Is8B() && vd.Is1H()) || (vn.Is16B() && vd.Is1H()) ||
               (vn.Is4H() && vd.Is1S()) || (vn.Is8H() && vd.Is1S()) ||
               (vn.Is4S() && vd.Is1D()));
        Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
    }

    void Assembler::saddlv(const VRegister& vd, const VRegister& vn)
    {
        NEONAcrossLanesL(vd, vn, NEON_SADDLV);
    }

    void Assembler::uaddlv(const VRegister& vd, const VRegister& vn)
    {
        NEONAcrossLanesL(vd, vn, NEON_UADDLV);
    }

    void Assembler::NEONAcrossLanes(const VRegister& vd, const VRegister& vn,
        NEONAcrossLanesOp op)
    {
        DCHECK((vn.Is8B() && vd.Is1B()) || (vn.Is16B() && vd.Is1B()) ||
               (vn.Is4H() && vd.Is1H()) || (vn.Is8H() && vd.Is1H()) ||
               (vn.Is4S() && vd.Is1S()));
        if ((op & NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) {
            Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd));
        } else {
            Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
        }
    }

#define NEON_ACROSSLANES_LIST(V)        \
    V(fmaxv, NEON_FMAXV, vd.Is1S())     \
    V(fminv, NEON_FMINV, vd.Is1S())     \
    V(fmaxnmv, NEON_FMAXNMV, vd.Is1S()) \
    V(fminnmv, NEON_FMINNMV, vd.Is1S()) \
    V(addv, NEON_ADDV, true)            \
    V(smaxv, NEON_SMAXV, true)          \
    V(sminv, NEON_SMINV, true)          \
    V(umaxv, NEON_UMAXV, true)          \
    V(uminv, NEON_UMINV, true)

#define DEFINE_ASM_FUNC(FN, OP, AS)                              \
    void Assembler::FN(const VRegister& vd, const VRegister& vn) \
    {                                                            \
        DCHECK(AS);                                              \
        NEONAcrossLanes(vd, vn, OP);                             \
    }
    NEON_ACROSSLANES_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC

    void Assembler::mov(const VRegister& vd, int vd_index, const Register& rn)
    {
        ins(vd, vd_index, rn);
    }

    void Assembler::umov(const Register& rd, const VRegister& vn, int vn_index)
    {
        // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
        // number of lanes, and T is b, h, s or d.
        int lane_size = vn.LaneSizeInBytes();
        NEONFormatField format;
        Instr q = 0;
        switch (lane_size) {
        case 1:
            format = NEON_16B;
            DCHECK(rd.IsW());
            break;
        case 2:
            format = NEON_8H;
            DCHECK(rd.IsW());
            break;
        case 4:
            format = NEON_4S;
            DCHECK(rd.IsW());
            break;
        default:
            DCHECK_EQ(lane_size, 8);
            DCHECK(rd.IsX());
            format = NEON_2D;
            q = NEON_Q;
            break;
        }

        DCHECK((0 <= vn_index) && (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
        Emit(q | NEON_UMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd));
    }

    void Assembler::mov(const VRegister& vd, const VRegister& vn, int vn_index)
    {
        DCHECK(vd.IsScalar());
        dup(vd, vn, vn_index);
    }

    void Assembler::dup(const VRegister& vd, const Register& rn)
    {
        DCHECK(!vd.Is1D());
        DCHECK_EQ(vd.Is2D(), rn.IsX());
        Instr q = vd.IsD() ? 0 : NEON_Q;
        Emit(q | NEON_DUP_GENERAL | ImmNEON5(VFormat(vd), 0) | Rn(rn) | Rd(vd));
    }

    void Assembler::ins(const VRegister& vd, int vd_index, const VRegister& vn,
        int vn_index)
    {
        DCHECK(AreSameFormat(vd, vn));
        // We support vd arguments of the form vd.VxT() or vd.T(), where x is the
        // number of lanes, and T is b, h, s or d.
        int lane_size = vd.LaneSizeInBytes();
        NEONFormatField format;
        switch (lane_size) {
        case 1:
            format = NEON_16B;
            break;
        case 2:
            format = NEON_8H;
            break;
        case 4:
            format = NEON_4S;
            break;
        default:
            DCHECK_EQ(lane_size, 8);
            format = NEON_2D;
            break;
        }

        DCHECK((0 <= vd_index) && (vd_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
        DCHECK((0 <= vn_index) && (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
        Emit(NEON_INS_ELEMENT | ImmNEON5(format, vd_index) | ImmNEON4(format, vn_index) | Rn(vn) | Rd(vd));
    }

    void Assembler::NEONTable(const VRegister& vd, const VRegister& vn,
        const VRegister& vm, NEONTableOp op)
    {
        DCHECK(vd.Is16B() || vd.Is8B());
        DCHECK(vn.Is16B());
        DCHECK(AreSameFormat(vd, vm));
        Emit(op | (vd.IsQ() ? NEON_Q : 0) | Rm(vm) | Rn(vn) | Rd(vd));
    }

    void Assembler::tbl(const VRegister& vd, const VRegister& vn,
        const VRegister& vm)
    {
        NEONTable(vd, vn, vm, NEON_TBL_1v);
    }

    void Assembler::tbl(const VRegister& vd, const VRegister& vn,
        const VRegister& vn2, const VRegister& vm)
    {
        USE(vn2);
        DCHECK(AreSameFormat(vn, vn2));
        DCHECK(AreConsecutive(vn, vn2));
        NEONTable(vd, vn, vm, NEON_TBL_2v);
    }

    void Assembler::tbl(const VRegister& vd, const VRegister& vn,
        const VRegister& vn2, const VRegister& vn3,
        const VRegister& vm)
    {
        USE(vn2);
        USE(vn3);
        DCHECK(AreSameFormat(vn, vn2, vn3));
        DCHECK(AreConsecutive(vn, vn2, vn3));
        NEONTable(vd, vn, vm, NEON_TBL_3v);
    }

    void Assembler::tbl(const VRegister& vd, const VRegister& vn,
        const VRegister& vn2, const VRegister& vn3,
        const VRegister& vn4, const VRegister& vm)
    {
        USE(vn2);
        USE(vn3);
        USE(vn4);
        DCHECK(AreSameFormat(vn, vn2, vn3, vn4));
        DCHECK(AreConsecutive(vn, vn2, vn3, vn4));
        NEONTable(vd, vn, vm, NEON_TBL_4v);
    }

    void Assembler::tbx(const VRegister& vd, const VRegister& vn,
        const VRegister& vm)
    {
        NEONTable(vd, vn, vm, NEON_TBX_1v);
    }

    void Assembler::tbx(const VRegister& vd, const VRegister& vn,
        const VRegister& vn2, const VRegister& vm)
    {
        USE(vn2);
        DCHECK(AreSameFormat(vn, vn2));
        DCHECK(AreConsecutive(vn, vn2));
        NEONTable(vd, vn, vm, NEON_TBX_2v);
    }

    void Assembler::tbx(const VRegister& vd, const VRegister& vn,
        const VRegister& vn2, const VRegister& vn3,
        const VRegister& vm)
    {
        USE(vn2);
        USE(vn3);
        DCHECK(AreSameFormat(vn, vn2, vn3));
        DCHECK(AreConsecutive(vn, vn2, vn3));
        NEONTable(vd, vn, vm, NEON_TBX_3v);
    }

    void Assembler::tbx(const VRegister& vd, const VRegister& vn,
        const VRegister& vn2, const VRegister& vn3,
        const VRegister& vn4, const VRegister& vm)
    {
        USE(vn2);
        USE(vn3);
        USE(vn4);
        DCHECK(AreSameFormat(vn, vn2, vn3, vn4));
        DCHECK(AreConsecutive(vn, vn2, vn3, vn4));
        NEONTable(vd, vn, vm, NEON_TBX_4v);
    }

    void Assembler::mov(const VRegister& vd, int vd_index, const VRegister& vn,
        int vn_index)
    {
        ins(vd, vd_index, vn, vn_index);
    }

    void Assembler::mvn(const Register& rd, const Operand& operand)
    {
        orn(rd, AppropriateZeroRegFor(rd), operand);
    }

    void Assembler::mrs(const Register& rt, SystemRegister sysreg)
    {
        DCHECK(rt.Is64Bits());
        Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
    }

    void Assembler::msr(SystemRegister sysreg, const Register& rt)
    {
        DCHECK(rt.Is64Bits());
        Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
    }

    void Assembler::hint(SystemHint code) { Emit(HINT | ImmHint(code) | Rt(xzr)); }

    // NEON structure loads and stores.
    Instr Assembler::LoadStoreStructAddrModeField(const MemOperand& addr)
    {
        Instr addr_field = RnSP(addr.base());

        if (addr.IsPostIndex()) {
            static_assert(NEONLoadStoreMultiStructPostIndex == static_cast<NEONLoadStoreMultiStructPostIndexOp>(NEONLoadStoreSingleStructPostIndex),
                "Opcodes must match for NEON post index memop.");

            addr_field |= NEONLoadStoreMultiStructPostIndex;
            if (addr.offset() == 0) {
                addr_field |= RmNot31(addr.regoffset());
            } else {
                // The immediate post-index addressing mode is indicated by rm = 31.
                // The immediate is implied by the number of vector registers used.
                addr_field |= (0x1F << Rm_offset);
            }
        } else {
            DCHECK(addr.IsImmediateOffset() && (addr.offset() == 0));
        }
        return addr_field;
    }
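
    // For example, ld1(v0.V16B(), MemOperand(x0, 16, PostIndex)) takes the
    // rm = 31 path above, since the 16-byte advance is implied by the register
    // list; a post-index by a core register lands in the RmNot31() branch
    // instead.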

    void Assembler::LoadStoreStructVerify(const VRegister& vt,
        const MemOperand& addr, Instr op)
    {
#ifdef DEBUG
        // Assert that the addressing mode is either an offset (with immediate 0),
        // a post-index by an immediate equal to the size of the register list, or
        // a post-index by a value in a core register.
        if (addr.IsImmediateOffset()) {
            DCHECK_EQ(addr.offset(), 0);
        } else {
            int offset = vt.SizeInBytes();
            switch (op) {
            case NEON_LD1_1v:
            case NEON_ST1_1v:
                offset *= 1;
                break;
            case NEONLoadStoreSingleStructLoad1:
            case NEONLoadStoreSingleStructStore1:
            case NEON_LD1R:
                offset = (offset / vt.LaneCount()) * 1;
                break;

            case NEON_LD1_2v:
            case NEON_ST1_2v:
            case NEON_LD2:
            case NEON_ST2:
                offset *= 2;
                break;
            case NEONLoadStoreSingleStructLoad2:
            case NEONLoadStoreSingleStructStore2:
            case NEON_LD2R:
                offset = (offset / vt.LaneCount()) * 2;
                break;

            case NEON_LD1_3v:
            case NEON_ST1_3v:
            case NEON_LD3:
            case NEON_ST3:
                offset *= 3;
                break;
            case NEONLoadStoreSingleStructLoad3:
            case NEONLoadStoreSingleStructStore3:
            case NEON_LD3R:
                offset = (offset / vt.LaneCount()) * 3;
                break;

            case NEON_LD1_4v:
            case NEON_ST1_4v:
            case NEON_LD4:
            case NEON_ST4:
                offset *= 4;
                break;
            case NEONLoadStoreSingleStructLoad4:
            case NEONLoadStoreSingleStructStore4:
            case NEON_LD4R:
                offset = (offset / vt.LaneCount()) * 4;
                break;
            default:
                UNREACHABLE();
            }
            DCHECK(!addr.regoffset().Is(NoReg) || addr.offset() == offset);
        }
#else
        USE(vt);
        USE(addr);
        USE(op);
#endif
    }

    void Assembler::LoadStoreStruct(const VRegister& vt, const MemOperand& addr,
        NEONLoadStoreMultiStructOp op)
    {
        LoadStoreStructVerify(vt, addr, op);
        DCHECK(vt.IsVector() || vt.Is1D());
        Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt));
    }

    void Assembler::LoadStoreStructSingleAllLanes(const VRegister& vt,
        const MemOperand& addr,
        NEONLoadStoreSingleStructOp op)
    {
        LoadStoreStructVerify(vt, addr, op);
        Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt));
    }

    void Assembler::ld1(const VRegister& vt, const MemOperand& src)
    {
        LoadStoreStruct(vt, src, NEON_LD1_1v);
    }

    void Assembler::ld1(const VRegister& vt, const VRegister& vt2,
        const MemOperand& src)
    {
        USE(vt2);
        DCHECK(AreSameFormat(vt, vt2));
        DCHECK(AreConsecutive(vt, vt2));
        LoadStoreStruct(vt, src, NEON_LD1_2v);
    }

    void Assembler::ld1(const VRegister& vt, const VRegister& vt2,
        const VRegister& vt3, const MemOperand& src)
    {
        USE(vt2);
        USE(vt3);
        DCHECK(AreSameFormat(vt, vt2, vt3));
        DCHECK(AreConsecutive(vt, vt2, vt3));
        LoadStoreStruct(vt, src, NEON_LD1_3v);
    }

    void Assembler::ld1(const VRegister& vt, const VRegister& vt2,
        const VRegister& vt3, const VRegister& vt4,
        const MemOperand& src)
    {
        USE(vt2);
        USE(vt3);
        USE(vt4);
        DCHECK(AreSameFormat(vt, vt2, vt3, vt4));
        DCHECK(AreConsecutive(vt, vt2, vt3, vt4));
        LoadStoreStruct(vt, src, NEON_LD1_4v);
    }

    void Assembler::ld2(const VRegister& vt, const VRegister& vt2,
        const MemOperand& src)
    {
        USE(vt2);
        DCHECK(AreSameFormat(vt, vt2));
        DCHECK(AreConsecutive(vt, vt2));
        LoadStoreStruct(vt, src, NEON_LD2);
    }

    void Assembler::ld2(const VRegister& vt, const VRegister& vt2, int lane,
        const MemOperand& src)
    {
        USE(vt2);
        DCHECK(AreSameFormat(vt, vt2));
        DCHECK(AreConsecutive(vt, vt2));
        LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad2);
    }

    void Assembler::ld2r(const VRegister& vt, const VRegister& vt2,
        const MemOperand& src)
    {
        USE(vt2);
        DCHECK(AreSameFormat(vt, vt2));
        DCHECK(AreConsecutive(vt, vt2));
        LoadStoreStructSingleAllLanes(vt, src, NEON_LD2R);
    }

    void Assembler::ld3(const VRegister& vt, const VRegister& vt2,
        const VRegister& vt3, const MemOperand& src)
    {
        USE(vt2);
        USE(vt3);
        DCHECK(AreSameFormat(vt, vt2, vt3));
        DCHECK(AreConsecutive(vt, vt2, vt3));
        LoadStoreStruct(vt, src, NEON_LD3);
    }

    void Assembler::ld3(const VRegister& vt, const VRegister& vt2,
        const VRegister& vt3, int lane, const MemOperand& src)
    {
        USE(vt2);
        USE(vt3);
        DCHECK(AreSameFormat(vt, vt2, vt3));
        DCHECK(AreConsecutive(vt, vt2, vt3));
        LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad3);
    }

    void Assembler::ld3r(const VRegister& vt, const VRegister& vt2,
        const VRegister& vt3, const MemOperand& src)
    {
        USE(vt2);
        USE(vt3);
        DCHECK(AreSameFormat(vt, vt2, vt3));
        DCHECK(AreConsecutive(vt, vt2, vt3));
        LoadStoreStructSingleAllLanes(vt, src, NEON_LD3R);
    }

    void Assembler::ld4(const VRegister& vt, const VRegister& vt2,
        const VRegister& vt3, const VRegister& vt4,
        const MemOperand& src)
    {
        USE(vt2);
        USE(vt3);
        USE(vt4);
        DCHECK(AreSameFormat(vt, vt2, vt3, vt4));
        DCHECK(AreConsecutive(vt, vt2, vt3, vt4));
        LoadStoreStruct(vt, src, NEON_LD4);
    }

    void Assembler::ld4(const VRegister& vt, const VRegister& vt2,
        const VRegister& vt3, const VRegister& vt4, int lane,
        const MemOperand& src)
    {
        USE(vt2);
        USE(vt3);
        USE(vt4);
        DCHECK(AreSameFormat(vt, vt2, vt3, vt4));
        DCHECK(AreConsecutive(vt, vt2, vt3, vt4));
        LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad4);
    }

    void Assembler::ld4r(const VRegister& vt, const VRegister& vt2,
        const VRegister& vt3, const VRegister& vt4,
        const MemOperand& src)
    {
        USE(vt2);
        USE(vt3);
        USE(vt4);
        DCHECK(AreSameFormat(vt, vt2, vt3, vt4));
        DCHECK(AreConsecutive(vt, vt2, vt3, vt4));
        LoadStoreStructSingleAllLanes(vt, src, NEON_LD4R);
    }

    void Assembler::st1(const VRegister& vt, const MemOperand& src)
    {
        LoadStoreStruct(vt, src, NEON_ST1_1v);
    }

    void Assembler::st1(const VRegister& vt, const VRegister& vt2,
        const MemOperand& src)
    {
        USE(vt2);
        DCHECK(AreSameFormat(vt, vt2));
        DCHECK(AreConsecutive(vt, vt2));
        LoadStoreStruct(vt, src, NEON_ST1_2v);
    }

    void Assembler::st1(const VRegister& vt, const VRegister& vt2,
        const VRegister& vt3, const MemOperand& src)
    {
        USE(vt2);
        USE(vt3);
        DCHECK(AreSameFormat(vt, vt2, vt3));
        DCHECK(AreConsecutive(vt, vt2, vt3));
        LoadStoreStruct(vt, src, NEON_ST1_3v);
    }

    void Assembler::st1(const VRegister& vt, const VRegister& vt2,
        const VRegister& vt3, const VRegister& vt4,
        const MemOperand& src)
    {
        USE(vt2);
        USE(vt3);
        USE(vt4);
        DCHECK(AreSameFormat(vt, vt2, vt3, vt4));
        DCHECK(AreConsecutive(vt, vt2, vt3, vt4));
        LoadStoreStruct(vt, src, NEON_ST1_4v);
    }

    void Assembler::st2(const VRegister& vt, const VRegister& vt2,
        const MemOperand& dst)
    {
        USE(vt2);
        DCHECK(AreSameFormat(vt, vt2));
        DCHECK(AreConsecutive(vt, vt2));
        LoadStoreStruct(vt, dst, NEON_ST2);
    }

    void Assembler::st2(const VRegister& vt, const VRegister& vt2, int lane,
        const MemOperand& dst)
    {
        USE(vt2);
        DCHECK(AreSameFormat(vt, vt2));
        DCHECK(AreConsecutive(vt, vt2));
        LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore2);
    }

    void Assembler::st3(const VRegister& vt, const VRegister& vt2,
        const VRegister& vt3, const MemOperand& dst)
    {
        USE(vt2);
        USE(vt3);
        DCHECK(AreSameFormat(vt, vt2, vt3));
        DCHECK(AreConsecutive(vt, vt2, vt3));
        LoadStoreStruct(vt, dst, NEON_ST3);
    }

    void Assembler::st3(const VRegister& vt, const VRegister& vt2,
        const VRegister& vt3, int lane, const MemOperand& dst)
    {
        USE(vt2);
        USE(vt3);
        DCHECK(AreSameFormat(vt, vt2, vt3));
        DCHECK(AreConsecutive(vt, vt2, vt3));
        LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore3);
    }

    void Assembler::st4(const VRegister& vt, const VRegister& vt2,
        const VRegister& vt3, const VRegister& vt4,
        const MemOperand& dst)
    {
        USE(vt2);
        USE(vt3);
        USE(vt4);
        DCHECK(AreSameFormat(vt, vt2, vt3, vt4));
        DCHECK(AreConsecutive(vt, vt2, vt3, vt4));
        LoadStoreStruct(vt, dst, NEON_ST4);
    }

    void Assembler::st4(const VRegister& vt, const VRegister& vt2,
        const VRegister& vt3, const VRegister& vt4, int lane,
        const MemOperand& dst)
    {
        USE(vt2);
        USE(vt3);
        USE(vt4);
        DCHECK(AreSameFormat(vt, vt2, vt3, vt4));
        DCHECK(AreConsecutive(vt, vt2, vt3, vt4));
        LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore4);
    }

    void Assembler::LoadStoreStructSingle(const VRegister& vt, uint32_t lane,
        const MemOperand& addr,
        NEONLoadStoreSingleStructOp op)
    {
        LoadStoreStructVerify(vt, addr, op);

        // We support vt arguments of the form vt.VxT() or vt.T(), where x is the
        // number of lanes, and T is b, h, s or d.
        unsigned lane_size = vt.LaneSizeInBytes();
        DCHECK_LT(lane, kQRegSize / lane_size);

        // Lane size is encoded in the opcode field. Lane index is encoded in the Q,
        // S and size fields.
        lane *= lane_size;

        // Encodings for S[0]/D[0] and S[2]/D[1] are distinguished using the least-
        // significant bit of the size field, so we increment lane here to account for
        // that.
        if (lane_size == 8)
            lane++;

        Instr size = (lane << NEONLSSize_offset) & NEONLSSize_mask;
        Instr s = (lane << (NEONS_offset - 2)) & NEONS_mask;
        Instr q = (lane << (NEONQ_offset - 3)) & NEONQ_mask;

        Instr instr = op;
        switch (lane_size) {
        case 1:
            instr |= NEONLoadStoreSingle_b;
            break;
        case 2:
            instr |= NEONLoadStoreSingle_h;
            break;
        case 4:
            instr |= NEONLoadStoreSingle_s;
            break;
        default:
            DCHECK_EQ(lane_size, 8U);
            instr |= NEONLoadStoreSingle_d;
        }

        Emit(instr | LoadStoreStructAddrModeField(addr) | q | size | s | Rt(vt));
    }
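
    // Worked example of the lane scatter above: st1(v0.V4S(), 3, dst) has
    // lane_size == 4 and lane index 3, so lane becomes byte offset 12
    // (0b1100), which splits into Q = 1, S = 1 and size = 00.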

    void Assembler::ld1(const VRegister& vt, int lane, const MemOperand& src)
    {
        LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad1);
    }

    void Assembler::ld1r(const VRegister& vt, const MemOperand& src)
    {
        LoadStoreStructSingleAllLanes(vt, src, NEON_LD1R);
    }

    void Assembler::st1(const VRegister& vt, int lane, const MemOperand& dst)
    {
        LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore1);
    }

    void Assembler::dmb(BarrierDomain domain, BarrierType type)
    {
        Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
    }

    void Assembler::dsb(BarrierDomain domain, BarrierType type)
    {
        Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
    }

    void Assembler::isb()
    {
        Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
    }

    void Assembler::csdb() { hint(CSDB); }

    void Assembler::fmov(const VRegister& vd, double imm)
    {
        if (vd.IsScalar()) {
            DCHECK(vd.Is1D());
            Emit(FMOV_d_imm | Rd(vd) | ImmFP(imm));
        } else {
            DCHECK(vd.Is2D());
            Instr op = NEONModifiedImmediate_MOVI | NEONModifiedImmediateOpBit;
            Emit(NEON_Q | op | ImmNEONFP(imm) | NEONCmode(0xF) | Rd(vd));
        }
    }

    void Assembler::fmov(const VRegister& vd, float imm)
    {
        if (vd.IsScalar()) {
            DCHECK(vd.Is1S());
            Emit(FMOV_s_imm | Rd(vd) | ImmFP(imm));
        } else {
            DCHECK(vd.Is2S() || vd.Is4S());
            Instr op = NEONModifiedImmediate_MOVI;
            Instr q = vd.Is4S() ? NEON_Q : 0;
            Emit(q | op | ImmNEONFP(imm) | NEONCmode(0xF) | Rd(vd));
        }
    }
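
    // Note: ImmFP()/ImmNEONFP() expect a value representable as the 8-bit
    // AArch64 floating-point immediate (e.g. 1.0, 0.5, -2.25); other values
    // must be materialized via a register or the constant pool.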

    void Assembler::fmov(const Register& rd, const VRegister& fn)
    {
        DCHECK_EQ(rd.SizeInBits(), fn.SizeInBits());
        FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd;
        Emit(op | Rd(rd) | Rn(fn));
    }

    void Assembler::fmov(const VRegister& vd, const Register& rn)
    {
        DCHECK_EQ(vd.SizeInBits(), rn.SizeInBits());
        FPIntegerConvertOp op = vd.Is32Bits() ? FMOV_sw : FMOV_dx;
        Emit(op | Rd(vd) | Rn(rn));
    }

    void Assembler::fmov(const VRegister& vd, const VRegister& vn)
    {
        DCHECK_EQ(vd.SizeInBits(), vn.SizeInBits());
        Emit(FPType(vd) | FMOV | Rd(vd) | Rn(vn));
    }

    void Assembler::fmov(const VRegister& vd, int index, const Register& rn)
    {
        DCHECK((index == 1) && vd.Is1D() && rn.IsX());
        USE(index);
        Emit(FMOV_d1_x | Rd(vd) | Rn(rn));
    }

    void Assembler::fmov(const Register& rd, const VRegister& vn, int index)
    {
        DCHECK((index == 1) && vn.Is1D() && rd.IsX());
        USE(index);
        Emit(FMOV_x_d1 | Rd(rd) | Rn(vn));
    }

    void Assembler::fmadd(const VRegister& fd, const VRegister& fn,
        const VRegister& fm, const VRegister& fa)
    {
        FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMADD_s : FMADD_d);
    }

    void Assembler::fmsub(const VRegister& fd, const VRegister& fn,
        const VRegister& fm, const VRegister& fa)
    {
        FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMSUB_s : FMSUB_d);
    }

    void Assembler::fnmadd(const VRegister& fd, const VRegister& fn,
        const VRegister& fm, const VRegister& fa)
    {
        FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMADD_s : FNMADD_d);
    }

    void Assembler::fnmsub(const VRegister& fd, const VRegister& fn,
        const VRegister& fm, const VRegister& fa)
    {
        FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMSUB_s : FNMSUB_d);
    }

    void Assembler::fnmul(const VRegister& vd, const VRegister& vn,
        const VRegister& vm)
    {
        DCHECK(AreSameSizeAndType(vd, vn, vm));
        Instr op = vd.Is1S() ? FNMUL_s : FNMUL_d;
        Emit(FPType(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
    }

    void Assembler::fcmp(const VRegister& fn, const VRegister& fm)
    {
        DCHECK_EQ(fn.SizeInBits(), fm.SizeInBits());
        Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn));
    }

    void Assembler::fcmp(const VRegister& fn, double value)
    {
        USE(value);
        // Although the fcmp instruction strictly accepts only an immediate value
        // of +0.0, we don't need to check for -0.0 because the sign of 0.0
        // doesn't affect the result of the comparison.
        DCHECK_EQ(value, 0.0);
        Emit(FPType(fn) | FCMP_zero | Rn(fn));
    }

    void Assembler::fccmp(const VRegister& fn, const VRegister& fm,
        StatusFlags nzcv, Condition cond)
    {
        DCHECK_EQ(fn.SizeInBits(), fm.SizeInBits());
        Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv));
    }

    void Assembler::fcsel(const VRegister& fd, const VRegister& fn,
        const VRegister& fm, Condition cond)
    {
        DCHECK_EQ(fd.SizeInBits(), fn.SizeInBits());
        DCHECK_EQ(fd.SizeInBits(), fm.SizeInBits());
        Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd));
    }

    void Assembler::NEONFPConvertToInt(const Register& rd, const VRegister& vn,
        Instr op)
    {
        Emit(SF(rd) | FPType(vn) | op | Rn(vn) | Rd(rd));
    }

    void Assembler::NEONFPConvertToInt(const VRegister& vd, const VRegister& vn,
        Instr op)
    {
        if (vn.IsScalar()) {
            DCHECK((vd.Is1S() && vn.Is1S()) || (vd.Is1D() && vn.Is1D()));
            op |= NEON_Q | NEONScalar;
        }
        Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd));
    }

    void Assembler::fcvt(const VRegister& vd, const VRegister& vn)
    {
        FPDataProcessing1SourceOp op;
        if (vd.Is1D()) {
            DCHECK(vn.Is1S() || vn.Is1H());
            op = vn.Is1S() ? FCVT_ds : FCVT_dh;
        } else if (vd.Is1S()) {
            DCHECK(vn.Is1D() || vn.Is1H());
            op = vn.Is1D() ? FCVT_sd : FCVT_sh;
        } else {
            DCHECK(vd.Is1H());
            DCHECK(vn.Is1D() || vn.Is1S());
            op = vn.Is1D() ? FCVT_hd : FCVT_hs;
        }
        FPDataProcessing1Source(vd, vn, op);
    }

    void Assembler::fcvtl(const VRegister& vd, const VRegister& vn)
    {
        DCHECK((vd.Is4S() && vn.Is4H()) || (vd.Is2D() && vn.Is2S()));
        Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0;
        Emit(format | NEON_FCVTL | Rn(vn) | Rd(vd));
    }

    void Assembler::fcvtl2(const VRegister& vd, const VRegister& vn)
    {
        DCHECK((vd.Is4S() && vn.Is8H()) || (vd.Is2D() && vn.Is4S()));
        Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0;
        Emit(NEON_Q | format | NEON_FCVTL | Rn(vn) | Rd(vd));
    }

    void Assembler::fcvtn(const VRegister& vd, const VRegister& vn)
    {
        DCHECK((vn.Is4S() && vd.Is4H()) || (vn.Is2D() && vd.Is2S()));
        Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0;
        Emit(format | NEON_FCVTN | Rn(vn) | Rd(vd));
    }

    void Assembler::fcvtn2(const VRegister& vd, const VRegister& vn)
    {
        DCHECK((vn.Is4S() && vd.Is8H()) || (vn.Is2D() && vd.Is4S()));
        Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0;
        Emit(NEON_Q | format | NEON_FCVTN | Rn(vn) | Rd(vd));
    }

    void Assembler::fcvtxn(const VRegister& vd, const VRegister& vn)
    {
        Instr format = 1 << NEONSize_offset;
        if (vd.IsScalar()) {
            DCHECK(vd.Is1S() && vn.Is1D());
            Emit(format | NEON_FCVTXN_scalar | Rn(vn) | Rd(vd));
        } else {
            DCHECK(vd.Is2S() && vn.Is2D());
            Emit(format | NEON_FCVTXN | Rn(vn) | Rd(vd));
        }
    }

    void Assembler::fcvtxn2(const VRegister& vd, const VRegister& vn)
    {
        DCHECK(vd.Is4S() && vn.Is2D());
        Instr format = 1 << NEONSize_offset;
        Emit(NEON_Q | format | NEON_FCVTXN | Rn(vn) | Rd(vd));
    }

#define NEON_FP2REGMISC_FCVT_LIST(V) \
    V(fcvtnu, NEON_FCVTNU, FCVTNU)   \
    V(fcvtns, NEON_FCVTNS, FCVTNS)   \
    V(fcvtpu, NEON_FCVTPU, FCVTPU)   \
    V(fcvtps, NEON_FCVTPS, FCVTPS)   \
    V(fcvtmu, NEON_FCVTMU, FCVTMU)   \
    V(fcvtms, NEON_FCVTMS, FCVTMS)   \
    V(fcvtau, NEON_FCVTAU, FCVTAU)   \
    V(fcvtas, NEON_FCVTAS, FCVTAS)

#define DEFINE_ASM_FUNCS(FN, VEC_OP, SCA_OP)                     \
    void Assembler::FN(const Register& rd, const VRegister& vn)  \
    {                                                            \
        NEONFPConvertToInt(rd, vn, SCA_OP);                      \
    }                                                            \
    void Assembler::FN(const VRegister& vd, const VRegister& vn) \
    {                                                            \
        NEONFPConvertToInt(vd, vn, VEC_OP);                      \
    }
    NEON_FP2REGMISC_FCVT_LIST(DEFINE_ASM_FUNCS)
#undef DEFINE_ASM_FUNCS

    void Assembler::scvtf(const VRegister& vd, const VRegister& vn, int fbits)
    {
        DCHECK_GE(fbits, 0);
        if (fbits == 0) {
            NEONFP2RegMisc(vd, vn, NEON_SCVTF);
        } else {
            DCHECK(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
            NEONShiftRightImmediate(vd, vn, fbits, NEON_SCVTF_imm);
        }
    }

    void Assembler::ucvtf(const VRegister& vd, const VRegister& vn, int fbits)
    {
        DCHECK_GE(fbits, 0);
        if (fbits == 0) {
            NEONFP2RegMisc(vd, vn, NEON_UCVTF);
        } else {
            DCHECK(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
            NEONShiftRightImmediate(vd, vn, fbits, NEON_UCVTF_imm);
        }
    }

    void Assembler::scvtf(const VRegister& vd, const Register& rn, int fbits)
    {
        DCHECK_GE(fbits, 0);
        if (fbits == 0) {
            Emit(SF(rn) | FPType(vd) | SCVTF | Rn(rn) | Rd(vd));
        } else {
            Emit(SF(rn) | FPType(vd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) | Rd(vd));
        }
    }

    void Assembler::ucvtf(const VRegister& fd, const Register& rn, int fbits)
    {
        DCHECK_GE(fbits, 0);
        if (fbits == 0) {
            Emit(SF(rn) | FPType(fd) | UCVTF | Rn(rn) | Rd(fd));
        } else {
            Emit(SF(rn) | FPType(fd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) | Rd(fd));
        }
    }

    void Assembler::NEON3Same(const VRegister& vd, const VRegister& vn,
        const VRegister& vm, NEON3SameOp vop)
    {
        DCHECK(AreSameFormat(vd, vn, vm));
        DCHECK(vd.IsVector() || !vd.IsQ());

        Instr format, op = vop;
        if (vd.IsScalar()) {
            op |= NEON_Q | NEONScalar;
            format = SFormat(vd);
        } else {
            format = VFormat(vd);
        }

        Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
    }

    void Assembler::NEONFP3Same(const VRegister& vd, const VRegister& vn,
        const VRegister& vm, Instr op)
    {
        DCHECK(AreSameFormat(vd, vn, vm));
        Emit(FPFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
    }

#define NEON_FP2REGMISC_LIST(V)                   \
    V(fabs, NEON_FABS, FABS)                      \
    V(fneg, NEON_FNEG, FNEG)                      \
    V(fsqrt, NEON_FSQRT, FSQRT)                   \
    V(frintn, NEON_FRINTN, FRINTN)                \
    V(frinta, NEON_FRINTA, FRINTA)                \
    V(frintp, NEON_FRINTP, FRINTP)                \
    V(frintm, NEON_FRINTM, FRINTM)                \
    V(frintx, NEON_FRINTX, FRINTX)                \
    V(frintz, NEON_FRINTZ, FRINTZ)                \
    V(frinti, NEON_FRINTI, FRINTI)                \
    V(frsqrte, NEON_FRSQRTE, NEON_FRSQRTE_scalar) \
    V(frecpe, NEON_FRECPE, NEON_FRECPE_scalar)

#define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP)                      \
    void Assembler::FN(const VRegister& vd, const VRegister& vn) \
    {                                                            \
        Instr op;                                                \
        if (vd.IsScalar()) {                                     \
            DCHECK(vd.Is1S() || vd.Is1D());                      \
            op = SCA_OP;                                         \
        } else {                                                 \
            DCHECK(vd.Is2S() || vd.Is2D() || vd.Is4S());         \
            op = VEC_OP;                                         \
        }                                                        \
        NEONFP2RegMisc(vd, vn, op);                              \
    }
    NEON_FP2REGMISC_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC

    void Assembler::shll(const VRegister& vd, const VRegister& vn, int shift)
    {
        DCHECK((vd.Is8H() && vn.Is8B() && shift == 8) || (vd.Is4S() && vn.Is4H() && shift == 16) || (vd.Is2D() && vn.Is2S() && shift == 32));
        USE(shift);
        Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd));
    }

    void Assembler::shll2(const VRegister& vd, const VRegister& vn, int shift)
    {
        USE(shift);
        DCHECK((vd.Is8H() && vn.Is16B() && shift == 8) || (vd.Is4S() && vn.Is8H() && shift == 16) || (vd.Is2D() && vn.Is4S() && shift == 32));
        Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd));
    }

    void Assembler::NEONFP2RegMisc(const VRegister& vd, const VRegister& vn,
        NEON2RegMiscOp vop, double value)
    {
        DCHECK(AreSameFormat(vd, vn));
        DCHECK_EQ(value, 0.0);
        USE(value);

        Instr op = vop;
        if (vd.IsScalar()) {
            DCHECK(vd.Is1S() || vd.Is1D());
            op |= NEON_Q | NEONScalar;
        } else {
            DCHECK(vd.Is2S() || vd.Is2D() || vd.Is4S());
        }

        Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd));
    }

    void Assembler::fcmeq(const VRegister& vd, const VRegister& vn, double value)
    {
        NEONFP2RegMisc(vd, vn, NEON_FCMEQ_zero, value);
    }

    void Assembler::fcmge(const VRegister& vd, const VRegister& vn, double value)
    {
        NEONFP2RegMisc(vd, vn, NEON_FCMGE_zero, value);
    }

    void Assembler::fcmgt(const VRegister& vd, const VRegister& vn, double value)
    {
        NEONFP2RegMisc(vd, vn, NEON_FCMGT_zero, value);
    }

    void Assembler::fcmle(const VRegister& vd, const VRegister& vn, double value)
    {
        NEONFP2RegMisc(vd, vn, NEON_FCMLE_zero, value);
    }

    void Assembler::fcmlt(const VRegister& vd, const VRegister& vn, double value)
    {
        NEONFP2RegMisc(vd, vn, NEON_FCMLT_zero, value);
    }

    void Assembler::frecpx(const VRegister& vd, const VRegister& vn)
    {
        DCHECK(vd.IsScalar());
        DCHECK(AreSameFormat(vd, vn));
        DCHECK(vd.Is1S() || vd.Is1D());
        Emit(FPFormat(vd) | NEON_FRECPX_scalar | Rn(vn) | Rd(vd));
    }

    void Assembler::fcvtzs(const Register& rd, const VRegister& vn, int fbits)
    {
        DCHECK(vn.Is1S() || vn.Is1D());
        DCHECK((fbits >= 0) && (fbits <= rd.SizeInBits()));
        if (fbits == 0) {
            Emit(SF(rd) | FPType(vn) | FCVTZS | Rn(vn) | Rd(rd));
        } else {
            Emit(SF(rd) | FPType(vn) | FCVTZS_fixed | FPScale(64 - fbits) | Rn(vn) | Rd(rd));
        }
    }

    void Assembler::fcvtzs(const VRegister& vd, const VRegister& vn, int fbits)
    {
        DCHECK_GE(fbits, 0);
        if (fbits == 0) {
            NEONFP2RegMisc(vd, vn, NEON_FCVTZS);
        } else {
            DCHECK(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
            NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZS_imm);
        }
    }

    void Assembler::fcvtzu(const Register& rd, const VRegister& vn, int fbits)
    {
        DCHECK(vn.Is1S() || vn.Is1D());
        DCHECK((fbits >= 0) && (fbits <= rd.SizeInBits()));
        if (fbits == 0) {
            Emit(SF(rd) | FPType(vn) | FCVTZU | Rn(vn) | Rd(rd));
        } else {
            Emit(SF(rd) | FPType(vn) | FCVTZU_fixed | FPScale(64 - fbits) | Rn(vn) | Rd(rd));
        }
    }

    void Assembler::fcvtzu(const VRegister& vd, const VRegister& vn, int fbits)
    {
        DCHECK_GE(fbits, 0);
        if (fbits == 0) {
            NEONFP2RegMisc(vd, vn, NEON_FCVTZU);
        } else {
            DCHECK(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
            NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZU_imm);
        }
    }

    void Assembler::NEONFP2RegMisc(const VRegister& vd, const VRegister& vn,
        Instr op)
    {
        DCHECK(AreSameFormat(vd, vn));
        Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd));
    }

    void Assembler::NEON2RegMisc(const VRegister& vd, const VRegister& vn,
        NEON2RegMiscOp vop, int value)
    {
        DCHECK(AreSameFormat(vd, vn));
        DCHECK_EQ(value, 0);
        USE(value);

        Instr format, op = vop;
        if (vd.IsScalar()) {
            op |= NEON_Q | NEONScalar;
            format = SFormat(vd);
        } else {
            format = VFormat(vd);
        }

        Emit(format | op | Rn(vn) | Rd(vd));
    }

    void Assembler::cmeq(const VRegister& vd, const VRegister& vn, int value)
    {
        DCHECK(vd.IsVector() || vd.Is1D());
        NEON2RegMisc(vd, vn, NEON_CMEQ_zero, value);
    }

    void Assembler::cmge(const VRegister& vd, const VRegister& vn, int value)
    {
        DCHECK(vd.IsVector() || vd.Is1D());
        NEON2RegMisc(vd, vn, NEON_CMGE_zero, value);
    }

    void Assembler::cmgt(const VRegister& vd, const VRegister& vn, int value)
    {
        DCHECK(vd.IsVector() || vd.Is1D());
        NEON2RegMisc(vd, vn, NEON_CMGT_zero, value);
    }

    void Assembler::cmle(const VRegister& vd, const VRegister& vn, int value)
    {
        DCHECK(vd.IsVector() || vd.Is1D());
        NEON2RegMisc(vd, vn, NEON_CMLE_zero, value);
    }

    void Assembler::cmlt(const VRegister& vd, const VRegister& vn, int value)
    {
        DCHECK(vd.IsVector() || vd.Is1D());
        NEON2RegMisc(vd, vn, NEON_CMLT_zero, value);
    }

#define NEON_3SAME_LIST(V)                                           \
    V(add, NEON_ADD, vd.IsVector() || vd.Is1D())                     \
    V(addp, NEON_ADDP, vd.IsVector() || vd.Is1D())                   \
    V(sub, NEON_SUB, vd.IsVector() || vd.Is1D())                     \
    V(cmeq, NEON_CMEQ, vd.IsVector() || vd.Is1D())                   \
    V(cmge, NEON_CMGE, vd.IsVector() || vd.Is1D())                   \
    V(cmgt, NEON_CMGT, vd.IsVector() || vd.Is1D())                   \
    V(cmhi, NEON_CMHI, vd.IsVector() || vd.Is1D())                   \
    V(cmhs, NEON_CMHS, vd.IsVector() || vd.Is1D())                   \
    V(cmtst, NEON_CMTST, vd.IsVector() || vd.Is1D())                 \
    V(sshl, NEON_SSHL, vd.IsVector() || vd.Is1D())                   \
    V(ushl, NEON_USHL, vd.IsVector() || vd.Is1D())                   \
    V(srshl, NEON_SRSHL, vd.IsVector() || vd.Is1D())                 \
    V(urshl, NEON_URSHL, vd.IsVector() || vd.Is1D())                 \
    V(sqdmulh, NEON_SQDMULH, vd.IsLaneSizeH() || vd.IsLaneSizeS())   \
    V(sqrdmulh, NEON_SQRDMULH, vd.IsLaneSizeH() || vd.IsLaneSizeS()) \
    V(shadd, NEON_SHADD, vd.IsVector() && !vd.IsLaneSizeD())         \
    V(uhadd, NEON_UHADD, vd.IsVector() && !vd.IsLaneSizeD())         \
    V(srhadd, NEON_SRHADD, vd.IsVector() && !vd.IsLaneSizeD())       \
    V(urhadd, NEON_URHADD, vd.IsVector() && !vd.IsLaneSizeD())       \
    V(shsub, NEON_SHSUB, vd.IsVector() && !vd.IsLaneSizeD())         \
    V(uhsub, NEON_UHSUB, vd.IsVector() && !vd.IsLaneSizeD())         \
    V(smax, NEON_SMAX, vd.IsVector() && !vd.IsLaneSizeD())           \
    V(smaxp, NEON_SMAXP, vd.IsVector() && !vd.IsLaneSizeD())         \
    V(smin, NEON_SMIN, vd.IsVector() && !vd.IsLaneSizeD())           \
    V(sminp, NEON_SMINP, vd.IsVector() && !vd.IsLaneSizeD())         \
    V(umax, NEON_UMAX, vd.IsVector() && !vd.IsLaneSizeD())           \
    V(umaxp, NEON_UMAXP, vd.IsVector() && !vd.IsLaneSizeD())         \
    V(umin, NEON_UMIN, vd.IsVector() && !vd.IsLaneSizeD())           \
    V(uminp, NEON_UMINP, vd.IsVector() && !vd.IsLaneSizeD())         \
    V(saba, NEON_SABA, vd.IsVector() && !vd.IsLaneSizeD())           \
    V(sabd, NEON_SABD, vd.IsVector() && !vd.IsLaneSizeD())           \
    V(uaba, NEON_UABA, vd.IsVector() && !vd.IsLaneSizeD())           \
    V(uabd, NEON_UABD, vd.IsVector() && !vd.IsLaneSizeD())           \
    V(mla, NEON_MLA, vd.IsVector() && !vd.IsLaneSizeD())             \
    V(mls, NEON_MLS, vd.IsVector() && !vd.IsLaneSizeD())             \
    V(mul, NEON_MUL, vd.IsVector() && !vd.IsLaneSizeD())             \
    V(and_, NEON_AND, vd.Is8B() || vd.Is16B())                       \
    V(orr, NEON_ORR, vd.Is8B() || vd.Is16B())                        \
    V(orn, NEON_ORN, vd.Is8B() || vd.Is16B())                        \
    V(eor, NEON_EOR, vd.Is8B() || vd.Is16B())                        \
    V(bic, NEON_BIC, vd.Is8B() || vd.Is16B())                        \
    V(bit, NEON_BIT, vd.Is8B() || vd.Is16B())                        \
    V(bif, NEON_BIF, vd.Is8B() || vd.Is16B())                        \
    V(bsl, NEON_BSL, vd.Is8B() || vd.Is16B())                        \
    V(pmul, NEON_PMUL, vd.Is8B() || vd.Is16B())                      \
    V(uqadd, NEON_UQADD, true)                                       \
    V(sqadd, NEON_SQADD, true)                                       \
    V(uqsub, NEON_UQSUB, true)                                       \
    V(sqsub, NEON_SQSUB, true)                                       \
    V(sqshl, NEON_SQSHL, true)                                       \
    V(uqshl, NEON_UQSHL, true)                                       \
    V(sqrshl, NEON_SQRSHL, true)                                     \
    V(uqrshl, NEON_UQRSHL, true)

#define DEFINE_ASM_FUNC(FN, OP, AS)                              \
    void Assembler::FN(const VRegister& vd, const VRegister& vn, \
        const VRegister& vm)                                     \
    {                                                            \
        DCHECK(AS);                                              \
        NEON3Same(vd, vn, vm, OP);                               \
    }
    NEON_3SAME_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC

#define NEON_FP3SAME_LIST_V2(V)                   \
    V(fadd, NEON_FADD, FADD)                      \
    V(fsub, NEON_FSUB, FSUB)                      \
    V(fmul, NEON_FMUL, FMUL)                      \
    V(fdiv, NEON_FDIV, FDIV)                      \
    V(fmax, NEON_FMAX, FMAX)                      \
    V(fmaxnm, NEON_FMAXNM, FMAXNM)                \
    V(fmin, NEON_FMIN, FMIN)                      \
    V(fminnm, NEON_FMINNM, FMINNM)                \
    V(fmulx, NEON_FMULX, NEON_FMULX_scalar)       \
    V(frecps, NEON_FRECPS, NEON_FRECPS_scalar)    \
    V(frsqrts, NEON_FRSQRTS, NEON_FRSQRTS_scalar) \
    V(fabd, NEON_FABD, NEON_FABD_scalar)          \
    V(fmla, NEON_FMLA, 0)                         \
    V(fmls, NEON_FMLS, 0)                         \
    V(facge, NEON_FACGE, NEON_FACGE_scalar)       \
    V(facgt, NEON_FACGT, NEON_FACGT_scalar)       \
    V(fcmeq, NEON_FCMEQ, NEON_FCMEQ_scalar)       \
    V(fcmge, NEON_FCMGE, NEON_FCMGE_scalar)       \
    V(fcmgt, NEON_FCMGT, NEON_FCMGT_scalar)       \
    V(faddp, NEON_FADDP, 0)                       \
    V(fmaxp, NEON_FMAXP, 0)                       \
    V(fminp, NEON_FMINP, 0)                       \
    V(fmaxnmp, NEON_FMAXNMP, 0)                   \
    V(fminnmp, NEON_FMINNMP, 0)

#define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP)                      \
    void Assembler::FN(const VRegister& vd, const VRegister& vn, \
        const VRegister& vm)                                     \
    {                                                            \
        Instr op;                                                \
        if ((SCA_OP != 0) && vd.IsScalar()) {                    \
            DCHECK(vd.Is1S() || vd.Is1D());                      \
            op = SCA_OP;                                         \
        } else {                                                 \
            DCHECK(vd.IsVector());                               \
            DCHECK(vd.Is2S() || vd.Is2D() || vd.Is4S());         \
            op = VEC_OP;                                         \
        }                                                        \
        NEONFP3Same(vd, vn, vm, op);                             \
    }
    NEON_FP3SAME_LIST_V2(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC

    void Assembler::addp(const VRegister& vd, const VRegister& vn)
    {
        DCHECK((vd.Is1D() && vn.Is2D()));
        Emit(SFormat(vd) | NEON_ADDP_scalar | Rn(vn) | Rd(vd));
    }

    void Assembler::faddp(const VRegister& vd, const VRegister& vn)
    {
        DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
        Emit(FPFormat(vd) | NEON_FADDP_scalar | Rn(vn) | Rd(vd));
    }

    void Assembler::fmaxp(const VRegister& vd, const VRegister& vn)
    {
        DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
        Emit(FPFormat(vd) | NEON_FMAXP_scalar | Rn(vn) | Rd(vd));
    }

    void Assembler::fminp(const VRegister& vd, const VRegister& vn)
    {
        DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
        Emit(FPFormat(vd) | NEON_FMINP_scalar | Rn(vn) | Rd(vd));
    }

    void Assembler::fmaxnmp(const VRegister& vd, const VRegister& vn)
    {
        DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
        Emit(FPFormat(vd) | NEON_FMAXNMP_scalar | Rn(vn) | Rd(vd));
    }

    void Assembler::fminnmp(const VRegister& vd, const VRegister& vn)
    {
        DCHECK((vd.Is1S() && vn.Is2S()) || (vd.Is1D() && vn.Is2D()));
        Emit(FPFormat(vd) | NEON_FMINNMP_scalar | Rn(vn) | Rd(vd));
    }

    void Assembler::orr(const VRegister& vd, const int imm8, const int left_shift)
    {
        NEONModifiedImmShiftLsl(vd, imm8, left_shift, NEONModifiedImmediate_ORR);
    }

    void Assembler::mov(const VRegister& vd, const VRegister& vn)
    {
        DCHECK(AreSameFormat(vd, vn));
        if (vd.IsD()) {
            orr(vd.V8B(), vn.V8B(), vn.V8B());
        } else {
            DCHECK(vd.IsQ());
            orr(vd.V16B(), vn.V16B(), vn.V16B());
        }
    }

    void Assembler::bic(const VRegister& vd, const int imm8, const int left_shift)
    {
        NEONModifiedImmShiftLsl(vd, imm8, left_shift, NEONModifiedImmediate_BIC);
    }

    void Assembler::movi(const VRegister& vd, const uint64_t imm, Shift shift,
        const int shift_amount)
    {
        DCHECK((shift == LSL) || (shift == MSL));
        if (vd.Is2D() || vd.Is1D()) {
            DCHECK_EQ(shift_amount, 0);
            int imm8 = 0;
            for (int i = 0; i < 8; ++i) {
                int byte = (imm >> (i * 8)) & 0xFF;
                DCHECK((byte == 0) || (byte == 0xFF));
                if (byte == 0xFF) {
                    imm8 |= (1 << i);
                }
            }
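            // For example (illustrative): imm 0x00FF00FF00FF00FF yields
            // imm8 = 0b01010101 (0x55); each set bit selects an all-ones byte.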
            Instr q = vd.Is2D() ? NEON_Q : 0;
            Emit(q | NEONModImmOp(1) | NEONModifiedImmediate_MOVI | ImmNEONabcdefgh(imm8) | NEONCmode(0xE) | Rd(vd));
        } else if (shift == LSL) {
            NEONModifiedImmShiftLsl(vd, static_cast<int>(imm), shift_amount,
                NEONModifiedImmediate_MOVI);
        } else {
            NEONModifiedImmShiftMsl(vd, static_cast<int>(imm), shift_amount,
                NEONModifiedImmediate_MOVI);
        }
    }

    void Assembler::mvn(const VRegister& vd, const VRegister& vn)
    {
        DCHECK(AreSameFormat(vd, vn));
        if (vd.IsD()) {
            not_(vd.V8B(), vn.V8B());
        } else {
            DCHECK(vd.IsQ());
            not_(vd.V16B(), vn.V16B());
        }
    }

    void Assembler::mvni(const VRegister& vd, const int imm8, Shift shift,
        const int shift_amount)
    {
        DCHECK((shift == LSL) || (shift == MSL));
        if (shift == LSL) {
            NEONModifiedImmShiftLsl(vd, imm8, shift_amount, NEONModifiedImmediate_MVNI);
        } else {
            NEONModifiedImmShiftMsl(vd, imm8, shift_amount, NEONModifiedImmediate_MVNI);
        }
    }

    void Assembler::NEONFPByElement(const VRegister& vd, const VRegister& vn,
        const VRegister& vm, int vm_index,
        NEONByIndexedElementOp vop)
    {
        DCHECK(AreSameFormat(vd, vn));
        DCHECK((vd.Is2S() && vm.Is1S()) || (vd.Is4S() && vm.Is1S()) || (vd.Is1S() && vm.Is1S()) || (vd.Is2D() && vm.Is1D()) || (vd.Is1D() && vm.Is1D()));
        DCHECK((vm.Is1S() && (vm_index < 4)) || (vm.Is1D() && (vm_index < 2)));

        Instr op = vop;
        int index_num_bits = vm.Is1S() ? 2 : 1;
        if (vd.IsScalar()) {
            op |= NEON_Q | NEONScalar;
        }

        Emit(FPFormat(vd) | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) | Rd(vd));
    }

    void Assembler::NEONByElement(const VRegister& vd, const VRegister& vn,
        const VRegister& vm, int vm_index,
        NEONByIndexedElementOp vop)
    {
        DCHECK(AreSameFormat(vd, vn));
        DCHECK((vd.Is4H() && vm.Is1H()) || (vd.Is8H() && vm.Is1H()) || (vd.Is1H() && vm.Is1H()) || (vd.Is2S() && vm.Is1S()) || (vd.Is4S() && vm.Is1S()) || (vd.Is1S() && vm.Is1S()));
        DCHECK((vm.Is1H() && (vm.code() < 16) && (vm_index < 8)) || (vm.Is1S() && (vm_index < 4)));

        Instr format, op = vop;
        int index_num_bits = vm.Is1H() ? 3 : 2;
        if (vd.IsScalar()) {
            op |= NEONScalar | NEON_Q;
            format = SFormat(vn);
        } else {
            format = VFormat(vn);
        }
        Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) | Rd(vd));
    }

    void Assembler::NEONByElementL(const VRegister& vd, const VRegister& vn,
        const VRegister& vm, int vm_index,
        NEONByIndexedElementOp vop)
    {
        DCHECK((vd.Is4S() && vn.Is4H() && vm.Is1H()) || (vd.Is4S() && vn.Is8H() && vm.Is1H()) || (vd.Is1S() && vn.Is1H() && vm.Is1H()) || (vd.Is2D() && vn.Is2S() && vm.Is1S()) || (vd.Is2D() && vn.Is4S() && vm.Is1S()) || (vd.Is1D() && vn.Is1S() && vm.Is1S()));

        DCHECK((vm.Is1H() && (vm.code() < 16) && (vm_index < 8)) || (vm.Is1S() && (vm_index < 4)));

        Instr format, op = vop;
        int index_num_bits = vm.Is1H() ? 3 : 2;
        if (vd.IsScalar()) {
            op |= NEONScalar | NEON_Q;
            format = SFormat(vn);
        } else {
            format = VFormat(vn);
        }
        Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) | Rm(vm) | Rn(vn) | Rd(vd));
    }

#define NEON_BYELEMENT_LIST(V)                \
    V(mul, NEON_MUL_byelement, vn.IsVector()) \
    V(mla, NEON_MLA_byelement, vn.IsVector()) \
    V(mls, NEON_MLS_byelement, vn.IsVector()) \
    V(sqdmulh, NEON_SQDMULH_byelement, true)  \
    V(sqrdmulh, NEON_SQRDMULH_byelement, true)

#define DEFINE_ASM_FUNC(FN, OP, AS)                              \
    void Assembler::FN(const VRegister& vd, const VRegister& vn, \
        const VRegister& vm, int vm_index)                       \
    {                                                            \
        DCHECK(AS);                                              \
        NEONByElement(vd, vn, vm, vm_index, OP);                 \
    }
    NEON_BYELEMENT_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC

#define NEON_FPBYELEMENT_LIST(V) \
    V(fmul, NEON_FMUL_byelement) \
    V(fmla, NEON_FMLA_byelement) \
    V(fmls, NEON_FMLS_byelement) \
    V(fmulx, NEON_FMULX_byelement)

#define DEFINE_ASM_FUNC(FN, OP)                                  \
    void Assembler::FN(const VRegister& vd, const VRegister& vn, \
        const VRegister& vm, int vm_index)                       \
    {                                                            \
        NEONFPByElement(vd, vn, vm, vm_index, OP);               \
    }
    NEON_FPBYELEMENT_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC

#define NEON_BYELEMENT_LONG_LIST(V)                                \
    V(sqdmull, NEON_SQDMULL_byelement, vn.IsScalar() || vn.IsD())  \
    V(sqdmull2, NEON_SQDMULL_byelement, vn.IsVector() && vn.IsQ()) \
    V(sqdmlal, NEON_SQDMLAL_byelement, vn.IsScalar() || vn.IsD())  \
    V(sqdmlal2, NEON_SQDMLAL_byelement, vn.IsVector() && vn.IsQ()) \
    V(sqdmlsl, NEON_SQDMLSL_byelement, vn.IsScalar() || vn.IsD())  \
    V(sqdmlsl2, NEON_SQDMLSL_byelement, vn.IsVector() && vn.IsQ()) \
    V(smull, NEON_SMULL_byelement, vn.IsVector() && vn.IsD())      \
    V(smull2, NEON_SMULL_byelement, vn.IsVector() && vn.IsQ())     \
    V(umull, NEON_UMULL_byelement, vn.IsVector() && vn.IsD())      \
    V(umull2, NEON_UMULL_byelement, vn.IsVector() && vn.IsQ())     \
    V(smlal, NEON_SMLAL_byelement, vn.IsVector() && vn.IsD())      \
    V(smlal2, NEON_SMLAL_byelement, vn.IsVector() && vn.IsQ())     \
    V(umlal, NEON_UMLAL_byelement, vn.IsVector() && vn.IsD())      \
    V(umlal2, NEON_UMLAL_byelement, vn.IsVector() && vn.IsQ())     \
    V(smlsl, NEON_SMLSL_byelement, vn.IsVector() && vn.IsD())      \
    V(smlsl2, NEON_SMLSL_byelement, vn.IsVector() && vn.IsQ())     \
    V(umlsl, NEON_UMLSL_byelement, vn.IsVector() && vn.IsD())      \
    V(umlsl2, NEON_UMLSL_byelement, vn.IsVector() && vn.IsQ())

#define DEFINE_ASM_FUNC(FN, OP, AS)                              \
    void Assembler::FN(const VRegister& vd, const VRegister& vn, \
        const VRegister& vm, int vm_index)                       \
    {                                                            \
        DCHECK(AS);                                              \
        NEONByElementL(vd, vn, vm, vm_index, OP);                \
    }
    NEON_BYELEMENT_LONG_LIST(DEFINE_ASM_FUNC)
#undef DEFINE_ASM_FUNC

    void Assembler::suqadd(const VRegister& vd, const VRegister& vn)
    {
        NEON2RegMisc(vd, vn, NEON_SUQADD);
    }

    void Assembler::usqadd(const VRegister& vd, const VRegister& vn)
    {
        NEON2RegMisc(vd, vn, NEON_USQADD);
    }

    void Assembler::abs(const VRegister& vd, const VRegister& vn)
    {
        DCHECK(vd.IsVector() || vd.Is1D());
        NEON2RegMisc(vd, vn, NEON_ABS);
    }

    void Assembler::sqabs(const VRegister& vd, const VRegister& vn)
    {
        NEON2RegMisc(vd, vn, NEON_SQABS);
    }

    void Assembler::neg(const VRegister& vd, const VRegister& vn)
    {
        DCHECK(vd.IsVector() || vd.Is1D());
        NEON2RegMisc(vd, vn, NEON_NEG);
    }

    void Assembler::sqneg(const VRegister& vd, const VRegister& vn)
    {
        NEON2RegMisc(vd, vn, NEON_SQNEG);
    }

    void Assembler::NEONXtn(const VRegister& vd, const VRegister& vn,
        NEON2RegMiscOp vop)
    {
        Instr format, op = vop;
        if (vd.IsScalar()) {
            DCHECK((vd.Is1B() && vn.Is1H()) || (vd.Is1H() && vn.Is1S()) || (vd.Is1S() && vn.Is1D()));
            op |= NEON_Q | NEONScalar;
            format = SFormat(vd);
        } else {
            DCHECK((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) || (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) || (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D()));
            format = VFormat(vd);
        }
        Emit(format | op | Rn(vn) | Rd(vd));
    }

    void Assembler::xtn(const VRegister& vd, const VRegister& vn)
    {
        DCHECK(vd.IsVector() && vd.IsD());
        NEONXtn(vd, vn, NEON_XTN);
    }

    void Assembler::xtn2(const VRegister& vd, const VRegister& vn)
    {
        DCHECK(vd.IsVector() && vd.IsQ());
        NEONXtn(vd, vn, NEON_XTN);
    }

    void Assembler::sqxtn(const VRegister& vd, const VRegister& vn)
    {
        DCHECK(vd.IsScalar() || vd.IsD());
        NEONXtn(vd, vn, NEON_SQXTN);
    }

    void Assembler::sqxtn2(const VRegister& vd, const VRegister& vn)
    {
        DCHECK(vd.IsVector() && vd.IsQ());
        NEONXtn(vd, vn, NEON_SQXTN);
    }

    void Assembler::sqxtun(const VRegister& vd, const VRegister& vn)
    {
        DCHECK(vd.IsScalar() || vd.IsD());
        NEONXtn(vd, vn, NEON_SQXTUN);
    }

    void Assembler::sqxtun2(const VRegister& vd, const VRegister& vn)
    {
        DCHECK(vd.IsVector() && vd.IsQ());
        NEONXtn(vd, vn, NEON_SQXTUN);
    }

    void Assembler::uqxtn(const VRegister& vd, const VRegister& vn)
    {
        DCHECK(vd.IsScalar() || vd.IsD());
        NEONXtn(vd, vn, NEON_UQXTN);
    }

    void Assembler::uqxtn2(const VRegister& vd, const VRegister& vn)
    {
        DCHECK(vd.IsVector() && vd.IsQ());
        NEONXtn(vd, vn, NEON_UQXTN);
    }

    // NEON NOT and RBIT are distinguished by bit 22, the bottom bit of "size".
    void Assembler::not_(const VRegister& vd, const VRegister& vn)
    {
        DCHECK(AreSameFormat(vd, vn));
        DCHECK(vd.Is8B() || vd.Is16B());
        Emit(VFormat(vd) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
    }

    void Assembler::rbit(const VRegister& vd, const VRegister& vn)
    {
        DCHECK(AreSameFormat(vd, vn));
        DCHECK(vd.Is8B() || vd.Is16B());
        Emit(VFormat(vn) | (1 << NEONSize_offset) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
    }

    void Assembler::ext(const VRegister& vd, const VRegister& vn,
        const VRegister& vm, int index)
    {
        DCHECK(AreSameFormat(vd, vn, vm));
        DCHECK(vd.Is8B() || vd.Is16B());
        DCHECK((0 <= index) && (index < vd.LaneCount()));
        Emit(VFormat(vd) | NEON_EXT | Rm(vm) | ImmNEONExt(index) | Rn(vn) | Rd(vd));
    }

    void Assembler::dup(const VRegister& vd, const VRegister& vn, int vn_index)
    {
        Instr q, scalar;

        // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
        // number of lanes, and T is b, h, s or d.
        int lane_size = vn.LaneSizeInBytes();
        NEONFormatField format;
        switch (lane_size) {
        case 1:
            format = NEON_16B;
            break;
        case 2:
            format = NEON_8H;
            break;
        case 4:
            format = NEON_4S;
            break;
        default:
            DCHECK_EQ(lane_size, 8);
            format = NEON_2D;
            break;
        }

        if (vd.IsScalar()) {
            q = NEON_Q;
            scalar = NEONScalar;
        } else {
            DCHECK(!vd.Is1D());
            q = vd.IsD() ? 0 : NEON_Q;
            scalar = 0;
        }
        Emit(q | scalar | NEON_DUP_ELEMENT | ImmNEON5(format, vn_index) | Rn(vn) | Rd(vd));
    }

    void Assembler::dcptr(Label* label)
    {
        RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
        if (label->is_bound()) {
            // The label is bound, so it does not need to be updated and the internal
            // reference should be emitted.
            //
            // In this case, label->pos() returns the offset of the label from the
            // start of the buffer.
            internal_reference_positions_.push_back(pc_offset());
            dc64(reinterpret_cast<uintptr_t>(buffer_start_ + label->pos()));
        } else {
            int32_t offset;
            if (label->is_linked()) {
                // The label is linked, so the internal reference should be added
                // onto the end of the label's link chain.
                //
                // In this case, label->pos() returns the offset of the last linked
                // instruction from the start of the buffer.
                offset = label->pos() - pc_offset();
                DCHECK_NE(offset, kStartOfLabelLinkChain);
            } else {
                // The label is unused, so it now becomes linked and the internal
                // reference is at the start of the new link chain.
                offset = kStartOfLabelLinkChain;
            }
            // The instruction at pc is now the last link in the label's chain.
            label->link_to(pc_offset());

            // Traditionally the offset to the previous instruction in the chain is
            // encoded in the instruction payload (e.g. branch range), but internal
            // references are not instructions, so while unbound they are encoded as
            // two consecutive brk instructions. The two 16-bit immediates are used
            // to encode the offset.
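            // For example (illustrative): a linked offset of -8 bytes is -2
            // instructions, i.e. 0xFFFFFFFE, emitted as brk #0xFFFF followed by
            // brk #0xFFFE.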
            offset >>= kInstrSizeLog2;
            DCHECK(is_int32(offset));
            uint32_t high16 = unsigned_bitextract_32(31, 16, offset);
            uint32_t low16 = unsigned_bitextract_32(15, 0, offset);

            brk(high16);
            brk(low16);
        }
    }

    // Below, a difference in case for the same letter indicates a
    // negated bit. If b is 1, then B is 0.
    uint32_t Assembler::FPToImm8(double imm)
    {
        DCHECK(IsImmFP64(imm));
        // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
        //       0000.0000.0000.0000.0000.0000.0000.0000
        uint64_t bits = bit_cast<uint64_t>(imm);
        // bit7: a000.0000
        uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
        // bit6: 0b00.0000
        uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
        // bit5_to_0: 00cd.efgh
        uint64_t bit5_to_0 = (bits >> 48) & 0x3F;
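        // For example (illustrative): 1.0 has bits 0x3FF0000000000000, giving
        // bit7 = 0, bit6 = 0x40 and bit5_to_0 = 0x30, so FPToImm8(1.0) is 0x70.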

        return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
    }

    Instr Assembler::ImmFP(double imm) { return FPToImm8(imm) << ImmFP_offset; }
    Instr Assembler::ImmNEONFP(double imm)
    {
        return ImmNEONabcdefgh(FPToImm8(imm));
    }

    // Code generation helpers.
    void Assembler::MoveWide(const Register& rd, uint64_t imm, int shift,
        MoveWideImmediateOp mov_op)
    {
        // Ignore the top 32 bits of an immediate if we're moving to a W register.
        if (rd.Is32Bits()) {
            // Check that the top 32 bits are zero (a positive 32-bit number) or top
            // 33 bits are one (a negative 32-bit number, sign extended to 64 bits).
            DCHECK(((imm >> kWRegSizeInBits) == 0) || ((imm >> (kWRegSizeInBits - 1)) == 0x1FFFFFFFF));
            imm &= kWRegMask;
        }

        if (shift >= 0) {
            // Explicit shift specified.
            DCHECK((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48));
            DCHECK(rd.Is64Bits() || (shift == 0) || (shift == 16));
            shift /= 16;
        } else {
            // Calculate a new immediate and shift combination to encode the immediate
            // argument.
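            // For example (illustrative): 0x0000FFFF00000000 is encoded as
            // imm16 = 0xFFFF with shift = 2, i.e. movz xd, #0xFFFF, lsl #32.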
            shift = 0;
            if ((imm & ~0xFFFFULL) == 0) {
                // Nothing to do.
            } else if ((imm & ~(0xFFFFULL << 16)) == 0) {
                imm >>= 16;
                shift = 1;
            } else if ((imm & ~(0xFFFFULL << 32)) == 0) {
                DCHECK(rd.Is64Bits());
                imm >>= 32;
                shift = 2;
            } else if ((imm & ~(0xFFFFULL << 48)) == 0) {
                DCHECK(rd.Is64Bits());
                imm >>= 48;
                shift = 3;
            }
        }

        DCHECK(is_uint16(imm));

        Emit(SF(rd) | MoveWideImmediateFixed | mov_op | Rd(rd) | ImmMoveWide(static_cast<int>(imm)) | ShiftMoveWide(shift));
    }

    void Assembler::AddSub(const Register& rd, const Register& rn,
        const Operand& operand, FlagsUpdate S, AddSubOp op)
    {
        DCHECK_EQ(rd.SizeInBits(), rn.SizeInBits());
        DCHECK(!operand.NeedsRelocation(this));
        if (operand.IsImmediate()) {
            int64_t immediate = operand.ImmediateValue();
            DCHECK(IsImmAddSub(immediate));
            Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
            Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) | ImmAddSub(static_cast<int>(immediate)) | dest_reg | RnSP(rn));
        } else if (operand.IsShiftedRegister()) {
            DCHECK_EQ(operand.reg().SizeInBits(), rd.SizeInBits());
            DCHECK_NE(operand.shift(), ROR);

            // For instructions of the form:
            //   add/sub   wsp, <Wn>, <Wm> [, LSL #0-3 ]
            //   add/sub   <Wd>, wsp, <Wm> [, LSL #0-3 ]
            //   add/sub   wsp, wsp, <Wm> [, LSL #0-3 ]
            //   adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ]
            // or their 64-bit register equivalents, convert the operand from shifted to
            // extended register mode, and emit an add/sub extended instruction.
            if (rn.IsSP() || rd.IsSP()) {
                DCHECK(!(rd.IsSP() && (S == SetFlags)));
                DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S,
                    AddSubExtendedFixed | op);
            } else {
                DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
            }
        } else {
            DCHECK(operand.IsExtendedRegister());
            DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
        }
    }

    void Assembler::AddSubWithCarry(const Register& rd, const Register& rn,
        const Operand& operand, FlagsUpdate S,
        AddSubWithCarryOp op)
    {
        DCHECK_EQ(rd.SizeInBits(), rn.SizeInBits());
        DCHECK_EQ(rd.SizeInBits(), operand.reg().SizeInBits());
        DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
        DCHECK(!operand.NeedsRelocation(this));
        Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
    }

    void Assembler::hlt(int code)
    {
        DCHECK(is_uint16(code));
        Emit(HLT | ImmException(code));
    }

    void Assembler::brk(int code)
    {
        DCHECK(is_uint16(code));
        Emit(BRK | ImmException(code));
    }

    void Assembler::EmitStringData(const char* string)
    {
        size_t len = strlen(string) + 1;
        DCHECK_LE(RoundUp(len, kInstrSize), static_cast<size_t>(kGap));
        EmitData(string, static_cast<int>(len));
        // Pad with null characters until pc_ is aligned.
        const char pad[] = { '\0', '\0', '\0', '\0' };
        static_assert(sizeof(pad) == kInstrSize,
            "Size of padding must match instruction size.");
        EmitData(pad, RoundUp(pc_offset(), kInstrSize) - pc_offset());
    }

    void Assembler::debug(const char* message, uint32_t code, Instr params)
    {
#ifdef USE_SIMULATOR
        if (options().enable_simulator_code) {
            // The arguments to the debug marker need to be contiguous in memory, so
            // make sure we don't try to emit pools.
            BlockPoolsScope scope(this);

            Label start;
            bind(&start);

            // Refer to instructions-arm64.h for a description of the marker and its
            // arguments.
            hlt(kImmExceptionIsDebug);
            DCHECK_EQ(SizeOfCodeGeneratedSince(&start), kDebugCodeOffset);
            dc32(code);
            DCHECK_EQ(SizeOfCodeGeneratedSince(&start), kDebugParamsOffset);
            dc32(params);
            DCHECK_EQ(SizeOfCodeGeneratedSince(&start), kDebugMessageOffset);
            EmitStringData(message);
            hlt(kImmExceptionIsUnreachable);

            return;
        }
        // Fall through if Serializer is enabled.
#else
        // Make sure we haven't dynamically enabled simulator code when there is no
        // simulator built in.
        DCHECK(!options().enable_simulator_code);
#endif

        if (params & BREAK) {
            brk(0);
        }
    }

    void Assembler::Logical(const Register& rd,
        const Register& rn,
        const Operand& operand,
        LogicalOp op)
    {
        DCHECK(rd.SizeInBits() == rn.SizeInBits());
        DCHECK(!operand.NeedsRelocation(this));
        if (operand.IsImmediate()) {
            int64_t immediate = operand.ImmediateValue();
            unsigned reg_size = rd.SizeInBits();

            DCHECK_NE(immediate, 0);
            DCHECK_NE(immediate, -1);
            DCHECK(rd.Is64Bits() || is_uint32(immediate));

            // If the operation is NOT, invert the operation and immediate.
            if ((op & NOT) == NOT) {
                op = static_cast<LogicalOp>(op & ~NOT);
                immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);
            }
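            // For example (illustrative): an ORN with immediate 0xFF00 on an
            // X register is emitted as an ORR with immediate ~0xFF00.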

            unsigned n, imm_s, imm_r;
            if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
                // Immediate can be encoded in the instruction.
                LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
            } else {
                // This case is handled in the macro assembler.
                UNREACHABLE();
            }
        } else {
            DCHECK(operand.IsShiftedRegister());
            DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
            Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
            DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
        }
    }

    void Assembler::LogicalImmediate(const Register& rd,
        const Register& rn,
        unsigned n,
        unsigned imm_s,
        unsigned imm_r,
        LogicalOp op)
    {
        unsigned reg_size = rd.SizeInBits();
        Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd);
        Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) | ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg | Rn(rn));
    }

    void Assembler::ConditionalCompare(const Register& rn,
        const Operand& operand,
        StatusFlags nzcv,
        Condition cond,
        ConditionalCompareOp op)
    {
        Instr ccmpop;
        DCHECK(!operand.NeedsRelocation(this));
        if (operand.IsImmediate()) {
            int64_t immediate = operand.ImmediateValue();
            DCHECK(IsImmConditionalCompare(immediate));
            ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(static_cast<unsigned>(immediate));
        } else {
            DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
            ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
        }
        Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
    }

    void Assembler::DataProcessing1Source(const Register& rd,
        const Register& rn,
        DataProcessing1SourceOp op)
    {
        DCHECK(rd.SizeInBits() == rn.SizeInBits());
        Emit(SF(rn) | op | Rn(rn) | Rd(rd));
    }

    void Assembler::FPDataProcessing1Source(const VRegister& vd,
        const VRegister& vn,
        FPDataProcessing1SourceOp op)
    {
        Emit(FPType(vn) | op | Rn(vn) | Rd(vd));
    }

    void Assembler::FPDataProcessing2Source(const VRegister& fd,
        const VRegister& fn,
        const VRegister& fm,
        FPDataProcessing2SourceOp op)
    {
        DCHECK(fd.SizeInBits() == fn.SizeInBits());
        DCHECK(fd.SizeInBits() == fm.SizeInBits());
        Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd));
    }

    void Assembler::FPDataProcessing3Source(const VRegister& fd,
        const VRegister& fn,
        const VRegister& fm,
        const VRegister& fa,
        FPDataProcessing3SourceOp op)
    {
        DCHECK(AreSameSizeAndType(fd, fn, fm, fa));
        Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd) | Ra(fa));
    }

    void Assembler::NEONModifiedImmShiftLsl(const VRegister& vd, const int imm8,
        const int left_shift,
        NEONModifiedImmediateOp op)
    {
        DCHECK(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H() || vd.Is2S() || vd.Is4S());
        DCHECK((left_shift == 0) || (left_shift == 8) || (left_shift == 16) || (left_shift == 24));
        DCHECK(is_uint8(imm8));

        int cmode_1, cmode_2, cmode_3;
        if (vd.Is8B() || vd.Is16B()) {
            DCHECK_EQ(op, NEONModifiedImmediate_MOVI);
            cmode_1 = 1;
            cmode_2 = 1;
            cmode_3 = 1;
        } else {
            cmode_1 = (left_shift >> 3) & 1;
            cmode_2 = left_shift >> 4;
            cmode_3 = 0;
            if (vd.Is4H() || vd.Is8H()) {
                DCHECK((left_shift == 0) || (left_shift == 8));
                cmode_3 = 1;
            }
        }
        int cmode = (cmode_3 << 3) | (cmode_2 << 2) | (cmode_1 << 1);
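        // For example (illustrative): a 4H/8H immediate with left_shift 8 gives
        // cmode 0b1010, and a 2S/4S immediate with left_shift 16 gives cmode
        // 0b0100.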

        Instr q = vd.IsQ() ? NEON_Q : 0;

        Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd));
    }

    void Assembler::NEONModifiedImmShiftMsl(const VRegister& vd, const int imm8,
        const int shift_amount,
        NEONModifiedImmediateOp op)
    {
        DCHECK(vd.Is2S() || vd.Is4S());
        DCHECK((shift_amount == 8) || (shift_amount == 16));
        DCHECK(is_uint8(imm8));

        int cmode_0 = (shift_amount >> 4) & 1;
        int cmode = 0xC | cmode_0;

        Instr q = vd.IsQ() ? NEON_Q : 0;

        Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd));
    }

    void Assembler::EmitShift(const Register& rd,
        const Register& rn,
        Shift shift,
        unsigned shift_amount)
    {
        switch (shift) {
        case LSL:
            lsl(rd, rn, shift_amount);
            break;
        case LSR:
            lsr(rd, rn, shift_amount);
            break;
        case ASR:
            asr(rd, rn, shift_amount);
            break;
        case ROR:
            ror(rd, rn, shift_amount);
            break;
        default:
            UNREACHABLE();
        }
    }

    void Assembler::EmitExtendShift(const Register& rd,
        const Register& rn,
        Extend extend,
        unsigned left_shift)
    {
        DCHECK(rd.SizeInBits() >= rn.SizeInBits());
        unsigned reg_size = rd.SizeInBits();
        // Use the correct size of register.
        Register rn_ = Register::Create(rn.code(), rd.SizeInBits());
        // Bits extracted are high_bit:0.
        unsigned high_bit = (8 << (extend & 0x3)) - 1;
        // Number of bits left in the result that are not introduced by the shift.
        unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1);
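        // For example (illustrative): SXTB with left_shift 1 on a 32-bit rd
        // gives high_bit = 7 and non_shift_bits = 31, so the extend is emitted
        // as sbfm(rd, rn_, 31, 7).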

        if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) {
            switch (extend) {
            case UXTB:
            case UXTH:
            case UXTW:
                ubfm(rd, rn_, non_shift_bits, high_bit);
                break;
            case SXTB:
            case SXTH:
            case SXTW:
                sbfm(rd, rn_, non_shift_bits, high_bit);
                break;
            case UXTX:
            case SXTX: {
                DCHECK_EQ(rn.SizeInBits(), kXRegSizeInBits);
                // Nothing to extend. Just shift.
                lsl(rd, rn_, left_shift);
                break;
            }
            default:
                UNREACHABLE();
            }
        } else {
            // No need to extend as the extended bits would be shifted away.
            lsl(rd, rn_, left_shift);
        }
    }

    void Assembler::DataProcShiftedRegister(const Register& rd,
        const Register& rn,
        const Operand& operand,
        FlagsUpdate S,
        Instr op)
    {
        DCHECK(operand.IsShiftedRegister());
        DCHECK(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
        DCHECK(!operand.NeedsRelocation(this));
        Emit(SF(rd) | op | Flags(S) | ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
    }

    void Assembler::DataProcExtendedRegister(const Register& rd,
        const Register& rn,
        const Operand& operand,
        FlagsUpdate S,
        Instr op)
    {
        DCHECK(!operand.NeedsRelocation(this));
        Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
        Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) | dest_reg | RnSP(rn));
    }

    bool Assembler::IsImmAddSub(int64_t immediate)
    {
        return is_uint12(immediate) || (is_uint12(immediate >> 12) && ((immediate & 0xFFF) == 0));
    }
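    // For example (illustrative): IsImmAddSub(0x123) and IsImmAddSub(0x123000)
    // return true, while IsImmAddSub(0x123456) returns false because its low
    // twelve bits are not clear.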

    void Assembler::LoadStore(const CPURegister& rt,
        const MemOperand& addr,
        LoadStoreOp op)
    {
        Instr memop = op | Rt(rt) | RnSP(addr.base());

        if (addr.IsImmediateOffset()) {
            unsigned size = CalcLSDataSize(op);
            if (IsImmLSScaled(addr.offset(), size)) {
                int offset = static_cast<int>(addr.offset());
                // Use the scaled addressing mode.
                Emit(LoadStoreUnsignedOffsetFixed | memop | ImmLSUnsigned(offset >> size));
            } else if (IsImmLSUnscaled(addr.offset())) {
                int offset = static_cast<int>(addr.offset());
                // Use the unscaled addressing mode.
                Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset));
            } else {
                // This case is handled in the macro assembler.
                UNREACHABLE();
            }
        } else if (addr.IsRegisterOffset()) {
            Extend ext = addr.extend();
            Shift shift = addr.shift();
            unsigned shift_amount = addr.shift_amount();

            // LSL is encoded in the option field as UXTX.
            if (shift == LSL) {
                ext = UXTX;
            }

            // Shifts are encoded in one bit, indicating a left shift by the memory
            // access size.
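            // For example (illustrative): ldr x0, [x1, x2, LSL #3] has
            // shift_amount 3, matching the X-register access size, and is
            // encoded with ImmShiftLS(1).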
            DCHECK((shift_amount == 0) || (shift_amount == static_cast<unsigned>(CalcLSDataSize(op))));
            Emit(LoadStoreRegisterOffsetFixed | memop | Rm(addr.regoffset()) | ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0));
        } else {
            // Pre-index and post-index modes.
            DCHECK(!rt.Is(addr.base()));
            if (IsImmLSUnscaled(addr.offset())) {
                int offset = static_cast<int>(addr.offset());
                if (addr.IsPreIndex()) {
                    Emit(LoadStorePreIndexFixed | memop | ImmLS(offset));
                } else {
                    DCHECK(addr.IsPostIndex());
                    Emit(LoadStorePostIndexFixed | memop | ImmLS(offset));
                }
            } else {
                // This case is handled in the macro assembler.
                UNREACHABLE();
            }
        }
    }

    bool Assembler::IsImmLSUnscaled(int64_t offset)
    {
        return is_int9(offset);
    }

    bool Assembler::IsImmLSScaled(int64_t offset, unsigned size)
    {
        bool offset_is_size_multiple = (((offset >> size) << size) == offset);
        return offset_is_size_multiple && is_uint12(offset >> size);
    }

    bool Assembler::IsImmLSPair(int64_t offset, unsigned size)
    {
        bool offset_is_size_multiple = (((offset >> size) << size) == offset);
        return offset_is_size_multiple && is_int7(offset >> size);
    }

    bool Assembler::IsImmLLiteral(int64_t offset)
    {
        int inst_size = static_cast<int>(kInstrSizeLog2);
        bool offset_is_inst_multiple = (((offset >> inst_size) << inst_size) == offset);
        DCHECK_GT(offset, 0);
        offset >>= kLoadLiteralScaleLog2;
        return offset_is_inst_multiple && is_intn(offset, ImmLLiteral_width);
    }

    // Test if a given value can be encoded in the immediate field of a logical
    // instruction.
    // If it can be encoded, the function returns true, and the values pointed to
    // by n, imm_s and imm_r are updated with the immediates encoded in the format
    // required by the corresponding fields in the logical instruction.
    // If it cannot be encoded, the function returns false, and the values pointed
    // to by n, imm_s and imm_r are undefined.
    bool Assembler::IsImmLogical(uint64_t value,
        unsigned width,
        unsigned* n,
        unsigned* imm_s,
        unsigned* imm_r)
    {
        DCHECK((n != nullptr) && (imm_s != nullptr) && (imm_r != nullptr));
        DCHECK((width == kWRegSizeInBits) || (width == kXRegSizeInBits));

        bool negate = false;

        // Logical immediates are encoded using parameters n, imm_s and imm_r using
        // the following table:
        //
        //    N   imms    immr    size        S             R
        //    1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
        //    0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
        //    0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
        //    0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
        //    0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
        //    0  11110s  xxxxxr     2    UInt(s)       UInt(r)
        // (s bits must not be all set)
        //
        // A pattern is constructed of size bits, where the least significant S+1 bits
        // are set. The pattern is rotated right by R, and repeated across a 32 or
        // 64-bit value, depending on destination register width.
        //
        // Put another way: the basic format of a logical immediate is a single
        // contiguous stretch of 1 bits, repeated across the whole word at intervals
        // given by a power of 2. To identify them quickly, we first locate the
        // lowest stretch of 1 bits, then the next 1 bit above that; that combination
        // is different for every logical immediate, so it gives us all the
        // information we need to identify the only logical immediate that our input
        // could be, and then we simply check if that's the value we actually have.
        //
        // (The rotation parameter does give the possibility of the stretch of 1 bits
        // going 'round the end' of the word. To deal with that, we observe that in
        // any situation where that happens the bitwise NOT of the value is also a
        // valid logical immediate. So we simply invert the input whenever its low bit
        // is set, and then we know that the rotated case can't arise.)
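        // Worked example (illustrative): 0x0F0F0F0F0F0F0F0F is a valid 64-bit
        // logical immediate, encoded as N = 0, imm_s = 0b110011, imm_r = 0:
        // four set bits repeated every eight bits, with no rotation.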

        if (value & 1) {
            // If the low bit is 1, negate the value, and set a flag to remember that we
            // did (so that we can adjust the return values appropriately).
            negate = true;
            value = ~value;
        }

        if (width == kWRegSizeInBits) {
            // To handle 32-bit logical immediates, the very easiest thing is to repeat
            // the input value twice to make a 64-bit word. The correct encoding of that
            // as a logical immediate will also be the correct encoding of the 32-bit
            // value.

            // The most-significant 32 bits might not be zero (i.e. negate is true),
            // so shift the value left before duplicating it.
            value <<= kWRegSizeInBits;
            value |= value >> kWRegSizeInBits;
        }

        // The basic analysis idea: imagine our input word looks like this.
        //
        //    0011111000111110001111100011111000111110001111100011111000111110
        //                                                          c  b    a
        //                                                          |<--d-->|
        //
        // We find the lowest set bit (as an actual power-of-2 value, not its index)
        // and call it a. Then we add a to our original number, which wipes out the
        // bottommost stretch of set bits and replaces it with a 1 carried into the
        // next zero bit. Then we look for the new lowest set bit, which is in
        // position b, and subtract it, so now our number is just like the original
        // but with the lowest stretch of set bits completely gone. Now we find the
        // lowest set bit again, which is position c in the diagram above. Then we'll
        // measure the distance d between bit positions a and c (using CLZ), and that
        // tells us that the only valid logical immediate that could possibly be equal
        // to this number is the one in which a stretch of bits running from a to just
        // below b is replicated every d bits.
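        // For example (illustrative): for value 0xF0F0F0F0F0F0F0F0 (the input
        // 0x0F0F0F0F0F0F0F0F after the inversion above), a = 0x10, b = 0x100
        // and c = 0x1000, giving a repeat distance d of 8 bits.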
        uint64_t a = LargestPowerOf2Divisor(value);
        uint64_t value_plus_a = value + a;
        uint64_t b = LargestPowerOf2Divisor(value_plus_a);
        uint64_t value_plus_a_minus_b = value_plus_a - b;
        uint64_t c = LargestPowerOf2Divisor(value_plus_a_minus_b);

        int d, clz_a, out_n;
        uint64_t mask;

        if (c != 0) {
            // The general case, in which there is more than one stretch of set bits.
            // Compute the repeat distance d, and set up a bitmask covering the basic
            // unit of repetition (i.e. a word with the bottom d bits set). Also, in all
            // of these cases the N bit of the output will be zero.
            clz_a = CountLeadingZeros(a, kXRegSizeInBits);
            int clz_c = CountLeadingZeros(c, kXRegSizeInBits);
            d = clz_a - clz_c;
            mask = ((uint64_t { 1 } << d) - 1);
            out_n = 0;
        } else {
            // Handle degenerate cases.
            //
            // If any of those 'find lowest set bit' operations didn't find a set bit at
            // all, then the word will have been zero thereafter, so in particular the
            // last lowest_set_bit operation will have returned zero. So we can test for
            // all the special case conditions in one go by seeing if c is zero.
            if (a == 0) {
                // The input was zero (or all 1 bits, which will come to here too after we
                // inverted it at the start of the function), for which we just return
                // false.
                return false;
            } else {
                // Otherwise, if c was zero but a was not, then there's just one stretch
                // of set bits in our word, meaning that we have the trivial case of
                // d == 64 and only one 'repetition'. Set up all the same variables as in
                // the general case above, and set the N bit in the output.
                clz_a = CountLeadingZeros(a, kXRegSizeInBits);
                d = 64;
                mask = ~uint64_t { 0 };
                out_n = 1;
            }
        }

        // If the repeat period d is not a power of two, it can't be encoded.
        if (!base::bits::IsPowerOfTwo(d)) {
            return false;
        }

        if (((b - a) & ~mask) != 0) {
            // If the bit stretch (b - a) does not fit within the mask derived from the
            // repeat period, then fail.
            return false;
        }

        // The only possible option is b - a repeated every d bits. Now we're going to
        // actually construct the valid logical immediate derived from that
        // specification, and see if it equals our original input.
        //
        // To repeat a value every d bits, we multiply it by a number of the form
        // (1 + 2^d + 2^(2d) + ...), i.e. 0x0001000100010001 or similar. These can
        // be derived using a table lookup on CLZ(d).
        static const uint64_t multipliers[] = {
            0x0000000000000001UL,
            0x0000000100000001UL,
            0x0001000100010001UL,
            0x0101010101010101UL,
            0x1111111111111111UL,
            0x5555555555555555UL,
        };
        int multiplier_idx = CountLeadingZeros(d, kXRegSizeInBits) - 57;
        // Ensure that the index to the multipliers array is within bounds.
        DCHECK((multiplier_idx >= 0) && (static_cast<size_t>(multiplier_idx) < arraysize(multipliers)));
        uint64_t multiplier = multipliers[multiplier_idx];
        uint64_t candidate = (b - a) * multiplier;

        if (value != candidate) {
            // The candidate pattern doesn't match our input value, so fail.
            return false;
        }

        // We have a match! This is a valid logical immediate, so now we have to
        // construct the bits and pieces of the instruction encoding that generates
        // it.

        // Count the set bits in our basic stretch. The special case of clz(0) == -1
        // makes the answer come out right for stretches that reach the very top of
        // the word (e.g. numbers like 0xFFFFC00000000000).
        int clz_b = (b == 0) ? -1 : CountLeadingZeros(b, kXRegSizeInBits);
        int s = clz_a - clz_b;

        // Decide how many bits to rotate right by, to put the low bit of that basic
        // stretch in position a.
        int r;
        if (negate) {
            // If we inverted the input right at the start of this function, here's
            // where we compensate: the number of set bits becomes the number of clear
            // bits, and the rotation count is based on position b rather than position
            // a (since b is the location of the 'lowest' 1 bit after inversion).
            s = d - s;
            r = (clz_b + 1) & (d - 1);
        } else {
            r = (clz_a + 1) & (d - 1);
        }

        // Now we're done, except for having to encode the S output in such a way that
        // it gives both the number of set bits and the length of the repeated
        // segment. The s field is encoded like this:
        //
        //     imms    size        S
        //    ssssss    64    UInt(ssssss)
        //    0sssss    32    UInt(sssss)
        //    10ssss    16    UInt(ssss)
        //    110sss     8    UInt(sss)
        //    1110ss     4    UInt(ss)
        //    11110s     2    UInt(s)
        //
        // So we 'or' (-d << 1) with (s - 1) to form imms; note that the encoded
        // field holds one less than the number of set bits.
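        //
        // Continuing the 0x00FF00FF00FF00FF example: d == 16 and s == 8, so
        // imms == ((-16 << 1) | (8 - 1)) & 0x3F == 0b100111 ('10ssss' with
        // ssss == 0111), with N == 0 and immr == 0.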
        *n = out_n;
        *imm_s = ((-d << 1) | (s - 1)) & 0x3F;
        *imm_r = r;

        return true;
    }

    bool Assembler::IsImmConditionalCompare(int64_t immediate)
    {
        return is_uint5(immediate);
    }

    bool Assembler::IsImmFP32(float imm)
    {
        // Valid values will have the form:
        // aBbb.bbbc.defg.h000.0000.0000.0000.0000
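        // For example, 2.0f is 0x40000000: bits[18..0] are zero, the 'b' bits
        // (bits[29..25]) are all clear, and bit 30 (1) is the opposite of
        // bit 29 (0), so it is encodable.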
        uint32_t bits = bit_cast<uint32_t>(imm);
        // bits[18..0] are cleared.
        if ((bits & 0x7FFFF) != 0) {
            return false;
        }

        // bits[29..25] are all set or all cleared.
        uint32_t b_pattern = (bits >> 16) & 0x3E00;
        if (b_pattern != 0 && b_pattern != 0x3E00) {
            return false;
        }

        // bit[30] and bit[29] are opposite.
        if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
            return false;
        }

        return true;
    }

    bool Assembler::IsImmFP64(double imm)
    {
        // Valid values will have the form:
        // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
        // 0000.0000.0000.0000.0000.0000.0000.0000
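        // For example, 1.0 is 0x3FF0000000000000: bits[47..0] are zero, the
        // 'b' bits (bits[61..54]) are all set, and bit 62 (0) is the opposite
        // of bit 61 (1), so it is encodable.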
        uint64_t bits = bit_cast<uint64_t>(imm);
        // bits[47..0] are cleared.
        if ((bits & 0xFFFFFFFFFFFFL) != 0) {
            return false;
        }

        // bits[61..54] are all set or all cleared.
        uint32_t b_pattern = (bits >> 48) & 0x3FC0;
        if (b_pattern != 0 && b_pattern != 0x3FC0) {
            return false;
        }

        // bit[62] and bit[61] are opposite.
        if (((bits ^ (bits << 1)) & 0x4000000000000000L) == 0) {
            return false;
        }

        return true;
    }

    void Assembler::GrowBuffer()
    {
        // Compute new buffer size.
        int old_size = buffer_->size();
        int new_size = std::min(2 * old_size, old_size + 1 * MB);

        // Some internal data structures overflow for very large buffers;
        // kMaximalBufferSize must be kept small enough to avoid this.
        if (new_size > kMaximalBufferSize) {
            V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
        }

        // Set up new buffer.
        std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
        DCHECK_EQ(new_size, new_buffer->size());
        byte* new_start = new_buffer->start();

        // Copy the data.
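        // The instruction stream grows upwards from buffer_start_, while the
        // relocation info grows downwards from the end of the buffer, so each
        // region is moved with its own delta.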
        intptr_t pc_delta = new_start - buffer_start_;
        intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
        size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
        memmove(new_start, buffer_start_, pc_offset());
        memmove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
            reloc_size);

        // Switch buffers.
        buffer_ = std::move(new_buffer);
        buffer_start_ = new_start;
        pc_ += pc_delta;
        reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
            reloc_info_writer.last_pc() + pc_delta);

        // None of our relocation types are pc-relative pointing outside the code
        // buffer, nor pc-absolute pointing inside it, so there is no need to
        // relocate any emitted relocation entries.

        // Relocate internal references.
        for (auto pos : internal_reference_positions_) {
            intptr_t* p = reinterpret_cast<intptr_t*>(buffer_start_ + pos);
            *p += pc_delta;
        }

        // Pending relocation entries are also relative, no need to relocate.
    }

    void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
        ConstantPoolMode constant_pool_mode)
    {
        if ((rmode == RelocInfo::INTERNAL_REFERENCE)
            || (rmode == RelocInfo::CONST_POOL)
            || (rmode == RelocInfo::VENEER_POOL)
            || (rmode == RelocInfo::DEOPT_SCRIPT_OFFSET)
            || (rmode == RelocInfo::DEOPT_INLINING_ID)
            || (rmode == RelocInfo::DEOPT_REASON)
            || (rmode == RelocInfo::DEOPT_ID)) {
            // Adjust code for new modes.
            DCHECK(RelocInfo::IsDeoptReason(rmode) || RelocInfo::IsDeoptId(rmode)
                || RelocInfo::IsDeoptPosition(rmode)
                || RelocInfo::IsInternalReference(rmode)
                || RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode));
            // These modes do not need an entry in the constant pool.
        } else if (constant_pool_mode == NEEDS_POOL_ENTRY) {
            bool new_constpool_entry = constpool_.RecordEntry(data, rmode);
            // Make sure the constant pool is not emitted in place of the next
            // instruction for which we just recorded relocation info.
            BlockConstPoolFor(1);
            if (!new_constpool_entry)
                return;
        }
        // For modes that cannot use the constant pool, a different sequence of
        // instructions will be emitted by this function's caller.

        if (!ShouldRecordRelocInfo(rmode))
            return;

        // We do not try to reuse pool constants.
        RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());

        DCHECK_GE(buffer_space(), kMaxRelocSize); // too late to grow buffer here
        reloc_info_writer.Write(&rinfo);
    }

    void Assembler::near_jump(int offset, RelocInfo::Mode rmode)
    {
        if (!RelocInfo::IsNone(rmode))
            RecordRelocInfo(rmode, offset, NO_POOL_ENTRY);
        b(offset);
    }

    void Assembler::near_call(int offset, RelocInfo::Mode rmode)
    {
        if (!RelocInfo::IsNone(rmode))
            RecordRelocInfo(rmode, offset, NO_POOL_ENTRY);
        bl(offset);
    }

    void Assembler::near_call(HeapObjectRequest request)
    {
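        // The Code target is not yet known, so record an empty code-target slot
        // and temporarily encode its index as the branch offset; the real target
        // is installed once the requested heap object has been allocated.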
        RequestHeapObject(request);
        int index = AddCodeTarget(Handle<Code>());
        RecordRelocInfo(RelocInfo::CODE_TARGET, index, NO_POOL_ENTRY);
        bl(index);
    }

    void Assembler::BlockConstPoolFor(int instructions)
    {
        int pc_limit = pc_offset() + instructions * kInstrSize;
        if (no_const_pool_before_ < pc_limit) {
            no_const_pool_before_ = pc_limit;
            // Make sure the pool won't be blocked for too long.
            DCHECK(pc_limit < constpool_.MaxPcOffset());
        }

        if (next_constant_pool_check_ < no_const_pool_before_) {
            next_constant_pool_check_ = no_const_pool_before_;
        }
    }

    void Assembler::CheckConstPool(bool force_emit, bool require_jump)
    {
        // Some short sequences of instructions must not be broken up by constant
        // pool emission; such sequences are protected by calls to BlockConstPoolFor
        // and BlockConstPoolScope.
        if (is_const_pool_blocked()) {
            // Something is wrong if emission is forced and blocked at the same time.
            DCHECK(!force_emit);
            return;
        }

        // There is nothing to do if there are no pending constant pool entries.
        if (constpool_.IsEmpty()) {
            // Calculate the offset of the next check.
            SetNextConstPoolCheckIn(kCheckConstPoolInterval);
            return;
        }

        // We emit a constant pool when:
        //  * requested to do so by parameter force_emit (e.g. after each function).
        //  * the distance to the first instruction accessing the constant pool is
        //    kApproxMaxDistToConstPool or more.
        //  * the number of entries in the pool is kApproxMaxPoolEntryCount or more.
        int dist = constpool_.DistanceToFirstUse();
        int count = constpool_.EntryCount();
        if (!force_emit && (dist < kApproxMaxDistToConstPool) && (count < kApproxMaxPoolEntryCount)) {
            return;
        }

        // Emit veneers for branches that would go out of range during emission of the
        // constant pool.
        int worst_case_size = constpool_.WorstCaseSize();
        CheckVeneerPool(false, require_jump,
            kVeneerDistanceMargin + worst_case_size);

        // Check that the code buffer is large enough before emitting the constant
        // pool (this includes the gap to the relocation information).
        int needed_space = worst_case_size + kGap + 1 * kInstrSize;
        while (buffer_space() <= needed_space) {
            GrowBuffer();
        }

        Label size_check;
        bind(&size_check);
        constpool_.Emit(require_jump);
        DCHECK(SizeOfCodeGeneratedSince(&size_check) <= static_cast<unsigned>(worst_case_size));

        // Since a constant pool was just emitted, move the check offset forward by
        // the standard interval.
        SetNextConstPoolCheckIn(kCheckConstPoolInterval);
    }

    bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin)
    {
        // Account for the branch around the veneers and the guard.
        int protection_offset = 2 * kInstrSize;
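        // Pessimistically assume that every other unresolved branch may also need
        // a veneer emitted in the same pool, each up to kMaxVeneerCodeSize bytes.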
        return pc_offset() > max_reachable_pc - margin - protection_offset
            - static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
    }

    void Assembler::RecordVeneerPool(int location_offset, int size)
    {
        RelocInfo rinfo(reinterpret_cast<Address>(buffer_start_) + location_offset,
            RelocInfo::VENEER_POOL, static_cast<intptr_t>(size), Code());
        reloc_info_writer.Write(&rinfo);
    }

    void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin)
    {
        BlockPoolsScope scope(this);
        RecordComment("[ Veneers");

        // The exact size of the veneer pool must be recorded (see the comment at the
        // declaration site of RecordConstPool()), but computing the number of
        // veneers that will be generated is not obvious. So instead we remember the
        // current position and will record the size after the pool has been
        // generated.
        Label size_check;
        bind(&size_check);
        int veneer_pool_relocinfo_loc = pc_offset();

        Label end;
        if (need_protection) {
            b(&end);
        }

        EmitVeneersGuard();

#ifdef DEBUG
        Label veneer_size_check;
#endif

        auto it = unresolved_branches_.begin();
        while (it != unresolved_branches_.end()) {
            if (force_emit || ShouldEmitVeneer(it->first, margin)) {
                Instruction* branch = InstructionAt(it->second.pc_offset_);
                Label* label = it->second.label_;

#ifdef DEBUG
                bind(&veneer_size_check);
#endif
                // Patch the branch to point to the current position, and emit a branch
                // to the label.
                Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
                RemoveBranchFromLabelLinkChain(branch, label, veneer);
                branch->SetImmPCOffsetTarget(options(), veneer);
                b(label);
#ifdef DEBUG
                DCHECK(SizeOfCodeGeneratedSince(&veneer_size_check) <= static_cast<uint64_t>(kMaxVeneerCodeSize));
                veneer_size_check.Unuse();
#endif

                // erase() returns the iterator following the erased element.
                it = unresolved_branches_.erase(it);
            } else {
                ++it;
            }
        }

        // Record the veneer pool size.
        int pool_size = static_cast<int>(SizeOfCodeGeneratedSince(&size_check));
        RecordVeneerPool(veneer_pool_relocinfo_loc, pool_size);

        if (unresolved_branches_.empty()) {
            next_veneer_pool_check_ = kMaxInt;
        } else {
            next_veneer_pool_check_ = unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
        }

        bind(&end);

        RecordComment("]");
    }

    void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
        int margin)
    {
        // There is nothing to do if there are no pending veneer pool entries.
        if (unresolved_branches_.empty()) {
            DCHECK_EQ(next_veneer_pool_check_, kMaxInt);
            return;
        }

        DCHECK(pc_offset() < unresolved_branches_first_limit());

        // Some short sequences of instructions must not be broken up by veneer
        // pool emission; such sequences are protected by calls to BlockVeneerPoolFor
        // and BlockVeneerPoolScope.
        if (is_veneer_pool_blocked()) {
            DCHECK(!force_emit);
            return;
        }

        if (!require_jump) {
            // Prefer emitting veneers protected by an existing instruction.
            margin *= kVeneerNoProtectionFactor;
        }
        if (force_emit || ShouldEmitVeneers(margin)) {
            EmitVeneers(force_emit, require_jump, margin);
        } else {
            next_veneer_pool_check_ = unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
        }
    }

    int Assembler::buffer_space() const
    {
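        // Space remaining between the last emitted instruction and the start of
        // the relocation info, which grows downwards from the end of the buffer.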
        return static_cast<int>(reloc_info_writer.pos() - pc_);
    }

    void Assembler::RecordConstPool(int size)
    {
        // We only need this for debugger support, to correctly compute offsets in the
        // code.
        RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
    }

    void PatchingAssembler::PatchAdrFar(int64_t target_offset)
    {
        // The code at the current instruction should be:
        //   adr  rd, 0
        //   nop  (adr_far)
        //   nop  (adr_far)
        //   movz scratch, 0

        // Verify the expected code.
        Instruction* expected_adr = InstructionAt(0);
        CHECK(expected_adr->IsAdr() && (expected_adr->ImmPCRel() == 0));
        int rd_code = expected_adr->Rd();
        for (int i = 0; i < kAdrFarPatchableNNops; ++i) {
            CHECK(InstructionAt((i + 1) * kInstrSize)->IsNop(ADR_FAR_NOP));
        }
        Instruction* expected_movz = InstructionAt((kAdrFarPatchableNInstrs - 1) * kInstrSize);
        CHECK(expected_movz->IsMovz() && (expected_movz->ImmMoveWide() == 0) && (expected_movz->ShiftMoveWide() == 0));
        int scratch_code = expected_movz->Rd();

        // Patch to load the correct address.
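        // The patched sequence becomes:
        //   adr  rd, #(target_offset & 0xFFFF)
        //   movz scratch, #((target_offset >> 16) & 0xFFFF), lsl #16
        //   movk scratch, #((target_offset >> 32) & 0xFFFF), lsl #32
        //   add  rd, rd, scratch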
        Register rd = Register::XRegFromCode(rd_code);
        Register scratch = Register::XRegFromCode(scratch_code);
        // Addresses are only 48 bits.
        adr(rd, target_offset & 0xFFFF);
        movz(scratch, (target_offset >> 16) & 0xFFFF, 16);
        movk(scratch, (target_offset >> 32) & 0xFFFF, 32);
        DCHECK_EQ(target_offset >> 48, 0);
        add(rd, rd, scratch);
    }

    void PatchingAssembler::PatchSubSp(uint32_t immediate)
    {
        // The code at the current instruction should be:
        //   sub sp, sp, #0

        // Verify the expected code.
        Instruction* expected_sub = InstructionAt(0);
        CHECK(expected_sub->IsAddSubImmediate());
        sub(sp, sp, immediate);
    }

} // namespace internal
} // namespace v8

#endif // V8_TARGET_ARCH_ARM64
