#include "src/pbc/transform/portable-bytecode-lower.h"

#include "include/v8-version.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-register-optimizer.h"
#include "src/interpreter/control-flow-builders.h"
#include "src/objects/bytecode-array.h"
#include "src/objects/shared-function-info-inl.h"
#include "src/objects/shared-function-info.h"
#include "src/pbc/portable-bytecode-array-iterator.h"
#include "src/pbc/transform/portable-transformer.h"

using namespace v8::internal;

namespace portable {
// Aliases for the builder's ToBooleanMode values: `boolean_mode` tells the
// builder the accumulator already holds a boolean, `not_boolean_mode` asks it
// to coerce the value first.
const interpreter::BytecodeArrayBuilder::ToBooleanMode boolean_mode =
    interpreter::BytecodeArrayBuilder::ToBooleanMode::kAlreadyBoolean;
const interpreter::BytecodeArrayBuilder::ToBooleanMode not_boolean_mode =
    interpreter::BytecodeArrayBuilder::ToBooleanMode::kConvertToBoolean;

// Sentinel marking "no builder constant-pool entry allocated yet" (size_t(-1)).
const size_t kInvalidConstantEntry = -1;

// Collects all forward-jump labels that target a single portable-bytecode
// offset.  During pre-analysis one label is queued per referring jump; during
// lowering each jump retrieves its label with get_unbound() (in discovery
// order), and Bind() binds every queued label once the target offset is
// reached in the output stream.
class LowerBytecodeLabels {
  using label_type = interpreter::BytecodeLabel;
  // A linked list keeps element addresses stable across push_back, which
  // matters because get_unbound() returns pointers into the list.
  using list_type = ZoneLinkedList<label_type>;

 public:
  explicit LowerBytecodeLabels(Zone* zone, const label_type& label)
      : labels_(zone), is_bound_(false) {
    labels_.emplace_back(label);
    jmp_iter = labels_.begin();
  }
  LowerBytecodeLabels() = delete;
  LowerBytecodeLabels(const LowerBytecodeLabels&) = delete;
  LowerBytecodeLabels& operator=(const LowerBytecodeLabels&) = delete;

  // Queue one more label for this target offset.
  void push_back(const label_type& label) { labels_.emplace_back(label); }
  // Hands out the next not-yet-consumed label and advances the cursor.
  // Callers must not request more labels than were queued.
  label_type* get_unbound() {
    DCHECK(jmp_iter != labels_.end());
    auto& label = *jmp_iter++;
    return &label;
  }
  // Binds every queued label at the builder's current position.  Must be
  // called exactly once per instance.
  void Bind(interpreter::BytecodeArrayBuilder* builder) {
    DCHECK(!is_bound_);
    is_bound_ = true;
    for (auto& label : labels_) {
      builder->Bind(&label);
    }
  }
  bool is_bound() const {
    // Sanity check: if we claim to be bound, every label with a referring
    // jump must itself be bound.
    DCHECK_IMPLIES(
        is_bound_,
        std::all_of(labels_.begin(), labels_.end(), [](const label_type& l) {
          return !l.has_referrer_jump() || l.is_bound();
        }));
    return is_bound_;
  }
  bool empty() const { return labels_.empty(); }

 private:
  list_type labels_;
  typename list_type::iterator jmp_iter;  // next label for get_unbound()
  bool is_bound_;                         // set once Bind() has run
};

// Bookkeeping for an in-flight for-in loop: the feedback slot chosen for the
// loop's ForIn bytecodes plus the register list allocated for its state
// (presumably the cache-type/array/length triple — confirm against the ForIn
// visitors).
struct ForInLowerInfo {
  ForInLowerInfo(int32_t slot_id, interpreter::RegisterList reg_list)
      : for_in_slot_id(slot_id), triple(reg_list) {}

  int for_in_slot_id;                // feedback slot shared by the loop's ForIn ops
  interpreter::RegisterList triple;  // registers holding the for-in state
};

class PortableLowering {
 public:
  explicit PortableLowering(PBCLowerItems items)
      : items_(items),
        local_zone_(isolate()->allocator(), "pbc lowering"),
        bytecode_iterator_(raw_bytecode().FirstBytecode(),
                           raw_bytecode().BytecodeSizeInBytes(),
                           items_.pbc_constant_table_),
        feedback_(&local_zone_),
        constant_entry_map_(items.pbc_constant_table_.NumberOfEntries(),
                            kInvalidConstantEntry),
        suspend_count_(0),
        generator_jump_table_(nullptr),
        builder_(&local_zone_, items.pbc_func_.GetParameterCount(),
                 // TODO:(ck) optimize register use?
                 items.pbc_func_.GetRegisterCount(), &feedback_) {}

  Handle<i::BytecodeArray> Lower();
  inline interpreter::BytecodeArrayBuilder& builder() { return builder_; }
  inline BytecodeArrayIterator& bytecode_iterator() {
    return bytecode_iterator_;
  }
  inline std::unordered_map<int32_t, LowerBytecodeLabels>& forward_labels() {
    return forward_labels_;
  }
  inline std::unordered_map<int32_t, interpreter::BytecodeLoopHeader>&
  backward_labels() {
    return backward_labels_;
  }
  inline Isolate* isolate() { return items_.isolate_; }
  inline Zone& zone() { return local_zone_; }
  inline Handle<SharedFunctionInfo> shared_func_info() {
    return items_.shared_func_info_;
  }
  inline PbcBytecode raw_bytecode() { return items_.pbc_bytecode_; }
  std::vector<int>& cached_feedback_slot() { return cached_feedback_slot_; }
  inline i::FeedbackVectorSpec& feedback() { return feedback_; }
  inline std::vector<ForInLowerInfo>& for_in_info_stack() {
    return for_in_info_stack_;
  }
  inline i::LanguageMode language_mode() {
    return items_.pbc_func_.IsStrictMode() ? i::LanguageMode::kStrict
                                           : i::LanguageMode::kSloppy;
  }
  inline interpreter::Register generator_object() const {
    return interpreter::Register(
        items_.pbc_func_.GetIncomingNewTargetOrGeneratorRegister());
  }
  inline interpreter::BytecodeJumpTable* generator_jump_table() const {
    return generator_jump_table_;
  }
  inline uint32_t suspend_count() { return suspend_count_; }

  void inc_suspend_count() { suspend_count_++; }
  Handle<Object> GetConstantFromTable(size_t index) {
    return Handle<Object>(items_.constant_pool_->get(static_cast<int>(index)),
                          items_.isolate_);
  }
  PbcShortValueKind GetShortConstantKind(size_t index) {
    return items_.pbc_constant_table_.ShortEntries()[index].Kind();
  }
  PbcValueKind GetConstantKind(size_t index) {
    if (items_.pbc_constant_table_.IsShortValue()) {
      return ToNormalKind(GetShortConstantKind(index));
    }
    return items_.pbc_constant_table_.Entries()[index].Kind();
  }
  uint16_t GetParammeterCountFromConstantPool(size_t index) {
    P_DCHECK(GetConstantKind(index) == PbcValueKind::kFunction);
    if (items_.pbc_constant_table_.IsShortValue()) {
      PbcShortValue entry = items_.pbc_constant_table_.ShortEntries()[index];
      PbcShortFunction func_ref = static_cast<const PbcShortFunction&>(entry);
      return func_ref.GetParameterCount();
    }
    PbcValue entry = items_.pbc_constant_table_.Entries()[index];
    FunctionRef func_ref = static_cast<const FunctionRef&>(entry);
    return func_ref.GetParameterCount();
  }
  UniqueTag GetUniqueTag(size_t index) {
    if (items_.pbc_constant_table_.IsShortValue()) {
      static_cast<const PbcShortUnique&>(
          items_.pbc_constant_table_.ShortEntries()[index])
          .Tag();
    }
    return static_cast<const UniqueScalar&>(
               items_.pbc_constant_table_.Entries()[index])
        .Tag();
  }
  size_t GetOrCreateHandleConstantEntry(size_t data_entry) {
    size_t entry = constant_entry_map_[data_entry];
    if (entry != kInvalidConstantEntry) {
      // share constant pool value
      return entry;
    }
    // add new constant pool value
    Handle<Object> constant =
        Handle<Object>(items_.constant_pool_->get(static_cast<int>(data_entry)),
                       items_.isolate_);
    entry = builder_.AllocateDeferredConstantPoolEntry();
    builder_.SetDeferredConstantPoolEntry(entry, constant);
    constant_entry_map_[data_entry] = entry;
    return entry;
  }
  std::unordered_map<uint32_t, interpreter::BytecodeJumpTable*>&
  get_jump_tables() {
    return jump_tables_;
  }

#if V8_MAJOR_VERSION == 11
  interpreter::RegisterList construct_forward_args() {
    return construct_forward_args_;
  }
#endif

 private:
  void InitHandlerTable();
  void PreAnalysis();
  void VisitBytecodes();
  void DumpBytecode(Handle<i::BytecodeArray> array,
                    Handle<SharedFunctionInfo> shared_func);

  PBCLowerItems items_;
  Zone local_zone_;
  BytecodeArrayIterator bytecode_iterator_;
  i::FeedbackVectorSpec feedback_;

  // record jmp ins target
  std::unordered_map<int32_t, LowerBytecodeLabels> forward_labels_;
  std::set<int32_t> forward_offsets_;
  std::unordered_map<int32_t, interpreter::BytecodeLoopHeader> backward_labels_;
  std::set<int32_t> backward_offsets_;
  std::unordered_map<
      uint32_t,
      std::vector<std::pair<interpreter::BytecodeJumpTable*, uint32_t>>>
      case_labels_;
  std::set<int32_t> case_offsets_;
  std::unordered_map<uint32_t, interpreter::BytecodeJumpTable*> jump_tables_;

  std::vector<int> cached_feedback_slot_;

  // map raw constant pool and new constant_pool
  std::vector<size_t> constant_entry_map_;

  // generator related
  uint32_t suspend_count_;
  interpreter::BytecodeJumpTable* generator_jump_table_;

  interpreter::BytecodeArrayBuilder builder_;
  // TODO:(wwq) use CFG to match ForInInit and ForInCurrent
  std::vector<ForInLowerInfo> for_in_info_stack_;

#if V8_MAJOR_VERSION == 11
  interpreter::RegisterList construct_forward_args_;
#endif
};

// RAII scope that snapshots the register allocator's high-water mark on entry
// and releases every register allocated past it on destruction.
class LowerRegisterAllocationScope final {
 public:
  explicit LowerRegisterAllocationScope(PortableLowering* lower)
      : lower_(lower),
        outer_next_register_index_(
            lower->builder().register_allocator()->next_register_index()) {}

  ~LowerRegisterAllocationScope() {
    // Roll the allocator back to where it stood when this scope opened.
    auto* allocator = lower_->builder().register_allocator();
    allocator->ReleaseRegisters(outer_next_register_index_);
  }

  LowerRegisterAllocationScope(const LowerRegisterAllocationScope&) = delete;
  LowerRegisterAllocationScope& operator=(const LowerRegisterAllocationScope&) =
      delete;

 private:
  PortableLowering* lower_;           // not owned
  int outer_next_register_index_;     // allocator mark captured on entry
};

// An array of portable bytecode visit functions with type of `void
// VisitXXX(PortableLowering&);`, indexed by bytecode enum value.  Entries are
// filled in by VisitsInitializer below; unhandled bytecodes remain nullptr.
void (*visits[static_cast<uint32_t>(Bytecode::kLast) + 1])(
    PortableLowering&) = {nullptr};
// Single OP lower function: forward-declare one visitor per portable bytecode.
#define DECLARE_VISIT_BYTECODE(name, ...) \
  void Visit##name(PortableLowering& lowerer);
P_BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
#undef DECLARE_VISIT_BYTECODE

// Populates the `visits` dispatch table: one slot per bytecode in
// P_BYTECODE_LIST, keyed by the bytecode's enum value.
struct VisitsInitializer {
  VisitsInitializer() {
    uint32_t id = 0;
#define DECLARE_VISIT_BYTECODE(name, ...)        \
  id = static_cast<uint32_t>(Bytecode::k##name); \
  visits[id] = Visit##name;

    P_BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
#undef DECLARE_VISIT_BYTECODE
  }
};

// File-scope instance whose constructor fills the table during static
// initialization, before any lowering can run.
VisitsInitializer initial;

// Portable bytecode lower helper functions: shared emitters used by multiple
// generated Visit* functions defined later in this file.
void VisitBinaryOp(PortableLowering& lowerer, Token::Value op);
void VisitBinaryOpWithImm(PortableLowering& lowerer, Token::Value op);
void VisitUnaryOp(PortableLowering& lowerer, Token::Value op);
void VisitCallVarArgs(PortableLowering& lowerer, interpreter::Bytecode op);
void VisitCompareOp(PortableLowering& lowerer, Token::Value op);
// Presumably returns the label to bind at the jump's target offset — confirm
// against its definition.
interpreter::BytecodeLabel* VisitForwardJump(PortableLowering& lowerer);

Handle<i::BytecodeArray> PortableLowering::Lower() {
  // Discover all jump targets and allocate jump tables before emitting code.
  PreAnalysis();

  // Build generator prologue
  auto kind = shared_func_info()->kind();
  if (IsResumableFunction(kind) && items_.pbc_func_.GetSuspendCount() > 0) {
    // One jump-table entry per suspend point; SwitchOnGeneratorState
    // dispatches to the matching resume point when the function is re-entered.
    generator_jump_table_ =
        builder().AllocateJumpTable(items_.pbc_func_.GetSuspendCount(), 0);
    builder().SwitchOnGeneratorState(generator_object(), generator_jump_table_);
  }

  VisitBytecodes();

  Handle<i::BytecodeArray> new_array = builder_.ToBytecodeArray(isolate());
  new_array->set_incoming_new_target_or_generator_register(generator_object());
  // TODO: update feedback lazy? PbcTransform set_feedback_metadata as well
  Handle<FeedbackMetadata> feedback_metadata =
      FeedbackMetadata::New(isolate(), &feedback_);
  shared_func_info()->set_feedback_metadata(*feedback_metadata,
                                            v8::kReleaseStore);
  if (i::v8_flags.log_pbc) {
    DumpBytecode(new_array, shared_func_info());
  }
  return new_array;
}

// Reserve one builder handler-table entry per exception range declared by
// the portable function.
void PortableLowering::InitHandlerTable() {
  const uint32_t exception_count = items_.pbc_func_.GetExceptionCount();
  for (uint32_t entry = 0; entry < exception_count; ++entry) {
    builder_.NewHandlerEntry();
  }
}

// One pass over the raw portable bytecode to discover every jump target
// before lowering emits any code: forward-jump labels, loop headers, and
// switch jump tables are all registered here.
void PortableLowering::PreAnalysis() {
  // Init handler table
  InitHandlerTable();

  // Control flow analysis
  BytecodeArrayIterator iter(raw_bytecode().FirstBytecode(),
                             raw_bytecode().BytecodeSizeInBytes(),
                             items_.pbc_constant_table_);
  for (; !iter.done(); iter.Advance()) {
    auto bp = iter.current_bytecode();
    switch (bp) {
#define BYTECODE_CASE(name, ...) case Bytecode::k##name:
      P_JUMP_FORWARD_BYTECODE_LIST(BYTECODE_CASE)
#undef BYTECODE_CASE
      {
        // Forward jump: queue one label per referring jump, grouped by
        // target offset.
        int32_t target_offset = iter.GetJumpTargetOffset();
        DCHECK(target_offset > iter.GetAbsoluteOffset(0));
        interpreter::BytecodeLabel label;
        // NOTE: this structured binding shadows the outer loop iterator
        // `iter` for the remainder of this case block.
        auto [iter, inserted] =
            forward_labels_.try_emplace(target_offset, &local_zone_, label);
        if (!inserted) {
          iter->second.push_back(label);
        } else {
          forward_offsets_.emplace(target_offset);
        }
        if (bp == Bytecode::kYieldFinally) {
          // YieldFinally consumes two labels at the same target offset.
          interpreter::BytecodeLabel label2;
          iter->second.push_back(label2);
        }
        break;
      }
      case Bytecode::kJumpLoop: {
        // Backward jump: record a loop header to bind when the target
        // offset is reached during lowering.
        int32_t target_offset = iter.GetJumpTargetOffset();
        DCHECK(target_offset < iter.GetAbsoluteOffset(0));
        interpreter::BytecodeLoopHeader header;
        backward_labels_[target_offset] = header;
        backward_offsets_.emplace(target_offset);
        break;
      }
      case Bytecode::kSwitchOnSmiNoFeedback: {
        // Allocate the builder-side jump table and map each case value to
        // its target offset, read as relative Smis from the constant pool.
        uint32_t min_case = iter.GetImmediateOperand(2);
        uint32_t table_size = iter.GetUnsignedImmediateOperand(1);
        interpreter::BytecodeJumpTable* jump_table =
            builder_.AllocateJumpTable(table_size, min_case);

        uint32_t current_offset = iter.GetAbsoluteOffset(0);
        jump_tables_[current_offset] = jump_table;

        uint32_t table_start = iter.GetIndexOperand(0);
        uint32_t max_case = min_case + table_size - 1;
        for (uint32_t case_value = min_case; case_value <= max_case;
             ++case_value) {
          uint32_t item_index = table_start + case_value - min_case;
          uint32_t target_offset =
              current_offset +
              i::Smi::ToInt(
                  *PortableLowering::GetConstantFromTable(item_index));
          P_DCHECK(target_offset != current_offset);
          if (target_offset > current_offset) {
            case_labels_[target_offset].push_back(
                std::make_pair(jump_table, case_value));
          } else {
            // Backward switch targets are not expected by this pipeline.
            P_UNREACHABLE();
          }
          case_offsets_.emplace(target_offset);
        }
        break;
      }
#if V8_MAJOR_VERSION == 11
      case Bytecode::kConstructForwardAllArgs: {
        // NOTE(review): this emits bytecode during the analysis pass, ahead
        // of VisitBytecodes() — presumably to materialize the forwarded rest
        // arguments once, up front; confirm the ordering is intended.
        construct_forward_args_ =
            builder_.register_allocator()->NewRegisterList(1);
        builder_.CreateArguments(i::CreateArgumentsType::kRestParameter);
        builder_.StoreAccumulatorInRegister(construct_forward_args_[0]);
        break;
      }
#endif
      default:
        break;
    }  // end switch
  }  // end for
}

void PortableLowering::VisitBytecodes() {
  auto current_forward = forward_offsets_.begin();
  auto current_backward = backward_offsets_.begin();
  auto current_case = case_offsets_.begin();

  for (; !bytecode_iterator_.done(); bytecode_iterator_.Advance()) {
    // Fix jump labels
    int32_t current_offset = bytecode_iterator_.GetOffsetWithPrefix(0);
    if (forward_offsets_.end() != current_forward &&
        current_offset == *current_forward) {
      auto iter = forward_labels_.find(current_offset);
      DCHECK(iter != forward_labels_.end());
      iter->second.Bind(&builder_);
      ++current_forward;
    }
    if (backward_offsets_.end() != current_backward &&
        current_offset == *current_backward) {
      auto iter = backward_labels_.find(current_offset);
      DCHECK(iter != backward_labels_.end());
      builder_.Bind(&iter->second);
      ++current_backward;
    }
    if (case_offsets_.end() != current_case &&
        current_offset == *current_case) {
      auto iter = case_labels_.find(current_offset);
      DCHECK(iter != case_labels_.end());
      for (auto case_label : iter->second) {
        builder_.Bind(case_label.first, case_label.second);
      }
      ++current_case;
    }

    // Lower bytecode
    uint32_t id = static_cast<uint32_t>(bytecode_iterator_.current_bytecode());
    auto* visit_func = visits[id];
    (*visit_func)(*this);

  }  // end for bytecode_iterator
}

// Logs the lowered bytecode when --log-pbc is set.  Header text goes through
// std::cout while Disassemble() writes to i::StdoutStream (V8's own stdout
// wrapper); streaming the `shared_func` handle prints its debug
// representation, not the function source.
void PortableLowering::DumpBytecode(Handle<v8::internal::BytecodeArray> array,
                                    Handle<SharedFunctionInfo> shared_func) {
  std::cout << std::endl;
  i::StdoutStream os;
  std::cout << "[portable lowered bytecode for function: "
            << shared_func->DebugNameCStr() << " (" << shared_func << ")]"
            << std::endl;
  std::cout << "Bytecode length: " << array->length() << std::endl;
  array->Disassemble(os);
  std::cout << std::endl;
}

// Star: store the accumulator into the register named by operand 0.
void VisitStar(PortableLowering& lowerer) {
  const int dst_index =
      lowerer.bytecode_iterator().GetRegisterOperand(0).index();
  lowerer.builder().StoreAccumulatorInRegister(interpreter::Register(dst_index));
}

// Visitors for the fixed-register short star bytecodes.  The register index
// is recovered from the bytecode value itself: the short-star bytecodes are
// laid out so that kStar0 minus the bytecode yields the register index
// (mirrors V8's Register::FromShortStar — verify against the bytecode list
// ordering if it changes).
#define SHORT_STAR_VISITOR(Name, ...)                                     \
  void Visit##Name(PortableLowering& lowerer) {                           \
    int32_t index = static_cast<int32_t>(interpreter::Bytecode::kStar0) - \
                    static_cast<int32_t>(interpreter::Bytecode::k##Name); \
    lowerer.builder().StoreAccumulatorInRegister(                         \
        interpreter::Register(index));                                    \
  }
SHORT_STAR_BYTECODE_LIST(SHORT_STAR_VISITOR)
#undef SHORT_STAR_VISITOR

// Ldar: load the register named by operand 0 into the accumulator.
void VisitLdar(PortableLowering& lowerer) {
  const int src_index =
      lowerer.bytecode_iterator().GetRegisterOperand(0).index();
  lowerer.builder().LoadAccumulatorWithRegister(
      interpreter::Register(src_index));
}

// LdaZero: dedicated encoding for loading Smi 0 into the accumulator.
void VisitLdaZero(PortableLowering& lowerer) {
  auto& builder = lowerer.builder();
  builder.LoadLiteral(i::Smi::FromInt(0));
}

// LdaSmi: load the signed immediate operand into the accumulator as a Smi.
void VisitLdaSmi(PortableLowering& lowerer) {
  const int32_t imm = lowerer.bytecode_iterator().GetImmediateOperand(0);
  lowerer.builder().LoadLiteral(i::Smi::FromInt(imm));
}

// Literal loaders: each bytecode puts one fixed canonical value into the
// accumulator via the builder's dedicated emitter.

void VisitLdaUndefined(PortableLowering& lowerer) {
  auto& builder = lowerer.builder();
  builder.LoadUndefined();
}

void VisitLdaNull(PortableLowering& lowerer) {
  auto& builder = lowerer.builder();
  builder.LoadNull();
}

void VisitLdaTheHole(PortableLowering& lowerer) {
  auto& builder = lowerer.builder();
  builder.LoadTheHole();
}

void VisitLdaTrue(PortableLowering& lowerer) {
  auto& builder = lowerer.builder();
  builder.LoadTrue();
}

void VisitLdaFalse(PortableLowering& lowerer) {
  auto& builder = lowerer.builder();
  builder.LoadFalse();
}

// LdaConstant: load the constant at operand 0 into the accumulator.  On
// 32-bit-Smi-value configurations a Smi constant is inlined as a literal
// instead of going through the builder's constant pool.
void VisitLdaConstant(PortableLowering& lowerer) {
  size_t constant_entry = lowerer.bytecode_iterator().GetIndexOperand(0);
  if (SmiValuesAre32Bits()) {
    auto value = lowerer.GetConstantFromTable(constant_entry);
    // The Smi type-check/cast API differs between V8 11 and later versions.
#if V8_MAJOR_VERSION == 11
    if (value->IsSmi()) {
      lowerer.builder().LoadLiteral(i::Smi::cast(*value));
#else
    if (i::Is<i::Smi>(value)) {
      lowerer.builder().LoadLiteral(*i::Cast<i::Smi>(value));
#endif
      return;
    }
  }
  size_t entry = lowerer.GetOrCreateHandleConstantEntry(constant_entry);
  lowerer.builder().LoadConstantPoolEntry(entry);
}

// LdaContextSlot: load a mutable context slot; operands are
// (context register, slot index, depth).
void VisitLdaContextSlot(PortableLowering& lowerer) {
  auto& iter = lowerer.bytecode_iterator();
  interpreter::Register ctx(iter.GetRegisterOperand(0).index());
  const int32_t slot = iter.GetIndexOperand(1);
  const uint32_t depth = iter.GetUnsignedImmediateOperand(2);
  lowerer.builder().LoadContextSlot(
      ctx, slot, depth, interpreter::BytecodeArrayBuilder::kMutableSlot);
}

// LdaImmutableContextSlot: same operands as LdaContextSlot but the slot is
// known to be immutable.
void VisitLdaImmutableContextSlot(PortableLowering& lowerer) {
  auto& iter = lowerer.bytecode_iterator();
  interpreter::Register ctx(iter.GetRegisterOperand(0).index());
  const int32_t slot = iter.GetIndexOperand(1);
  const uint32_t depth = iter.GetUnsignedImmediateOperand(2);
  lowerer.builder().LoadContextSlot(
      ctx, slot, depth, interpreter::BytecodeArrayBuilder::kImmutableSlot);
}

// LdaCurrentContextSlot: load from the current context (depth 0), mutable.
void VisitLdaCurrentContextSlot(PortableLowering& lowerer) {
  const int32_t slot = lowerer.bytecode_iterator().GetIndexOperand(0);
  lowerer.builder().LoadContextSlot(
      interpreter::Register::current_context(), slot, /*depth=*/0,
      interpreter::BytecodeArrayBuilder::kMutableSlot);
}

// LdaImmutableCurrentContextSlot: as above, but the slot is immutable.
void VisitLdaImmutableCurrentContextSlot(PortableLowering& lowerer) {
  const int32_t slot = lowerer.bytecode_iterator().GetIndexOperand(0);
  lowerer.builder().LoadContextSlot(
      interpreter::Register::current_context(), slot, /*depth=*/0,
      interpreter::BytecodeArrayBuilder::kImmutableSlot);
}

// Mov: copy register operand 0 into register operand 1.
void VisitMov(PortableLowering& lowerer) {
  auto& iter = lowerer.bytecode_iterator();
  interpreter::Register from(iter.GetRegisterOperand(0).index());
  interpreter::Register to(iter.GetRegisterOperand(1).index());
  lowerer.builder().MoveRegister(from, to);
}

// PushContext: re-emit with the register named by operand 0.
void VisitPushContext(PortableLowering& lowerer) {
  const int ctx_index =
      lowerer.bytecode_iterator().GetRegisterOperand(0).index();
  lowerer.builder().PushContext(interpreter::Register(ctx_index));
}

// PopContext: re-emit with the register named by operand 0.
void VisitPopContext(PortableLowering& lowerer) {
  const int ctx_index =
      lowerer.bytecode_iterator().GetRegisterOperand(0).index();
  lowerer.builder().PopContext(interpreter::Register(ctx_index));
}

// TestReferenceEqual: compare the register operand against the accumulator.
void VisitTestReferenceEqual(PortableLowering& lowerer) {
  const int lhs_index =
      lowerer.bytecode_iterator().GetRegisterOperand(0).index();
  lowerer.builder().CompareReference(interpreter::Register(lhs_index));
}

// Accumulator-only test bytecodes: re-emit the corresponding compare.

void VisitTestUndetectable(PortableLowering& lowerer) {
  auto& builder = lowerer.builder();
  builder.CompareUndetectable();
}

void VisitTestNull(PortableLowering& lowerer) {
  auto& builder = lowerer.builder();
  builder.CompareNull();
}

void VisitTestUndefined(PortableLowering& lowerer) {
  auto& builder = lowerer.builder();
  builder.CompareUndefined();
}

// TestTypeOf: decode the literal-type flag operand and emit the compare.
void VisitTestTypeOf(PortableLowering& lowerer) {
  const auto raw_flag = lowerer.bytecode_iterator().GetFlag8Operand(0);
  lowerer.builder().CompareTypeOf(
      interpreter::TestTypeOfFlags::Decode(raw_flag));
}

// Returns a mutable reference to the cached feedback slot for `feedback_id`,
// growing the cache with a -1 ("not yet allocated") sentinel on first sight.
// Feedback ids are expected to arrive densely and in order (P_DCHECK below).
int& GetCachedFeedbackSlot(PortableLowering& lowerer, uint32_t feedback_id) {
  auto& cache = lowerer.cached_feedback_slot();
  if (feedback_id >= cache.size()) {
    P_DCHECK(cache.size() == feedback_id);
    cache.push_back(-1);
  }
  return cache[feedback_id];
}

// LdaGlobal: allocate a fresh load-global IC slot and emit the load.
void VisitLdaGlobal(PortableLowering& lowerer) {
  const size_t name_entry = lowerer.bytecode_iterator().GetIndexOperand(0);
  const int32_t slot =
      lowerer.feedback().AddLoadGlobalICSlot(i::TypeofMode::kNotInside).ToInt();
  lowerer.builder().LoadGlobal(
      lowerer.GetOrCreateHandleConstantEntry(name_entry), slot,
      i::TypeofMode::kNotInside);
}

// LdaGlobalWithFeedback: like LdaGlobal, but the feedback slot is shared via
// the feedback-id cache so repeated sites reuse one IC slot.
void VisitLdaGlobalWithFeedback(PortableLowering& lowerer) {
  auto& iter = lowerer.bytecode_iterator();
  const size_t name_entry = iter.GetIndexOperand(0);
  int& slot = GetCachedFeedbackSlot(lowerer, iter.GetIndexOperand(1));
  if (slot == -1) {
    // First sighting of this feedback id: allocate the IC slot and cache it.
    slot = lowerer.feedback()
               .AddLoadGlobalICSlot(i::TypeofMode::kNotInside)
               .ToInt();
  }
  lowerer.builder().LoadGlobal(
      lowerer.GetOrCreateHandleConstantEntry(name_entry), slot,
      i::TypeofMode::kNotInside);
}

// LdaGlobalInsideTypeof: as LdaGlobal but inside a typeof expression, so the
// load must not throw for unresolved names.
void VisitLdaGlobalInsideTypeof(PortableLowering& lowerer) {
  const size_t name_entry = lowerer.bytecode_iterator().GetIndexOperand(0);
  const int32_t slot =
      lowerer.feedback().AddLoadGlobalICSlot(i::TypeofMode::kInside).ToInt();
  lowerer.builder().LoadGlobal(
      lowerer.GetOrCreateHandleConstantEntry(name_entry), slot,
      i::TypeofMode::kInside);
}

// Typeof-mode variant of LdaGlobalWithFeedback (shared IC slot per id).
void VisitLdaGlobalInsideTypeofWithFeedback(PortableLowering& lowerer) {
  auto& iter = lowerer.bytecode_iterator();
  const size_t name_entry = iter.GetIndexOperand(0);
  int& slot = GetCachedFeedbackSlot(lowerer, iter.GetIndexOperand(1));
  if (slot == -1) {
    slot =
        lowerer.feedback().AddLoadGlobalICSlot(i::TypeofMode::kInside).ToInt();
  }
  lowerer.builder().LoadGlobal(
      lowerer.GetOrCreateHandleConstantEntry(name_entry), slot,
      i::TypeofMode::kInside);
}

// StaGlobal: allocate a store-global IC slot (language-mode aware) and emit
// the store of the accumulator to the named global.
void VisitStaGlobal(PortableLowering& lowerer) {
  const size_t name_entry = lowerer.bytecode_iterator().GetIndexOperand(0);
  const int32_t slot =
      lowerer.feedback().AddStoreGlobalICSlot(lowerer.language_mode()).ToInt();
  lowerer.builder().StoreGlobal(
      lowerer.GetOrCreateHandleConstantEntry(name_entry), slot);
}

// StaGlobalWithFeedback: store-global with a feedback-id-shared IC slot.
void VisitStaGlobalWithFeedback(PortableLowering& lowerer) {
  auto& iter = lowerer.bytecode_iterator();
  const size_t name_entry = iter.GetIndexOperand(0);
  int& slot = GetCachedFeedbackSlot(lowerer, iter.GetIndexOperand(1));
  if (slot == -1) {
    slot = lowerer.feedback()
               .AddStoreGlobalICSlot(lowerer.language_mode())
               .ToInt();
  }
  lowerer.builder().StoreGlobal(
      lowerer.GetOrCreateHandleConstantEntry(name_entry), slot);
}

// StaContextSlot: store the accumulator into (context, slot, depth).
void VisitStaContextSlot(PortableLowering& lowerer) {
  auto& iter = lowerer.bytecode_iterator();
  interpreter::Register ctx(iter.GetRegisterOperand(0).index());
  const int32_t slot = iter.GetIndexOperand(1);
  const uint32_t depth = iter.GetUnsignedImmediateOperand(2);
  lowerer.builder().StoreContextSlot(ctx, slot, depth);
}

// StaCurrentContextSlot: store into the current context at depth 0.
void VisitStaCurrentContextSlot(PortableLowering& lowerer) {
  const int32_t slot = lowerer.bytecode_iterator().GetIndexOperand(0);
  lowerer.builder().StoreContextSlot(interpreter::Register::current_context(),
                                     slot, /*depth=*/0);
}

// LdaLookupSlot: dynamic (with-scope / eval) name lookup, not in typeof.
void VisitLdaLookupSlot(PortableLowering& lowerer) {
  const size_t name_entry = lowerer.bytecode_iterator().GetIndexOperand(0);
  const size_t pool_entry = lowerer.GetOrCreateHandleConstantEntry(name_entry);
  lowerer.builder().LoadLookupSlot(pool_entry, i::TypeofMode::kNotInside);
}

// LdaLookupContextSlot: dynamic lookup that can fast-path to a known context
// slot; operands are (name index, slot, depth).  The ContextKind argument
// only exists in the V8 13 builder API.
void VisitLdaLookupContextSlot(PortableLowering& lowerer) {
  size_t name_entry = lowerer.bytecode_iterator().GetIndexOperand(0);
  int32_t slot_id = lowerer.bytecode_iterator().GetIndexOperand(1);
  uint32_t depth = lowerer.bytecode_iterator().GetUnsignedImmediateOperand(2);
  lowerer.builder().LoadLookupContextSlot(
      lowerer.GetOrCreateHandleConstantEntry(name_entry),
      i::TypeofMode::kNotInside,
#if V8_MAJOR_VERSION == 13
      ContextKind::kDefault,
#endif
      slot_id, depth);
}

// LdaLookupGlobalSlot: dynamic lookup that can fast-path to a global load;
// allocates a fresh load-global IC slot.
void VisitLdaLookupGlobalSlot(PortableLowering& lowerer) {
  auto& iter = lowerer.bytecode_iterator();
  const size_t name_entry = iter.GetIndexOperand(0);
  const uint32_t depth = iter.GetUnsignedImmediateOperand(1);
  const int32_t slot =
      lowerer.feedback().AddLoadGlobalICSlot(i::TypeofMode::kNotInside).ToInt();
  lowerer.builder().LoadLookupGlobalSlot(
      lowerer.GetOrCreateHandleConstantEntry(name_entry),
      i::TypeofMode::kNotInside, slot, depth);
}

// Typeof-mode variant of LdaLookupSlot (must not throw on unresolved names).
void VisitLdaLookupSlotInsideTypeof(PortableLowering& lowerer) {
  const size_t name_entry = lowerer.bytecode_iterator().GetIndexOperand(0);
  const size_t pool_entry = lowerer.GetOrCreateHandleConstantEntry(name_entry);
  lowerer.builder().LoadLookupSlot(pool_entry, i::TypeofMode::kInside);
}

// Typeof-mode variant of LdaLookupContextSlot; same operands, and the
// ContextKind argument only exists in the V8 13 builder API.
void VisitLdaLookupContextSlotInsideTypeof(PortableLowering& lowerer) {
  size_t name_entry = lowerer.bytecode_iterator().GetIndexOperand(0);
  int32_t slot_id = lowerer.bytecode_iterator().GetIndexOperand(1);
  uint32_t depth = lowerer.bytecode_iterator().GetUnsignedImmediateOperand(2);
  lowerer.builder().LoadLookupContextSlot(
      lowerer.GetOrCreateHandleConstantEntry(name_entry),
      i::TypeofMode::kInside,
#if V8_MAJOR_VERSION == 13
      ContextKind::kDefault,
#endif
      slot_id, depth);
}

// Typeof-mode variant of LdaLookupGlobalSlot.
void VisitLdaLookupGlobalSlotInsideTypeof(PortableLowering& lowerer) {
  auto& iter = lowerer.bytecode_iterator();
  const size_t name_entry = iter.GetIndexOperand(0);
  const uint32_t depth = iter.GetUnsignedImmediateOperand(1);
  const int32_t slot =
      lowerer.feedback().AddLoadGlobalICSlot(i::TypeofMode::kInside).ToInt();
  lowerer.builder().LoadLookupGlobalSlot(
      lowerer.GetOrCreateHandleConstantEntry(name_entry),
      i::TypeofMode::kInside, slot, depth);
}

// StaLookupSlot: dynamic store; language mode and hoisting mode are packed
// into the flag operand.
void VisitStaLookupSlot(PortableLowering& lowerer) {
  auto& iter = lowerer.bytecode_iterator();
  const size_t name_entry = iter.GetIndexOperand(0);
  const int flags = iter.GetFlag8Operand(1);
  using Flags = interpreter::StoreLookupSlotFlags;
  const auto language_mode =
      static_cast<i::LanguageMode>(Flags::LanguageModeBit::decode(flags));
  const auto hoisting_mode = static_cast<i::LookupHoistingMode>(
      Flags::LookupHoistingModeBit::decode(flags));
  lowerer.builder().StoreLookupSlot(
      lowerer.GetOrCreateHandleConstantEntry(name_entry), language_mode,
      hoisting_mode);
}

// GetNamedProperty: named load from the receiver register; fresh IC slot.
void VisitGetNamedProperty(PortableLowering& lowerer) {
  auto& iter = lowerer.bytecode_iterator();
  interpreter::Register receiver(iter.GetRegisterOperand(0).index());
  const size_t name_entry = iter.GetIndexOperand(1);
  const int32_t slot = lowerer.feedback().AddLoadICSlot().ToInt();
  lowerer.builder().LoadNamedProperty(
      receiver, lowerer.GetOrCreateHandleConstantEntry(name_entry), slot);
}

// GetNamedPropertyWithFeedback: named load with a feedback-id-shared IC slot.
void VisitGetNamedPropertyWithFeedback(PortableLowering& lowerer) {
  auto& iter = lowerer.bytecode_iterator();
  interpreter::Register receiver(iter.GetRegisterOperand(0).index());
  const size_t name_entry = iter.GetIndexOperand(1);
  int& slot = GetCachedFeedbackSlot(lowerer, iter.GetIndexOperand(2));
  if (slot == -1) {
    slot = lowerer.feedback().AddLoadICSlot().ToInt();
  }
  lowerer.builder().LoadNamedProperty(
      receiver, lowerer.GetOrCreateHandleConstantEntry(name_entry), slot);
}

// GetKeyedProperty: keyed load, key in the accumulator; fresh IC slot.
void VisitGetKeyedProperty(PortableLowering& lowerer) {
  interpreter::Register receiver(
      lowerer.bytecode_iterator().GetRegisterOperand(0).index());
  const int32_t slot = lowerer.feedback().AddKeyedLoadICSlot().ToInt();
  lowerer.builder().LoadKeyedProperty(receiver, slot);
}

// SetNamedProperty: named store (language-mode aware); fresh IC slot.
void VisitSetNamedProperty(PortableLowering& lowerer) {
  auto& iter = lowerer.bytecode_iterator();
  interpreter::Register receiver(iter.GetRegisterOperand(0).index());
  const size_t name_entry = iter.GetIndexOperand(1);
  const int slot =
      lowerer.feedback().AddStoreICSlot(lowerer.language_mode()).ToInt();
  lowerer.builder().SetNamedProperty(
      receiver, lowerer.GetOrCreateHandleConstantEntry(name_entry), slot,
      lowerer.language_mode());
}

// SetNamedPropertyWithFeedback: named store with a feedback-id-shared slot.
void VisitSetNamedPropertyWithFeedback(PortableLowering& lowerer) {
  auto& iter = lowerer.bytecode_iterator();
  interpreter::Register receiver(iter.GetRegisterOperand(0).index());
  const size_t name_entry = iter.GetIndexOperand(1);
  int& slot = GetCachedFeedbackSlot(lowerer, iter.GetIndexOperand(2));
  if (slot == -1) {
    slot = lowerer.feedback().AddStoreICSlot(lowerer.language_mode()).ToInt();
  }
  lowerer.builder().SetNamedProperty(
      receiver, lowerer.GetOrCreateHandleConstantEntry(name_entry), slot,
      lowerer.language_mode());
}

// DefineNamedOwnProperty: define an own named property; fresh IC slot.
void VisitDefineNamedOwnProperty(PortableLowering& lowerer) {
  auto& iter = lowerer.bytecode_iterator();
  interpreter::Register receiver(iter.GetRegisterOperand(0).index());
  const size_t name_entry = iter.GetIndexOperand(1);
  const int32_t slot = lowerer.feedback().AddDefineNamedOwnICSlot().ToInt();
  lowerer.builder().DefineNamedOwnProperty(
      receiver, lowerer.GetOrCreateHandleConstantEntry(name_entry), slot);
}

// SetKeyedProperty: keyed store (language-mode aware); fresh IC slot.
void VisitSetKeyedProperty(PortableLowering& lowerer) {
  auto& iter = lowerer.bytecode_iterator();
  interpreter::Register receiver(iter.GetRegisterOperand(0).index());
  interpreter::Register key(iter.GetRegisterOperand(1).index());
  const int32_t slot =
      lowerer.feedback().AddKeyedStoreICSlot(lowerer.language_mode()).ToInt();
  lowerer.builder().SetKeyedProperty(receiver, key, slot,
                                     lowerer.language_mode());
}

// DefineKeyedOwnProperty: define an own keyed property; the flag operand is
// forwarded as DefineKeyedOwnPropertyFlag; fresh IC slot.
void VisitDefineKeyedOwnProperty(PortableLowering& lowerer) {
  auto& iter = lowerer.bytecode_iterator();
  interpreter::Register receiver(iter.GetRegisterOperand(0).index());
  interpreter::Register key(iter.GetRegisterOperand(1).index());
  const auto flag = static_cast<i::DefineKeyedOwnPropertyFlag>(
      iter.GetFlag8Operand(2));
  const int32_t slot = lowerer.feedback().AddDefineKeyedOwnICSlot().ToInt();
  lowerer.builder().DefineKeyedOwnProperty(receiver, key, flag, slot);
}

namespace {
// Table of operator tokens used by the lowering below. The Token enum was
// respelled between V8 major versions: 11.x uses the SHOUTY style
// (Token::Value::ADD), later versions the kCamelCase style (Token::kAdd).
// Each entry carries both spellings; the version check underneath selects
// one and materializes it as a `token_<name>` constant.
#define P_BINARY_OP_TOKEN_LIST(V)                              \
  V(add, Token::kAdd, Token::Value::ADD)                       \
  V(sub, Token::kSub, Token::Value::SUB)                       \
  V(mul, Token::kMul, Token::Value::MUL)                       \
  V(div, Token::kDiv, Token::Value::DIV)                       \
  V(mod, Token::kMod, Token::Value::MOD)                       \
  V(exp, Token::kExp, Token::Value::EXP)                       \
  V(bit_and, Token::kBitAnd, Token::Value::BIT_AND)            \
  V(bit_or, Token::kBitOr, Token::Value::BIT_OR)               \
  V(bit_xor, Token::kBitXor, Token::Value::BIT_XOR)            \
  V(shl, Token::kShl, Token::Value::SHL)                       \
  V(sar, Token::kSar, Token::Value::SAR)                       \
  V(shr, Token::kShr, Token::Value::SHR)                       \
  V(inc, Token::kInc, Token::Value::INC)                       \
  V(dec, Token::kDec, Token::Value::DEC)                       \
  V(bit_not, Token::kBitNot, Token::Value::BIT_NOT)            \
  V(eq, Token::kEq, Token::Value::EQ)                          \
  V(eq_strict, Token::kEqStrict, Token::Value::EQ_STRICT)      \
  V(lt, Token::kLessThan, Token::Value::LT)                    \
  V(gt, Token::kGreaterThan, Token::Value::GT)                 \
  V(lte, Token::kLessThanEq, Token::Value::LTE)                \
  V(gte, Token::kGreaterThanEq, Token::Value::GTE)             \
  V(instance_of, Token::kInstanceOf, Token::Value::INSTANCEOF) \
  V(in, Token::kIn, Token::Value::IN)
#if V8_MAJOR_VERSION == 11

// V8 11.x: select the old Token::Value::XXX spelling (third column).
#define BIN_OP(name, token13, token11) Token::Value token_##name = token11;
P_BINARY_OP_TOKEN_LIST(BIN_OP)
#undef BIN_OP

#else
// Newer V8: select the Token::kXxx spelling (second column).
#define BIN_OP(name, token13, token11) Token::Value token_##name = token13;
P_BINARY_OP_TOKEN_LIST(BIN_OP)
#undef BIN_OP

#endif

#undef P_BINARY_OP_TOKEN_LIST
}  // namespace

// Emits a binary operation |op| whose left operand is the register in
// operand 0; a fresh BinaryOp IC feedback slot is allocated per call site.
void VisitBinaryOp(PortableLowering& lowerer, Token::Value op) {
  interpreter::Register lhs(
      lowerer.bytecode_iterator().GetRegisterOperand(0).index());
  int32_t slot = lowerer.feedback().AddBinaryOpICSlot().ToInt();
  lowerer.builder().BinaryOperation(op, lhs, slot);
}

// One-line dispatchers: each register-form binary bytecode forwards to
// VisitBinaryOp with the version-selected token for that operator.
void VisitAdd(PortableLowering& lowerer) { VisitBinaryOp(lowerer, token_add); }

void VisitSub(PortableLowering& lowerer) { VisitBinaryOp(lowerer, token_sub); }

void VisitMul(PortableLowering& lowerer) { VisitBinaryOp(lowerer, token_mul); }

void VisitDiv(PortableLowering& lowerer) { VisitBinaryOp(lowerer, token_div); }

void VisitMod(PortableLowering& lowerer) { VisitBinaryOp(lowerer, token_mod); }

void VisitExp(PortableLowering& lowerer) { VisitBinaryOp(lowerer, token_exp); }

void VisitBitwiseOr(PortableLowering& lowerer) {
  VisitBinaryOp(lowerer, token_bit_or);
}

void VisitBitwiseXor(PortableLowering& lowerer) {
  VisitBinaryOp(lowerer, token_bit_xor);
}

void VisitBitwiseAnd(PortableLowering& lowerer) {
  VisitBinaryOp(lowerer, token_bit_and);
}

void VisitShiftLeft(PortableLowering& lowerer) {
  VisitBinaryOp(lowerer, token_shl);
}

// ShiftRight is the arithmetic (sign-extending) shift, hence kSar.
void VisitShiftRight(PortableLowering& lowerer) {
  VisitBinaryOp(lowerer, token_sar);
}

// ShiftRightLogical is the zero-filling shift, hence kShr.
void VisitShiftRightLogical(PortableLowering& lowerer) {
  VisitBinaryOp(lowerer, token_shr);
}

// Emits a binary operation |op| whose right operand is an inline Smi taken
// from immediate operand 0 (the left operand is the accumulator). A fresh
// BinaryOp IC feedback slot is allocated per call site.
void VisitBinaryOpWithImm(PortableLowering& lowerer, Token::Value op) {
  auto rhs =
      i::Smi::FromInt(lowerer.bytecode_iterator().GetImmediateOperand(0));
  int32_t slot = lowerer.feedback().AddBinaryOpICSlot().ToInt();
  lowerer.builder().BinaryOperationSmiLiteral(op, rhs, slot);
}

// One-line dispatchers for the Smi-immediate forms of the binary bytecodes;
// each forwards to VisitBinaryOpWithImm with the matching operator token.
void VisitAddSmi(PortableLowering& lowerer) {
  VisitBinaryOpWithImm(lowerer, token_add);
}

void VisitSubSmi(PortableLowering& lowerer) {
  VisitBinaryOpWithImm(lowerer, token_sub);
}

void VisitMulSmi(PortableLowering& lowerer) {
  VisitBinaryOpWithImm(lowerer, token_mul);
}

void VisitDivSmi(PortableLowering& lowerer) {
  VisitBinaryOpWithImm(lowerer, token_div);
}

void VisitModSmi(PortableLowering& lowerer) {
  VisitBinaryOpWithImm(lowerer, token_mod);
}

void VisitExpSmi(PortableLowering& lowerer) {
  VisitBinaryOpWithImm(lowerer, token_exp);
}

void VisitBitwiseOrSmi(PortableLowering& lowerer) {
  VisitBinaryOpWithImm(lowerer, token_bit_or);
}

void VisitBitwiseXorSmi(PortableLowering& lowerer) {
  VisitBinaryOpWithImm(lowerer, token_bit_xor);
}

void VisitBitwiseAndSmi(PortableLowering& lowerer) {
  VisitBinaryOpWithImm(lowerer, token_bit_and);
}

void VisitShiftLeftSmi(PortableLowering& lowerer) {
  VisitBinaryOpWithImm(lowerer, token_shl);
}

void VisitShiftRightSmi(PortableLowering& lowerer) {
  VisitBinaryOpWithImm(lowerer, token_sar);
}

void VisitShiftRightLogicalSmi(PortableLowering& lowerer) {
  VisitBinaryOpWithImm(lowerer, token_shr);
}

// Emits a unary operation on the accumulator. Unary ops draw from the
// BinaryOp IC slot kind — this mirrors how the slot is allocated elsewhere in
// this file for Inc/Dec/Negate; presumably matching upstream V8's feedback
// layout (TODO confirm against the target version's bytecode generator).
void VisitUnaryOp(PortableLowering& lowerer, Token::Value op) {
  int32_t feedback_slot = lowerer.feedback().AddBinaryOpICSlot().ToInt();
  lowerer.builder().UnaryOperation(op, feedback_slot);
}

void VisitInc(PortableLowering& lowerer) { VisitUnaryOp(lowerer, token_inc); }

void VisitDec(PortableLowering& lowerer) { VisitUnaryOp(lowerer, token_dec); }

// Unary minus is expressed as the kSub token.
void VisitNegate(PortableLowering& lowerer) {
  VisitUnaryOp(lowerer, token_sub);
}

void VisitBitwiseNot(PortableLowering& lowerer) {
  VisitUnaryOp(lowerer, token_bit_not);
}

// The ToBoolean variant must first coerce the accumulator to a boolean...
void VisitToBooleanLogicalNot(PortableLowering& lowerer) {
  lowerer.builder().LogicalNot(not_boolean_mode);
}

// ...whereas the plain variant may assume the accumulator is already boolean.
void VisitLogicalNot(PortableLowering& lowerer) {
  lowerer.builder().LogicalNot(boolean_mode);
}

// Lowers TypeOf. V8 11.x takes no feedback; later versions added a dedicated
// TypeOf feedback slot, so one is allocated there.
void VisitTypeOf(PortableLowering& lowerer) {
#if V8_MAJOR_VERSION == 11
  lowerer.builder().TypeOf();
#else
  int32_t feedback_slot = lowerer.feedback().AddTypeOfSlot().ToInt();
  lowerer.builder().TypeOf(feedback_slot);
#endif
}

// Lowers DeletePropertyStrict/Sloppy: deletes the property named by the
// accumulator from the object in register operand 0, with the language mode
// baked into the portable opcode rather than taken from the function.
void VisitDeletePropertyStrict(PortableLowering& lowerer) {
  interpreter::Register object(
      lowerer.bytecode_iterator().GetRegisterOperand(0).index());
  lowerer.builder().Delete(object, i::LanguageMode::kStrict);
}

void VisitDeletePropertySloppy(PortableLowering& lowerer) {
  interpreter::Register object(
      lowerer.bytecode_iterator().GetRegisterOperand(0).index());
  lowerer.builder().Delete(object, i::LanguageMode::kSloppy);
}

// Shared lowering for the variadic call family. Operand layout: 0 = callee
// register, 1 = first argument register, 2 = argument register count. Every
// call site gets a fresh Call IC feedback slot; |op| selects which builder
// emission to use.
void VisitCallVarArgs(PortableLowering& lowerer, interpreter::Bytecode op) {
  interpreter::Register target(
      lowerer.bytecode_iterator().GetRegisterOperand(0).index());
  interpreter::Register args_base(
      lowerer.bytecode_iterator().GetRegisterOperand(1).index());
  size_t arg_count = lowerer.bytecode_iterator().GetRegisterCountOperand(2);
  interpreter::RegisterList arg_list(args_base.index(),
                                     static_cast<int>(arg_count));
  int32_t slot = lowerer.feedback().AddCallICSlot().ToInt();

  interpreter::BytecodeArrayBuilder& builder = lowerer.builder();
  switch (op) {
    case interpreter::Bytecode::kCallAnyReceiver:
      builder.CallAnyReceiver(target, arg_list, slot);
      break;
    case interpreter::Bytecode::kCallProperty:
      builder.CallProperty(target, arg_list, slot);
      break;
    case interpreter::Bytecode::kCallUndefinedReceiver:
      builder.CallUndefinedReceiver(target, arg_list, slot);
      break;
    case interpreter::Bytecode::kCallWithSpread:
      builder.CallWithSpread(target, arg_list, slot);
      break;
    case interpreter::Bytecode::kConstructWithSpread:
      builder.ConstructWithSpread(target, arg_list, slot);
      break;
    default:
      UNREACHABLE();
  }
}

// Variadic-call dispatchers into VisitCallVarArgs.
void VisitCallAnyReceiver(PortableLowering& lowerer) {
  VisitCallVarArgs(lowerer, interpreter::Bytecode::kCallAnyReceiver);
}

void VisitCallProperty(PortableLowering& lowerer) {
  VisitCallVarArgs(lowerer, interpreter::Bytecode::kCallProperty);
}

// Fixed-arity CallProperty forms: the suffix N is the number of arguments
// after the receiver. Registers are passed explicitly instead of as a
// contiguous RegisterList; each site gets a fresh Call IC slot.
void VisitCallProperty0(PortableLowering& lowerer) {
  interpreter::Register callee(
      lowerer.bytecode_iterator().GetRegisterOperand(0).index());
  interpreter::Register receiver(
      lowerer.bytecode_iterator().GetRegisterOperand(1).index());
  int32_t feedback_slot = lowerer.feedback().AddCallICSlot().ToInt();
  lowerer.builder().CallProperty(callee, {receiver}, feedback_slot);
}

void VisitCallProperty1(PortableLowering& lowerer) {
  interpreter::Register callee(
      lowerer.bytecode_iterator().GetRegisterOperand(0).index());
  interpreter::Register receiver(
      lowerer.bytecode_iterator().GetRegisterOperand(1).index());
  interpreter::Register reg0(
      lowerer.bytecode_iterator().GetRegisterOperand(2).index());
  int32_t feedback_slot = lowerer.feedback().AddCallICSlot().ToInt();
  lowerer.builder().CallProperty(callee, {receiver, reg0}, feedback_slot);
}

void VisitCallProperty2(PortableLowering& lowerer) {
  interpreter::Register callee(
      lowerer.bytecode_iterator().GetRegisterOperand(0).index());
  interpreter::Register receiver(
      lowerer.bytecode_iterator().GetRegisterOperand(1).index());
  interpreter::Register reg0(
      lowerer.bytecode_iterator().GetRegisterOperand(2).index());
  interpreter::Register reg1(
      lowerer.bytecode_iterator().GetRegisterOperand(3).index());
  int32_t feedback_slot = lowerer.feedback().AddCallICSlot().ToInt();
  lowerer.builder().CallProperty(callee, {receiver, reg0, reg1}, feedback_slot);
}

// Undefined-receiver call family: the variadic form dispatches through
// VisitCallVarArgs; the fixed-arity forms pass their argument registers
// explicitly. Each site allocates a fresh Call IC slot.
void VisitCallUndefinedReceiver(PortableLowering& lowerer) {
  VisitCallVarArgs(lowerer, interpreter::Bytecode::kCallUndefinedReceiver);
}

void VisitCallUndefinedReceiver0(PortableLowering& lowerer) {
  interpreter::Register callee(
      lowerer.bytecode_iterator().GetRegisterOperand(0).index());
  int32_t feedback_slot = lowerer.feedback().AddCallICSlot().ToInt();
  // Empty register vector: the zero-argument form passes no registers.
  std::vector<interpreter::Register> regs(0);
  lowerer.builder().CallUndefinedReceiver(callee, regs, feedback_slot);
}

void VisitCallUndefinedReceiver1(PortableLowering& lowerer) {
  interpreter::Register callee(
      lowerer.bytecode_iterator().GetRegisterOperand(0).index());
  interpreter::Register arg0(
      lowerer.bytecode_iterator().GetRegisterOperand(1).index());
  int32_t feedback_slot = lowerer.feedback().AddCallICSlot().ToInt();
  lowerer.builder().CallUndefinedReceiver(callee, {arg0}, feedback_slot);
}

void VisitCallUndefinedReceiver2(PortableLowering& lowerer) {
  interpreter::Register callee(
      lowerer.bytecode_iterator().GetRegisterOperand(0).index());
  interpreter::Register arg0(
      lowerer.bytecode_iterator().GetRegisterOperand(1).index());
  interpreter::Register arg1(
      lowerer.bytecode_iterator().GetRegisterOperand(2).index());
  int32_t feedback_slot = lowerer.feedback().AddCallICSlot().ToInt();
  lowerer.builder().CallUndefinedReceiver(callee, {arg0, arg1}, feedback_slot);
}

// Lowers CallRuntime. The runtime function is re-resolved by NAME rather
// than used by id directly — presumably because Runtime::FunctionId values
// are not stable across V8 versions, so the name is the portable key
// (TODO confirm). A function unknown to this V8 build is a hard error.
void VisitCallRuntime(PortableLowering& lowerer) {
  Runtime::FunctionId func_id =
      lowerer.bytecode_iterator().GetRuntimeIdOperand(0);
  std::string func_name = Runtime::NameOfIndex(func_id);
  const i::Runtime::Function* func = i::Runtime::FunctionForName(
      reinterpret_cast<const unsigned char*>(func_name.c_str()),
      static_cast<int>(func_name.size()));
  if (func == nullptr) {
    P_UNREACHABLE();
  }
  interpreter::Register receiver(
      lowerer.bytecode_iterator().GetRegisterOperand(1).index());
  size_t reg_count = lowerer.bytecode_iterator().GetRegisterCountOperand(2);
  interpreter::RegisterList args(receiver.index(), static_cast<int>(reg_count));
#if V8_MAJOR_VERSION == 11
  if (func_id == Runtime::kDynamicImportCall) {
    interpreter::BytecodeArrayBuilder& builder = lowerer.builder();
    // In version 132, DynamicImportCall has 4 arguments while 114 has only 3.
    // Need to move the last argument from index 3 to index 2.
    constexpr size_t import_assertions_index = 2;
    constexpr size_t import_options_index = 3;
    builder.MoveRegister(args[import_options_index],
                         args[import_assertions_index]);
    args = args.Truncate(3);
  }
#endif
  lowerer.builder().CallRuntime(func->function_id, args);
}

// Lowers CallJSRuntime: calls a function stored in the native context.
// Operand 0 is a portable native-context index that is remapped to this V8
// version's context field via JSRuntime::ToContextField.
void VisitCallJSRuntime(PortableLowering& lowerer) {
  uint32_t callee = lowerer.bytecode_iterator().GetNativeContextIndexOperand(0);
  int index = static_cast<int>(JSRuntime::ToContextField(callee));
  interpreter::Register first_reg(
      lowerer.bytecode_iterator().GetRegisterOperand(1).index());
  size_t reg_count = lowerer.bytecode_iterator().GetRegisterCountOperand(2);
  interpreter::RegisterList args(first_reg.index(),
                                 static_cast<int>(reg_count));

  lowerer.builder().CallJSRuntime(index, args);
}

// Lowers Construct. Operand layout: 0 = constructor register, 1 = first
// argument register, 2 = argument register count. A fresh Call IC feedback
// slot is allocated per construct site.
void VisitConstruct(PortableLowering& lowerer) {
  interpreter::Register target(
      lowerer.bytecode_iterator().GetRegisterOperand(0).index());
  interpreter::Register args_base(
      lowerer.bytecode_iterator().GetRegisterOperand(1).index());
  size_t arg_count = lowerer.bytecode_iterator().GetRegisterCountOperand(2);
  interpreter::RegisterList arg_list(args_base.index(),
                                     static_cast<int>(arg_count));
  int32_t slot = lowerer.feedback().AddCallICSlot().ToInt();
  lowerer.builder().Construct(target, arg_list, slot);
}

// Emits a compare operation |op| with the left operand in register operand 0.
// The feedback slot kind depends on the operator: `in` uses a KeyedHas IC,
// `instanceof` an InstanceOf slot, and all other comparisons a Compare IC.
void VisitCompareOp(PortableLowering& lowerer, Token::Value op) {
  interpreter::Register lhs(
      lowerer.bytecode_iterator().GetRegisterOperand(0).index());
  const int32_t slot =
      op == token_in ? lowerer.feedback().AddKeyedHasICSlot().ToInt()
      : op == token_instance_of
          ? lowerer.feedback().AddInstanceOfSlot().ToInt()
          : lowerer.feedback().AddCompareICSlot().ToInt();
  lowerer.builder().CompareOperation(op, lhs, slot);
}

// One-line dispatchers for the Test* comparison bytecodes.
void VisitTestEqual(PortableLowering& lowerer) {
  VisitCompareOp(lowerer, token_eq);
}

void VisitTestEqualStrict(PortableLowering& lowerer) {
  VisitCompareOp(lowerer, token_eq_strict);
}

void VisitTestLessThan(PortableLowering& lowerer) {
  VisitCompareOp(lowerer, token_lt);
}

void VisitTestGreaterThan(PortableLowering& lowerer) {
  VisitCompareOp(lowerer, token_gt);
}

void VisitTestLessThanOrEqual(PortableLowering& lowerer) {
  VisitCompareOp(lowerer, token_lte);
}

void VisitTestGreaterThanOrEqual(PortableLowering& lowerer) {
  VisitCompareOp(lowerer, token_gte);
}

void VisitTestInstanceOf(PortableLowering& lowerer) {
  VisitCompareOp(lowerer, token_instance_of);
}

void VisitTestIn(PortableLowering& lowerer) {
  VisitCompareOp(lowerer, token_in);
}

// Accumulator conversion bytecodes. ToNumber/ToNumeric draw from the
// BinaryOp IC slot kind — consistent with the unary-op lowering above
// (TODO confirm this matches the target V8 version's feedback layout).
void VisitToNumber(PortableLowering& lowerer) {
  int32_t feedback_slot = lowerer.feedback().AddBinaryOpICSlot().ToInt();
  lowerer.builder().ToNumber(feedback_slot);
}

void VisitToNumeric(PortableLowering& lowerer) {
  int32_t feedback_slot = lowerer.feedback().AddBinaryOpICSlot().ToInt();
  lowerer.builder().ToNumeric(feedback_slot);
}

void VisitToString(PortableLowering& lowerer) { lowerer.builder().ToString(); }

// ToBoolean always coerces, so the convert-to-boolean mode is used.
void VisitToBoolean(PortableLowering& lowerer) {
  lowerer.builder().ToBoolean(not_boolean_mode);
}

// Lowers CreateRegExpLiteral: operand 0 is the constant-pool entry for the
// pattern, operand 1 the literal flags; a fresh literal slot is allocated.
void VisitCreateRegExpLiteral(PortableLowering& lowerer) {
  size_t pattern_entry = lowerer.bytecode_iterator().GetIndexOperand(0);
  int32_t literal_flags = lowerer.bytecode_iterator().GetFlag16Operand(1);
  int32_t feedback_slot = lowerer.feedback().AddLiteralSlot().ToInt();
  lowerer.builder().CreateRegExpLiteral(
      lowerer.GetOrCreateHandleConstantEntry(pattern_entry), feedback_slot,
      literal_flags);
}

// Lowers CreateArrayLiteral from a boilerplate stored in the portable
// constant pool (must be an array-literal constant).
void VisitCreateArrayLiteral(PortableLowering& lowerer) {
  size_t array_entry = lowerer.bytecode_iterator().GetIndexOperand(0);
  P_DCHECK(lowerer.GetConstantKind(array_entry) == PbcValueKind::kArrayLiteral);
  int32_t literal_flags = lowerer.bytecode_iterator().GetFlag8Operand(1);
  int32_t feedback_slot = lowerer.feedback().AddLiteralSlot().ToInt();
  lowerer.builder().CreateArrayLiteral(
      lowerer.GetOrCreateHandleConstantEntry(array_entry), feedback_slot,
      literal_flags);
}

// Spreads the iterable in the accumulator into a new array.
void VisitCreateArrayFromIterable(PortableLowering& lowerer) {
  lowerer.builder().CreateArrayFromIterable();
}

// Creates [] with only a feedback slot (no boilerplate constant needed).
void VisitCreateEmptyArrayLiteral(PortableLowering& lowerer) {
  int32_t feedback_slot = lowerer.feedback().AddLiteralSlot().ToInt();
  lowerer.builder().CreateEmptyArrayLiteral(feedback_slot);
}

// Lowers CreateObjectLiteral. The constant entry may be a regular or default
// object-literal boilerplate, or the tagged empty-boilerplate sentinel.
void VisitCreateObjectLiteral(PortableLowering& lowerer) {
  size_t object_entry = lowerer.bytecode_iterator().GetIndexOperand(0);
  P_DCHECK(lowerer.GetConstantKind(object_entry) ==
               PbcValueKind::kObjectLiteral ||
           lowerer.GetConstantKind(object_entry) ==
               PbcValueKind::kDefaultObjectLiteral ||
           lowerer.GetUniqueTag(object_entry) ==
               UniqueTag::kEmptyObjectBoilerplateDescription);
  int32_t literal_flags = lowerer.bytecode_iterator().GetFlag8Operand(1);
  int32_t feedback_slot = lowerer.feedback().AddLiteralSlot().ToInt();
  lowerer.builder().CreateObjectLiteral(
      lowerer.GetOrCreateHandleConstantEntry(object_entry), feedback_slot,
      literal_flags);
}

// Creates {} — no boilerplate and no feedback slot required.
void VisitCreateEmptyObjectLiteral(PortableLowering& lowerer) {
  lowerer.builder().CreateEmptyObjectLiteral();
}

// Lowers CreateClosure from a SharedFunctionInfo constant. V8 11.x allocates
// a plain closure slot; later versions require the closure's parameter count
// when reserving the slot, which is read back from the constant pool.
void VisitCreateClosure(PortableLowering& lowerer) {
  size_t shared_entry = lowerer.bytecode_iterator().GetIndexOperand(0);
  P_DCHECK(lowerer.GetConstantKind(shared_entry) == PbcValueKind::kFunction);

#if V8_MAJOR_VERSION == 11
  int32_t feedback_slot = lowerer.feedback().AddCreateClosureSlot();
#else
  uint16_t parameter_count =
      lowerer.GetParammeterCountFromConstantPool(shared_entry);
  int32_t feedback_slot =
      lowerer.feedback().AddCreateClosureParameterCount(parameter_count);
#endif

  int32_t flags = lowerer.bytecode_iterator().GetFlag8Operand(1);
  lowerer.builder().CreateClosure(
      lowerer.GetOrCreateHandleConstantEntry(shared_entry), feedback_slot,
      flags);
}

// Variant that shares a feedback slot across occurrences: operand 1 is a
// cache key; the slot is allocated on first use and reused afterwards
// (-1 marks a not-yet-allocated cache entry).
void VisitCreateClosureWithFeedback(PortableLowering& lowerer) {
  size_t shared_entry = lowerer.bytecode_iterator().GetIndexOperand(0);
  P_DCHECK(lowerer.GetConstantKind(shared_entry) == PbcValueKind::kFunction);

  int& feedback_slot = GetCachedFeedbackSlot(
      lowerer, lowerer.bytecode_iterator().GetIndexOperand(1));
  if (feedback_slot == -1) {
#if V8_MAJOR_VERSION == 11
    feedback_slot = lowerer.feedback().AddCreateClosureSlot();
#else
    uint16_t parameter_count =
        lowerer.GetParammeterCountFromConstantPool(shared_entry);
    feedback_slot =
        lowerer.feedback().AddCreateClosureParameterCount(parameter_count);
#endif
  }

  int32_t flags = lowerer.bytecode_iterator().GetFlag8Operand(2);
  lowerer.builder().CreateClosure(
      lowerer.GetOrCreateHandleConstantEntry(shared_entry), feedback_slot,
      flags);
}

// Context-creation bytecodes. Each takes a scope-info constant-pool entry
// (asserted to be a kScope constant) and emits the matching builder call.
void VisitCreateBlockContext(PortableLowering& lowerer) {
  size_t scope_entry = lowerer.bytecode_iterator().GetIndexOperand(0);
  P_DCHECK(lowerer.GetConstantKind(scope_entry) == PbcValueKind::kScope);
  lowerer.builder().CreateBlockContext(
      lowerer.GetOrCreateHandleConstantEntry(scope_entry));
}

// Catch contexts additionally bind the caught exception register.
void VisitCreateCatchContext(PortableLowering& lowerer) {
  interpreter::Register exception(
      lowerer.bytecode_iterator().GetRegisterOperand(0).index());
  size_t scope_entry = lowerer.bytecode_iterator().GetIndexOperand(1);
  P_DCHECK(lowerer.GetConstantKind(scope_entry) == PbcValueKind::kScope);
  lowerer.builder().CreateCatchContext(
      exception, lowerer.GetOrCreateHandleConstantEntry(scope_entry));
}

// Function contexts carry a slot count in immediate operand 1.
void VisitCreateFunctionContext(PortableLowering& lowerer) {
  size_t scope_entry = lowerer.bytecode_iterator().GetIndexOperand(0);
  P_DCHECK(lowerer.GetConstantKind(scope_entry) == PbcValueKind::kScope);
  uint32_t slots = lowerer.bytecode_iterator().GetUnsignedImmediateOperand(1);
  lowerer.builder().CreateFunctionContext(
      lowerer.GetOrCreateHandleConstantEntry(scope_entry), slots);
}

// Lowers CreateEvalContext: operand 0 is the scope-info constant, operand 1
// the number of context slots.
// Bug fix: this previously emitted CreateFunctionContext — a copy-paste of
// VisitCreateFunctionContext above — so an eval context was lowered into a
// function context. BytecodeArrayBuilder provides CreateEvalContext with the
// same (constant entry, slot count) signature; use it so the interpreter
// allocates an eval-scope context.
void VisitCreateEvalContext(PortableLowering& lowerer) {
  size_t scope_entry = lowerer.bytecode_iterator().GetIndexOperand(0);
  P_DCHECK(lowerer.GetConstantKind(scope_entry) == PbcValueKind::kScope);
  uint32_t slots = lowerer.bytecode_iterator().GetUnsignedImmediateOperand(1);
  lowerer.builder().CreateEvalContext(
      lowerer.GetOrCreateHandleConstantEntry(scope_entry), slots);
}

// Arguments-object creation: one dispatcher per CreateArgumentsType.
void VisitCreateMappedArguments(PortableLowering& lowerer) {
  lowerer.builder().CreateArguments(i::CreateArgumentsType::kMappedArguments);
}

void VisitCreateUnmappedArguments(PortableLowering& lowerer) {
  lowerer.builder().CreateArguments(i::CreateArgumentsType::kUnmappedArguments);
}

void VisitCreateRestParameter(PortableLowering& lowerer) {
  lowerer.builder().CreateArguments(i::CreateArgumentsType::kRestParameter);
}

// Lowers JumpLoop (the only backward jump). The loop header must already
// have been registered in backward_labels() when its offset was first seen.
// Operand 1 is the loop nesting depth; a fresh JumpLoop slot is allocated.
void VisitJumpLoop(PortableLowering& lowerer) {
  int32_t target_offset = lowerer.bytecode_iterator().GetJumpTargetOffset();
  DCHECK(lowerer.backward_labels().find(target_offset) !=
         lowerer.backward_labels().end());
  int32_t loop_depth = lowerer.bytecode_iterator().GetImmediateOperand(1);
  int32_t feedback_slot = lowerer.feedback().AddJumpLoopSlot().ToInt();
  interpreter::BytecodeLoopHeader* loop_header =
      &lowerer.backward_labels()[target_offset];

  // TODO: fix source position
  lowerer.builder().JumpLoop(loop_header, loop_depth, kNoSourcePosition,
                             feedback_slot);
}

// Fetches the next unbound label for this jump's target offset. Forward
// targets are pre-registered in forward_labels(); each call to get_unbound()
// consumes one label from the target's label list.
interpreter::BytecodeLabel* VisitForwardJump(PortableLowering& lowerer) {
  int32_t target_offset = lowerer.bytecode_iterator().GetJumpTargetOffset();
  P_DCHECK(lowerer.forward_labels().find(target_offset) !=
           lowerer.forward_labels().end());
  interpreter::BytecodeLabel* target_label =
      lowerer.forward_labels().at(target_offset).get_unbound();
  return target_label;
}

// Variant consuming TWO labels for the same target — for lowerings that emit
// two jump instructions to one portable target.
std::pair<interpreter::BytecodeLabel*, interpreter::BytecodeLabel*>
VisitForwardJump2(PortableLowering& lowerer) {
  int32_t target_offset = lowerer.bytecode_iterator().GetJumpTargetOffset();
  P_DCHECK(lowerer.forward_labels().find(target_offset) !=
           lowerer.forward_labels().end());
  interpreter::BytecodeLabel* target_label1 =
      lowerer.forward_labels().at(target_offset).get_unbound();
  interpreter::BytecodeLabel* target_label2 =
      lowerer.forward_labels().at(target_offset).get_unbound();
  return {target_label1, target_label2};
}

// Forward-jump visitors. The *Constant variants (wide operands in the
// portable encoding) are lowered identically to their non-constant
// counterparts. boolean_mode asserts the accumulator is already a boolean;
// not_boolean_mode requests a ToBoolean coercion first.
void VisitJump(PortableLowering& lowerer) {
  interpreter::BytecodeLabel* target_label = VisitForwardJump(lowerer);
  lowerer.builder().Jump(target_label);
}

void VisitJumpConstant(PortableLowering& lowerer) {
  interpreter::BytecodeLabel* target_label = VisitForwardJump(lowerer);
  lowerer.builder().Jump(target_label);
}

void VisitJumpIfNullConstant(PortableLowering& lowerer) {
  interpreter::BytecodeLabel* target_label = VisitForwardJump(lowerer);
  lowerer.builder().JumpIfNull(target_label);
}

void VisitJumpIfNotNullConstant(PortableLowering& lowerer) {
  interpreter::BytecodeLabel* target_label = VisitForwardJump(lowerer);
  lowerer.builder().JumpIfNotNull(target_label);
}

void VisitJumpIfUndefinedConstant(PortableLowering& lowerer) {
  interpreter::BytecodeLabel* target_label = VisitForwardJump(lowerer);
  lowerer.builder().JumpIfUndefined(target_label);
}

void VisitJumpIfNotUndefinedConstant(PortableLowering& lowerer) {
  interpreter::BytecodeLabel* target_label = VisitForwardJump(lowerer);
  lowerer.builder().JumpIfNotUndefined(target_label);
}

void VisitJumpIfUndefinedOrNullConstant(PortableLowering& lowerer) {
  interpreter::BytecodeLabel* target_label = VisitForwardJump(lowerer);
  lowerer.builder().JumpIfUndefinedOrNull(target_label);
}

void VisitJumpIfTrueConstant(PortableLowering& lowerer) {
  interpreter::BytecodeLabel* target_label = VisitForwardJump(lowerer);
  lowerer.builder().JumpIfTrue(boolean_mode, target_label);
}

void VisitJumpIfFalseConstant(PortableLowering& lowerer) {
  interpreter::BytecodeLabel* target_label = VisitForwardJump(lowerer);
  lowerer.builder().JumpIfFalse(boolean_mode, target_label);
}

void VisitJumpIfToBooleanTrueConstant(PortableLowering& lowerer) {
  interpreter::BytecodeLabel* target_label = VisitForwardJump(lowerer);
  lowerer.builder().JumpIfTrue(not_boolean_mode, target_label);
}

void VisitJumpIfToBooleanFalseConstant(PortableLowering& lowerer) {
  interpreter::BytecodeLabel* target_label = VisitForwardJump(lowerer);
  lowerer.builder().JumpIfFalse(not_boolean_mode, target_label);
}

void VisitJumpIfToBooleanTrue(PortableLowering& lowerer) {
  interpreter::BytecodeLabel* target_label = VisitForwardJump(lowerer);
  lowerer.builder().JumpIfTrue(not_boolean_mode, target_label);
}

void VisitJumpIfToBooleanFalse(PortableLowering& lowerer) {
  interpreter::BytecodeLabel* target_label = VisitForwardJump(lowerer);
  lowerer.builder().JumpIfFalse(not_boolean_mode, target_label);
}

void VisitJumpIfTrue(PortableLowering& lowerer) {
  interpreter::BytecodeLabel* target_label = VisitForwardJump(lowerer);
  lowerer.builder().JumpIfTrue(boolean_mode, target_label);
}

void VisitJumpIfFalse(PortableLowering& lowerer) {
  interpreter::BytecodeLabel* target_label = VisitForwardJump(lowerer);
  lowerer.builder().JumpIfFalse(boolean_mode, target_label);
}

void VisitJumpIfNull(PortableLowering& lowerer) {
  interpreter::BytecodeLabel* target_label = VisitForwardJump(lowerer);
  lowerer.builder().JumpIfNull(target_label);
}

void VisitJumpIfNotNull(PortableLowering& lowerer) {
  interpreter::BytecodeLabel* target_label = VisitForwardJump(lowerer);
  lowerer.builder().JumpIfNotNull(target_label);
}

void VisitJumpIfUndefined(PortableLowering& lowerer) {
  interpreter::BytecodeLabel* target_label = VisitForwardJump(lowerer);
  lowerer.builder().JumpIfUndefined(target_label);
}

void VisitJumpIfNotUndefined(PortableLowering& lowerer) {
  interpreter::BytecodeLabel* target_label = VisitForwardJump(lowerer);
  lowerer.builder().JumpIfNotUndefined(target_label);
}

void VisitJumpIfUndefinedOrNull(PortableLowering& lowerer) {
  interpreter::BytecodeLabel* target_label = VisitForwardJump(lowerer);
  lowerer.builder().JumpIfUndefinedOrNull(target_label);
}

// Exchanges the accumulator with the isolate's pending message.
void VisitSetPendingMessage(PortableLowering& lowerer) {
  lowerer.builder().SetPendingMessage();
}

// Throws the accumulator value.
void VisitThrow(PortableLowering& lowerer) { lowerer.builder().Throw(); }

// Lowers the return path of async functions/generators: resolves the
// generator's promise with the accumulator value via the appropriate inline
// runtime call, then emits a plain Return. Async generators additionally
// pass done=true to AsyncGeneratorResolve.
void VisitAsyncReturn(PortableLowering& lowerer) {
  LowerRegisterAllocationScope register_scope(&lowerer);
  interpreter::BytecodeArrayBuilder& builder = lowerer.builder();
  auto func_kind = lowerer.shared_func_info()->kind();

  if (IsAsyncGeneratorFunction(func_kind)) {
    interpreter::RegisterList args =
        builder.register_allocator()->NewRegisterList(3);
    builder
        .MoveRegister(lowerer.generator_object(), args[0])  // generator
        .StoreAccumulatorInRegister(args[1])                // value
        .LoadTrue()
        .StoreAccumulatorInRegister(args[2])  // done
        .CallRuntime(i::Runtime::kInlineAsyncGeneratorResolve, args);
  } else {
    // Must be an async function, or a module with top-level await (the
    // predicate name changed between V8 versions).
    DCHECK(IsAsyncFunction(func_kind) ||
#if V8_MAJOR_VERSION == 11
           IsAsyncModule(func_kind)
#else
           IsModuleWithTopLevelAwait(func_kind)
#endif
    );
    interpreter::RegisterList args =
        builder.register_allocator()->NewRegisterList(2);
    builder
        .MoveRegister(lowerer.generator_object(), args[0])  // generator
        .StoreAccumulatorInRegister(args[1])                // value
        .CallRuntime(i::Runtime::kInlineAsyncFunctionResolve, args);
  }
  builder.Return();
}

void VisitReturn(PortableLowering& lowerer) { lowerer.builder().Return(); }

void VisitDebugger(PortableLowering& lowerer) { lowerer.builder().Debugger(); }

// The handlers below are for portable opcodes this lowering never expects to
// see (prefix bytecodes are consumed by the iterator; the rest are presumably
// rewritten or rejected earlier in the pipeline — TODO confirm). Hitting one
// is a bug.
void VisitWide(PortableLowering& lowerer) { P_UNREACHABLE(); }
void VisitExtraWide(PortableLowering& lowerer) { P_UNREACHABLE(); }
void VisitNop0(PortableLowering& lowerer) { P_UNREACHABLE(); }
void VisitNop1(PortableLowering& lowerer) { P_UNREACHABLE(); }
void VisitNop2(PortableLowering& lowerer) { P_UNREACHABLE(); }
void VisitNop3(PortableLowering& lowerer) { P_UNREACHABLE(); }
void VisitNop4(PortableLowering& lowerer) { P_UNREACHABLE(); }
void VisitStaScriptContextSlot(PortableLowering& lowerer) { P_UNREACHABLE(); }
void VisitStaCurrentScriptContextSlot(PortableLowering& lowerer) {
  P_UNREACHABLE();
}
void VisitGetNamedPropertyFromSuper(PortableLowering& lowerer) {
  P_UNREACHABLE();
}
void VisitGetEnumeratedKeyedProperty(PortableLowering& lowerer) {
  P_UNREACHABLE();
}
void VisitLdaModuleVariable(PortableLowering& lowerer) { P_UNREACHABLE(); }
void VisitStaModuleVariable(PortableLowering& lowerer) { P_UNREACHABLE(); }

void VisitStaInArrayLiteral(PortableLowering& lowerer) { P_UNREACHABLE(); }

void VisitDefineKeyedOwnPropertyInLiteral(PortableLowering& lowerer) {
  P_UNREACHABLE();
}
// Stores the super constructor of the accumulator's value into register
// operand 0.
void VisitGetSuperConstructor(PortableLowering& lowerer) {
  interpreter::BytecodeArrayBuilder& builder = lowerer.builder();
  interpreter::Register out(
      lowerer.bytecode_iterator().GetRegisterOperand(0).index());
  builder.GetSuperConstructor(out);
}
// Lowers FindNonDefaultConstructorOrConstruct; the result is a pair written
// into two consecutive registers starting at register operand 2.
void VisitFindNonDefaultConstructorOrConstruct(PortableLowering& lowerer) {
  interpreter::BytecodeArrayBuilder& builder = lowerer.builder();
  interpreter::Register this_function(
      lowerer.bytecode_iterator().GetRegisterOperand(0).index());
  interpreter::Register new_target(
      lowerer.bytecode_iterator().GetRegisterOperand(1).index());
  interpreter::Register first_reg(
      lowerer.bytecode_iterator().GetRegisterOperand(2).index());
  interpreter::RegisterList output(first_reg.index(), 2);
  builder.FindNonDefaultConstructorOrConstruct(this_function, new_target,
                                               output);
}

// Spread forms reuse the variadic call lowering.
void VisitCallWithSpread(PortableLowering& lowerer) {
  VisitCallVarArgs(lowerer, interpreter::Bytecode::kCallWithSpread);
}

void VisitCallRuntimeForPair(PortableLowering& lowerer) { P_UNREACHABLE(); }
void VisitInvokeIntrinsic(PortableLowering& lowerer) { P_UNREACHABLE(); }

void VisitConstructWithSpread(PortableLowering& lowerer) {
  VisitCallVarArgs(lowerer, interpreter::Bytecode::kConstructWithSpread);
}

// Lowers ConstructForwardAllArgs. V8 11.x has no such bytecode, so it is
// emulated with ConstructWithSpread over the pre-computed forwarded-argument
// register list; later versions emit the dedicated bytecode.
void VisitConstructForwardAllArgs(PortableLowering& lowerer) {
  interpreter::BytecodeArrayBuilder& builder = lowerer.builder();
  interpreter::Register constructor(
      lowerer.bytecode_iterator().GetRegisterOperand(0).index());
  int32_t feedback_slot = lowerer.feedback().AddCallICSlot().ToInt();
#if V8_MAJOR_VERSION == 11
  builder.ConstructWithSpread(constructor, lowerer.construct_forward_args(),
                              feedback_slot);
#else
  builder.ConstructForwardAllArgs(constructor, feedback_slot);
#endif
}

// Lowers ToName. In V8 11.x ToName writes to a register, so a scratch
// register is allocated and the result is loaded back into the accumulator;
// later versions operate on the accumulator directly.
void VisitToName(PortableLowering& lowerer) {
#if V8_MAJOR_VERSION == 11
  LowerRegisterAllocationScope register_scope(&lowerer);
  interpreter::Register out =
      lowerer.builder().register_allocator()->NewRegister();
  lowerer.builder().ToName(out).LoadAccumulatorWithRegister(out);
#else
  lowerer.builder().ToName();
#endif
}

// Not supported by this lowering.
void VisitToObject(PortableLowering& lowerer) { P_UNREACHABLE(); }
void VisitCloneObject(PortableLowering& lowerer) { P_UNREACHABLE(); }

// Lowers GetTemplateObject from a template-literal constant-pool entry,
// allocating a fresh literal feedback slot.
void VisitGetTemplateObject(PortableLowering& lowerer) {
  size_t object_entry = lowerer.bytecode_iterator().GetIndexOperand(0);
  P_DCHECK(lowerer.GetConstantKind(object_entry) ==
           PbcValueKind::kTemplateLiteral);
  size_t entry = lowerer.GetOrCreateHandleConstantEntry(object_entry);
  int32_t feedback_slot = lowerer.feedback().AddLiteralSlot().ToInt();
  lowerer.builder().GetTemplateObject(entry, feedback_slot);
}

// Not supported by this lowering.
void VisitCreateWithContext(PortableLowering& lowerer) { P_UNREACHABLE(); }
void VisitJumpIfJSReceiverConstant(PortableLowering& lowerer) {
  P_UNREACHABLE();
}
void VisitNop19(PortableLowering& lowerer) { P_UNREACHABLE(); }

void VisitJumpIfJSReceiver(PortableLowering& lowerer) {
  interpreter::BytecodeLabel* target_label = VisitForwardJump(lowerer);
  lowerer.builder().JumpIfJSReceiver(target_label);
}

void VisitNop13(PortableLowering& lowerer) { P_UNREACHABLE(); }

// Lowers SwitchOnSmiNoFeedback using the jump table pre-registered for this
// bytecode's offset.
void VisitSwitchOnSmiNoFeedback(PortableLowering& lowerer) {
  uint32_t current_offset = lowerer.bytecode_iterator().GetAbsoluteOffset(0);
  std::unordered_map<uint32_t, interpreter::BytecodeJumpTable*>& jump_tables_ =
      lowerer.get_jump_tables();
  interpreter::BytecodeJumpTable* jump_table = jump_tables_[current_offset];
  lowerer.builder().SwitchOnSmiNoFeedback(jump_table);
}

// Suspends the generator to resume at the next suspend_id, with output stored
// in the accumulator. When the generator is resumed, the sent value is loaded
// in the accumulator.
void BuildSuspendPoint(PortableLowering& lowerer) {
  interpreter::BytecodeArrayBuilder& builder = lowerer.builder();
  // Because we eliminate jump targets in dead code, we also eliminate resumes
  // when the suspend is not emitted because otherwise the below call to Bind
  // would start a new basic block and the code would be considered alive.
  if (builder.RemainderOfBlockIsDead()) {
    return;
  }

  // Suspend ids are assigned sequentially in emission order; the counter
  // advances only for suspend points that are actually emitted.
  const int suspend_id = lowerer.suspend_count();
  lowerer.inc_suspend_count();

  interpreter::RegisterList registers =
      builder.register_allocator()->AllLiveRegisters();

  // Save context, registers, and state. This bytecode then returns the value
  // in the accumulator.
  builder.SuspendGenerator(lowerer.generator_object(), registers, suspend_id);

  // Upon resume, we continue here.
  builder.Bind(lowerer.generator_jump_table(), suspend_id);

  // Clobbers all registers and sets the accumulator to the
  // [[input_or_debug_pos]] slot of the generator object.
  builder.ResumeGenerator(lowerer.generator_object(), registers);
}

// Bundles the registers holding an iterator object and its cached "next"
// method, plus whether it is a sync or async iterator. Plain value type; the
// referenced registers must outlive the record.
class IteratorRecord final {
 public:
  IteratorRecord(interpreter::Register object_register,
                 interpreter::Register next_register,
                 IteratorType type = IteratorType::kNormal)
      : type_(type), object_(object_register), next_(next_register) {
    DCHECK(object_.is_valid() && next_.is_valid());
  }

  inline IteratorType type() const { return type_; }
  inline interpreter::Register object() const { return object_; }
  inline interpreter::Register next() const { return next_; }

 private:
  IteratorType type_;             // kNormal (sync) or kAsync
  interpreter::Register object_;  // the iterator object
  interpreter::Register next_;    // the iterator's "next" method
};

// LoopScope delimits the scope of {loop}, from its header to its final jump.
// It should be constructed iff a (conceptual) back edge should be produced. In
// the case of creating a LoopBuilder but never emitting the loop, it is valid
// to skip the creation of LoopScope.
class LoopScope final {
 public:
  explicit LoopScope(interpreter::LoopBuilder* loop) : loop_builder_(loop) {
    loop_builder_->LoopHeader();
  }

  // Emits the back edge (jump to the loop header) on scope exit.
  ~LoopScope() { loop_builder_->JumpToHeader(0, nullptr); }

 private:
  interpreter::LoopBuilder* const loop_builder_;
};

// Awaits the value in the accumulator: calls the version-appropriate Await
// intrinsic (async function vs async generator), suspends, and on resume
// dispatches on the resume mode — rethrowing on a "throw" resume, otherwise
// continuing with the sent value in the accumulator.
void BuildAwait(PortableLowering& lowerer) {
  interpreter::BytecodeArrayBuilder& builder = lowerer.builder();
  {
    // Await(operand) and suspend.
    LowerRegisterAllocationScope register_scope(&lowerer);

    i::Runtime::FunctionId await_intrinsic_id;
#if V8_MAJOR_VERSION == 11
    if (IsAsyncGeneratorFunction(lowerer.shared_func_info()->kind())) {
      await_intrinsic_id =
          i::Runtime::kInlineAsyncGeneratorAwaitUncaught;  // todo: caught?
    } else {
      await_intrinsic_id = i::Runtime::kInlineAsyncFunctionAwaitUncaught;
    }
#else
    // Newer V8 merged the caught/uncaught variants into a single intrinsic.
    if (IsAsyncGeneratorFunction(lowerer.shared_func_info()->kind())) {
      await_intrinsic_id = i::Runtime::kInlineAsyncGeneratorAwait;
    } else {
      await_intrinsic_id = i::Runtime::kInlineAsyncFunctionAwait;
    }
#endif
    interpreter::RegisterList args =
        builder.register_allocator()->NewRegisterList(2);
    builder.MoveRegister(lowerer.generator_object(), args[0])
        .StoreAccumulatorInRegister(args[1])
        .CallRuntime(await_intrinsic_id, args);
  }

  BuildSuspendPoint(lowerer);

  // Note: allocated from the caller's active register scope, after the inner
  // scope above has been torn down.
  interpreter::Register input = builder.register_allocator()->NewRegister();
  interpreter::Register resume_mode =
      builder.register_allocator()->NewRegister();

  // Now dispatch on resume mode.
  interpreter::BytecodeLabel resume_next;
  builder.StoreAccumulatorInRegister(input)
      .CallRuntime(i::Runtime::kInlineGeneratorGetResumeMode,
                   lowerer.generator_object())
      .StoreAccumulatorInRegister(resume_mode)
      .LoadLiteral(i::Smi::FromInt(JSGeneratorObject::kNext))
      .CompareReference(resume_mode)
      .JumpIfTrue(boolean_mode, &resume_next);

  // Resume with "throw" completion (rethrow the received value).
  // TODO(): Add a debug-only check that the accumulator is
  // JSGeneratorObject::kThrow.
  builder.LoadAccumulatorWithRegister(input).ReThrow();

  // Resume with next.
  builder.Bind(&resume_next);
  builder.LoadAccumulatorWithRegister(input);
}

// Emits the spec GetIterator(obj, hint) sequence for the object in the
// accumulator, leaving the iterator in the accumulator. For async hints,
// falls back to the sync @@iterator wrapped by CreateAsyncFromSyncIterator
// when @@asyncIterator is undefined or null; for sync hints the dedicated
// GetIterator bytecode performs the lookup, call, and receiver check.
void BuildGetIterator(PortableLowering& lowerer, IteratorType hint) {
  interpreter::BytecodeArrayBuilder& builder = lowerer.builder();
  if (hint == IteratorType::kAsync) {
    LowerRegisterAllocationScope register_scope(&lowerer);

    interpreter::Register obj = builder.register_allocator()->NewRegister();
    interpreter::Register method = builder.register_allocator()->NewRegister();

    // Set method to GetMethod(obj, @@asyncIterator)
    builder.StoreAccumulatorInRegister(obj).LoadAsyncIteratorProperty(
        obj, lowerer.feedback().AddLoadICSlot().ToInt());

    interpreter::BytecodeLabel async_iterator_undefined, done;
    builder.JumpIfUndefinedOrNull(&async_iterator_undefined);

    // Let iterator be Call(method, obj)
    builder.StoreAccumulatorInRegister(method).CallProperty(
        method, interpreter::RegisterList(obj),
        lowerer.feedback().AddCallICSlot().ToInt());

    // If Type(iterator) is not Object, throw a TypeError exception.
    builder.JumpIfJSReceiver(&done);
    builder.CallRuntime(i::Runtime::kThrowSymbolAsyncIteratorInvalid);

    builder.Bind(&async_iterator_undefined);
    // If method is undefined, Let syncMethod be GetMethod(obj, @@iterator)
    builder
        .LoadIteratorProperty(obj, lowerer.feedback().AddLoadICSlot().ToInt())
        .StoreAccumulatorInRegister(method);

    // Let syncIterator be Call(syncMethod, obj)
    builder.CallProperty(method, interpreter::RegisterList(obj),
                         lowerer.feedback().AddCallICSlot().ToInt());

    // Return CreateAsyncFromSyncIterator(syncIterator)
    // alias `method` register as it's no longer used
    interpreter::Register sync_iter = method;
    builder.StoreAccumulatorInRegister(sync_iter).CallRuntime(
        i::Runtime::kInlineCreateAsyncFromSyncIterator, sync_iter);

    builder.Bind(&done);
  } else {
    LowerRegisterAllocationScope register_scope(&lowerer);
    interpreter::Register obj = builder.register_allocator()->NewRegister();
    int load_feedback_index = lowerer.feedback().AddLoadICSlot().ToInt();
    int call_feedback_index = lowerer.feedback().AddCallICSlot().ToInt();

    // Let method be GetMethod(obj, @@iterator) and
    // iterator be Call(method, obj). If iterator is
    // not JSReceiver, then throw TypeError.
    builder.StoreAccumulatorInRegister(obj).GetIterator(
        obj, load_feedback_index, call_feedback_index);
  }
}

// Returns an IteratorRecord which is valid for the lifetime of the current
// register_allocation_scope. Expects the iterable in the accumulator; stores
// the iterator in |object| and its "next" method in |next|.
IteratorRecord BuildGetIteratorRecord(PortableLowering& lowerer,
                                      interpreter::Register next,
                                      interpreter::Register object,
                                      IteratorType hint) {
  DCHECK(next.is_valid() && object.is_valid());
  BuildGetIterator(lowerer, hint);

  lowerer.builder()
      .StoreAccumulatorInRegister(object)
      .LoadNamedProperty(
          object, lowerer.isolate()->ast_string_constants()->next_string(),
          lowerer.feedback().AddLoadICSlot().ToInt())
      .StoreAccumulatorInRegister(next);
  return IteratorRecord(object, next, hint);
}

// Loads iterator.<method_name> and, when it is neither undefined nor null,
// calls it with |receiver_and_args| and then jumps to |if_called| (if given,
// with the call result in the accumulator); otherwise jumps to a fresh label
// appended to |if_notcalled|.
void BuildCallIteratorMethod(PortableLowering& lowerer,
                             interpreter::Register iterator,
                             const AstRawString* method_name,
                             interpreter::RegisterList receiver_and_args,
                             interpreter::BytecodeLabel* if_called,
                             interpreter::BytecodeLabels* if_notcalled) {
  LowerRegisterAllocationScope register_scope(&lowerer);

  interpreter::Register method =
      lowerer.builder().register_allocator()->NewRegister();
  i::FeedbackSlot slot = lowerer.feedback().AddLoadICSlot();
  lowerer.builder()
      .LoadNamedProperty(iterator, method_name, slot.ToInt())
      .JumpIfUndefinedOrNull(if_notcalled->New())
      .StoreAccumulatorInRegister(method)
      .CallProperty(method, receiver_and_args,
                    lowerer.feedback().AddCallICSlot().ToInt());

  if (if_called != nullptr) {
    lowerer.builder().Jump(if_called);
  }
}

// Emits the spec IteratorClose(iterator) sequence: calls the iterator's
// "return" method if present (awaiting the result for async iterators) and
// throws unless the call result is an object. The close result is discarded.
void BuildIteratorClose(PortableLowering& lowerer,
                        const IteratorRecord& iterator) {
  interpreter::BytecodeArrayBuilder& builder = lowerer.builder();
  LowerRegisterAllocationScope register_scope(&lowerer);
  interpreter::BytecodeLabels done(&lowerer.zone());
  interpreter::BytecodeLabel if_called;
  interpreter::RegisterList args = interpreter::RegisterList(iterator.object());
  BuildCallIteratorMethod(
      lowerer, iterator.object(),
      lowerer.isolate()->ast_string_constants()->return_string(), args,
      &if_called, &done);
  builder.Bind(&if_called);

  if (iterator.type() == IteratorType::kAsync) {
    BuildAwait(lowerer);
  }

  // Spec step: if Type(innerResult) is not Object, throw a TypeError.
  builder.JumpIfJSReceiver(done.New());
  {
    LowerRegisterAllocationScope inner_register_scope(&lowerer);
    interpreter::Register return_result =
        builder.register_allocator()->NewRegister();
    builder.StoreAccumulatorInRegister(return_result)
        .CallRuntime(i::Runtime::kThrowIteratorResultNotAnObject,
                     return_result);
  }

  done.Bind(&builder);
}

// Lowers yield* (delegated yield): obtains an iterator from the value in the
// accumulator, then loops forwarding next/return/throw resumptions to the
// delegated iterator, suspending with each non-done result, until the
// delegate reports done. |finally_label|.first is jumped to when a "return"
// resumption finds no "return" method on the delegate; |finally_label|.second
// when the loop ends with a kReturn resume mode — in both cases the value to
// return is moved into |finally_value| first. With null labels, a plain
// return (or async return) is emitted instead.
void YieldStarGenerate(
    PortableLowering& lowerer,
    std::pair<interpreter::BytecodeLabel*, interpreter::BytecodeLabel*>
        finally_label,
    interpreter::Register finally_value) {
  interpreter::BytecodeArrayBuilder& builder = lowerer.builder();
  const AstStringConstants* ast_string_constants =
      lowerer.isolate()->ast_string_constants();
  P_DCHECK_NOT_NULL(ast_string_constants);
  // |output| and |resume_mode| are allocated outside the scope below because
  // they are still read after it ends (the post-loop completion dispatch).
  interpreter::Register output = builder.register_allocator()->NewRegister();
  interpreter::Register resume_mode =
      builder.register_allocator()->NewRegister();

  bool is_async = IsAsyncGeneratorFunction(lowerer.shared_func_info()->kind());
  IteratorType iterator_type =
      is_async ? IteratorType::kAsync : IteratorType::kNormal;

  {
    LowerRegisterAllocationScope register_scope(&lowerer);
    interpreter::RegisterList iterator_and_input =
        builder.register_allocator()->NewRegisterList(2);
    IteratorRecord iterator = BuildGetIteratorRecord(
        lowerer, builder.register_allocator()->NewRegister() /* next method */,
        iterator_and_input[0], iterator_type);

    interpreter::Register input = iterator_and_input[1];
    builder.LoadUndefined().StoreAccumulatorInRegister(input);
    builder.LoadLiteral(i::Smi::FromInt(JSGeneratorObject::kNext))
        .StoreAccumulatorInRegister(resume_mode);
    {
      // This loop builder does not construct counters as the loop is not
      // visible to the user, and we therefore neither pass the block coverage
      // builder nor the expression.
      //
      // In addition to the normal suspend for yield*, a yield* in an async
      // generator has 2 additional suspends:
      //   - One for awaiting the iterator result of closing the generator when
      //     resumed with a "throw" completion, and a throw method is not
      //     present on the delegated iterator
      //   - One for awaiting the iterator result yielded by the delegated
      //     iterator

      interpreter::LoopBuilder loop_builder(&builder, nullptr, nullptr,
                                            &lowerer.feedback());
      LoopScope loop_scope(&loop_builder);

      {
        interpreter::BytecodeLabels after_switch(&lowerer.zone());
        interpreter::BytecodeJumpTable* switch_jump_table =
            builder.AllocateJumpTable(2, 1);

        builder.LoadAccumulatorWithRegister(resume_mode)
            .SwitchOnSmiNoFeedback(switch_jump_table);

        // Fallthrough to default case.
        // TODO(ignition): Add debug code to check that {resume_mode} really is
        // {JSGeneratorObject::kNext} in this case.
        static_assert(JSGeneratorObject::kNext == 0);
        {
          i::FeedbackSlot slot = lowerer.feedback().AddCallICSlot();
          builder.CallProperty(iterator.next(), iterator_and_input,
                               slot.ToInt());
          builder.Jump(after_switch.New());
        }

        static_assert(JSGeneratorObject::kReturn == 1);
        builder.Bind(switch_jump_table, JSGeneratorObject::kReturn);
        {
          const AstRawString* return_string =
              ast_string_constants->return_string();
          interpreter::BytecodeLabels no_return_method(&lowerer.zone());

          BuildCallIteratorMethod(lowerer, iterator.object(), return_string,
                                  iterator_and_input, after_switch.New(),
                                  &no_return_method);
          no_return_method.Bind(&builder);
          if (finally_label.first) {
            P_DCHECK(finally_value.is_valid());
            builder.MoveRegister(input, finally_value);
            builder.Jump(finally_label.first);
          } else {
            builder.LoadAccumulatorWithRegister(input);
            if (iterator_type == IteratorType::kAsync) {
              // Await input.
              BuildAwait(lowerer);
              VisitAsyncReturn(lowerer);
            } else {
              builder.Return();
            }
          }
        }

        static_assert(JSGeneratorObject::kThrow == 2);
        builder.Bind(switch_jump_table, JSGeneratorObject::kThrow);
        {
          const AstRawString* throw_string =
              ast_string_constants->throw_string();
          interpreter::BytecodeLabels no_throw_method(&lowerer.zone());
          BuildCallIteratorMethod(lowerer, iterator.object(), throw_string,
                                  iterator_and_input, after_switch.New(),
                                  &no_throw_method);

          // If there is no "throw" method, perform IteratorClose, and finally
          // throw a TypeError.
          no_throw_method.Bind(&builder);
          BuildIteratorClose(lowerer, iterator);
          builder.CallRuntime(i::Runtime::kThrowThrowMethodMissing);
        }

        after_switch.Bind(&builder);
      }

      if (iterator_type == IteratorType::kAsync) {
        // Await the result of the method invocation.
        BuildAwait(lowerer);
      }

      // Check that output is an object.
      interpreter::BytecodeLabel check_if_done;
      builder.StoreAccumulatorInRegister(output)
          .JumpIfJSReceiver(&check_if_done)
          .CallRuntime(i::Runtime::kThrowIteratorResultNotAnObject, output);

      builder.Bind(&check_if_done);
      // Break once output.done is true.
      builder.LoadNamedProperty(output, ast_string_constants->done_string(),
                                lowerer.feedback().AddLoadICSlot().ToInt());

      loop_builder.BreakIfTrue(not_boolean_mode);

      // Suspend the current generator.
      if (iterator_type == IteratorType::kNormal) {
        builder.LoadAccumulatorWithRegister(output);
      } else {
        LowerRegisterAllocationScope inner_register_scope(&lowerer);
        DCHECK_EQ(iterator_type, IteratorType::kAsync);
        // If generatorKind is async, perform
        // AsyncGeneratorResolve(output.value, /* done = */ false), which will
        // resolve the current AsyncGeneratorRequest's promise with
        // output.value.
        builder.LoadNamedProperty(output, ast_string_constants->value_string(),
                                  lowerer.feedback().AddLoadICSlot().ToInt());

        interpreter::RegisterList args =
            builder.register_allocator()->NewRegisterList(3);
        builder
            .MoveRegister(lowerer.generator_object(), args[0])  // generator
            .StoreAccumulatorInRegister(args[1])                // value
            .LoadFalse()
            .StoreAccumulatorInRegister(args[2])  // done
            .CallRuntime(i::Runtime::kInlineAsyncGeneratorResolve, args);
      }

      BuildSuspendPoint(lowerer);
      builder.StoreAccumulatorInRegister(input);
      builder
          .CallRuntime(i::Runtime::kInlineGeneratorGetResumeMode,
                       lowerer.generator_object())
          .StoreAccumulatorInRegister(resume_mode);

      loop_builder.BindContinueTarget();
    }
  }

  // Decide if we trigger a return or if the yield* expression should just
  // produce a value.
  interpreter::BytecodeLabel completion_is_output_value;
  interpreter::Register output_value =
      builder.register_allocator()->NewRegister();
  builder
      .LoadNamedProperty(output, ast_string_constants->value_string(),
                         lowerer.feedback().AddLoadICSlot().ToInt())
      .StoreAccumulatorInRegister(output_value)
      .LoadLiteral(i::Smi::FromInt(JSGeneratorObject::kReturn))
      .CompareReference(resume_mode)
      .JumpIfFalse(boolean_mode, &completion_is_output_value);
  if (finally_label.second) {
    P_DCHECK(finally_value.is_valid());
    builder.MoveRegister(output_value, finally_value);
    builder.Jump(finally_label.second);
  } else {
    builder.LoadAccumulatorWithRegister(output_value);
    if (iterator_type == IteratorType::kAsync) {
      VisitAsyncReturn(lowerer);
    } else {
      builder.Return();
    }
  }

  builder.Bind(&completion_is_output_value);
  builder.LoadAccumulatorWithRegister(output_value);
}

// Lowers a single yield of the accumulator value: wraps it in an
// IteratorResult (except for the initial yield), suspends, then dispatches on
// the resume mode — kNext continues with the sent value, kReturn either jumps
// to |finally_label| (moving the sent value into |finally_value|) or returns
// directly, and a throw resumption (re)throws the sent value.
void YieldGenerate(PortableLowering& lowerer,
                   interpreter::BytecodeLabel* finally_label,
                   interpreter::Register finally_value) {
  interpreter::BytecodeArrayBuilder& builder = lowerer.builder();

  bool is_async = IsAsyncGeneratorFunction(lowerer.shared_func_info()->kind());
  // If this is not the first yield
  if (lowerer.suspend_count() > 0) {
    if (is_async) {
      // AsyncGenerator yields (with the exception of the initial yield)
      // delegate work to the AsyncGeneratorYieldWithAwait stub, which Awaits
      // the operand and on success, wraps the value in an IteratorResult.
      //
      // In the spec the Await is a separate operation, but they are combined
      // here to reduce bytecode size.
      LowerRegisterAllocationScope register_scope(&lowerer);
      interpreter::RegisterList args =
          builder.register_allocator()->NewRegisterList(2);
      builder
          .MoveRegister(lowerer.generator_object(), args[0])  // generator
          .StoreAccumulatorInRegister(args[1])                // value
          .CallRuntime(i::Runtime::kInlineAsyncGeneratorYieldWithAwait, args);
    } else {
      // Generator yields (with the exception of the initial yield) wrap the
      // value into IteratorResult.
      LowerRegisterAllocationScope register_scope(&lowerer);
      interpreter::RegisterList args =
          builder.register_allocator()->NewRegisterList(2);
      builder
          .StoreAccumulatorInRegister(args[0])  // value
          .LoadFalse()
          .StoreAccumulatorInRegister(args[1])  // done
          .CallRuntime(i::Runtime::kInlineCreateIterResultObject, args);
    }
  }

  BuildSuspendPoint(lowerer);

  interpreter::Register input = builder.register_allocator()->NewRegister();
  builder.StoreAccumulatorInRegister(input).CallRuntime(
      i::Runtime::kInlineGeneratorGetResumeMode, lowerer.generator_object());

  // Now dispatch on resume mode.
  static_assert(JSGeneratorObject::kNext + 1 == JSGeneratorObject::kReturn);
  static_assert(JSGeneratorObject::kReturn + 1 == JSGeneratorObject::kThrow);
  // The sync table covers kNext/kReturn with kThrow as the fallthrough; the
  // async table also covers kThrow, with rethrow as the fallthrough.
  interpreter::BytecodeJumpTable* jump_table =
      builder.AllocateJumpTable(is_async ? 3 : 2, JSGeneratorObject::kNext);

  builder.SwitchOnSmiNoFeedback(jump_table);

  if (is_async) {
    // Resume with rethrow (switch fallthrough).
    // This case is only necessary in async generators.
    builder.LoadAccumulatorWithRegister(input);
    builder.ReThrow();

    // Add label for kThrow (next case).
    builder.Bind(jump_table, JSGeneratorObject::kThrow);
  }

  {
    // Resume with throw (switch fallthrough in sync case).
    builder.LoadAccumulatorWithRegister(input);
    builder.Throw();
  }

  {
    // Resume with return.
    builder.Bind(jump_table, JSGeneratorObject::kReturn);
    if (finally_label) {
      P_DCHECK(finally_value.is_valid());
      builder.MoveRegister(input, finally_value);
      builder.Jump(finally_label);
    } else {
      builder.LoadAccumulatorWithRegister(input);
      if (is_async) {
        VisitAsyncReturn(lowerer);
      } else {
        builder.Return();
      }
    }
  }

  {
    // Resume with next.
    builder.Bind(jump_table, JSGeneratorObject::kNext);
    builder.LoadAccumulatorWithRegister(input);
  }
}

// Lowers the portable Yield bytecode. The flag operand selects plain yield
// vs yield* (iteration). No finally target is involved: a kReturn resumption
// returns directly.
void VisitYield(PortableLowering& lowerer) {
  int32_t flags = lowerer.bytecode_iterator().GetFlag8Operand(0);
  YieldIterationFlag flag = static_cast<YieldIterationFlag>(flags);
  if (flag == YieldIterationFlag::kNoIteration) {
    YieldGenerate(lowerer, nullptr, interpreter::Register());
  } else {
    P_DCHECK(flag == YieldIterationFlag::kIteration);
    YieldStarGenerate(lowerer, {nullptr, nullptr}, interpreter::Register());
  }
}

// Lowers YieldFinally: a yield whose kReturn resumption must first run
// enclosing finally blocks. The forward-jump operand(s) give the finally
// target(s); |finally_value| (register operand 1) receives the return value.
void VisitYieldFinally(PortableLowering& lowerer) {
  interpreter::Register finally_value(
      lowerer.bytecode_iterator().GetRegisterOperand(1).index());
  int32_t flags = lowerer.bytecode_iterator().GetFlag8Operand(2);
  YieldIterationFlag flag = static_cast<YieldIterationFlag>(flags);
  if (flag == YieldIterationFlag::kNoIteration) {
    interpreter::BytecodeLabel* finally_label = VisitForwardJump(lowerer);
    YieldGenerate(lowerer, finally_label, finally_value);
  } else {
    P_DCHECK(flag == YieldIterationFlag::kIteration);
    // yield* needs two finally targets (no-return-method and final-return).
    auto finally_label = VisitForwardJump2(lowerer);
    YieldStarGenerate(lowerer, finally_label, finally_value);
  }
}

// Lowers the constant-jump-operand variant of YieldFinally. Only plain yield
// is expected here; a yield* with a constant jump operand is not produced
// (hence P_UNREACHABLE).
void VisitYieldFinallyConstant(PortableLowering& lowerer) {
  interpreter::BytecodeLabel* finally_label = VisitForwardJump(lowerer);
  interpreter::Register finally_value(
      lowerer.bytecode_iterator().GetRegisterOperand(1).index());
  int32_t flags = lowerer.bytecode_iterator().GetFlag8Operand(2);
  YieldIterationFlag flag = static_cast<YieldIterationFlag>(flags);
  if (flag == YieldIterationFlag::kNoIteration) {
    YieldGenerate(lowerer, finally_label, finally_value);
  } else {
    P_DCHECK(flag == YieldIterationFlag::kIteration);
    P_UNREACHABLE();
  }
}

// Lowers the portable Await bytecode; see BuildAwait for the full sequence.
void VisitAwait(PortableLowering& lowerer) { BuildAwait(lowerer); }

// Emits the prologue creating the suspendable-function state object: plain
// async functions and modules with top-level await use AsyncFunctionEnter,
// generators and async generators use CreateJSGeneratorObject. The result is
// stored in the lowerer's dedicated generator-object register.
void VisitCreateGeneratorObject(PortableLowering& lowerer) {
  LowerRegisterAllocationScope register_scope(&lowerer);
  interpreter::RegisterList args =
      lowerer.builder().register_allocator()->NewRegisterList(2);
  auto func_kind = lowerer.shared_func_info()->kind();
  i::Runtime::FunctionId function_id =
      ((IsAsyncFunction(func_kind) && !IsAsyncGeneratorFunction(func_kind)) ||
#if V8_MAJOR_VERSION == 11
       IsAsyncModule(func_kind)
#else
       // The predicate was renamed in newer V8 versions.
       IsModuleWithTopLevelAwait(func_kind)
#endif
           )
          ? i::Runtime::kInlineAsyncFunctionEnter
          : i::Runtime::kInlineCreateJSGeneratorObject;
  lowerer.builder()
      .MoveRegister(interpreter::Register::function_closure(), args[0])
      .MoveRegister(lowerer.builder().Receiver(), args[1])
      .CallRuntime(function_id, args)
      .StoreAccumulatorInRegister(lowerer.generator_object());
}

// Lowers ForInInit: coerces |object| via ToObject, enumerates its keys with
// ForInEnumerate/ForInPrepare into a freshly allocated register triple,
// records the (feedback slot, triple) pair for the matching ForInCurrent /
// JumpIfForInFinish, and zeroes the |cursor| index register.
void VisitForInInit(portable::PortableLowering& lowerer) {
  interpreter::Register cursor(
      lowerer.bytecode_iterator().GetRegisterOperand(0).index());
  interpreter::Register object(
      lowerer.bytecode_iterator().GetRegisterOperand(1).index());
  lowerer.builder().ToObject(object);
  lowerer.builder().ForInEnumerate(object);
  interpreter::RegisterList triple =
      lowerer.builder().register_allocator()->NewRegisterList(3);
  int32_t slot_id = lowerer.feedback().AddForInSlot().ToInt();
  lowerer.builder().ForInPrepare(triple, slot_id);
  // Paired with the pop_back in VisitForInCurrent; for-in loops nest, hence
  // the stack.
  lowerer.for_in_info_stack().emplace_back(slot_id, triple);
  lowerer.builder()
      .LoadLiteral(i::Smi::zero())
      .StoreAccumulatorInRegister(cursor);
}

// Lowers JumpIfForInFinish: jumps to the target when the for-in |cursor| has
// reached the enumeration length (last register of the innermost for-in
// triple). V8 11 lacks the fused JumpIfForInDone bytecode, so ForInContinue +
// JumpIfFalse is emitted instead.
void VisitJumpIfForInFinish(portable::PortableLowering& lowerer) {
  interpreter::BytecodeLabel* target_label = VisitForwardJump(lowerer);
  interpreter::Register cursor(
      lowerer.bytecode_iterator().GetRegisterOperand(1).index());
  auto& info = lowerer.for_in_info_stack().back();
  interpreter::Register end(info.triple.last_register());
#if V8_MAJOR_VERSION == 11
  lowerer.builder().ForInContinue(cursor, end);
  lowerer.builder().JumpIfFalse(boolean_mode, target_label);
#else
  lowerer.builder().JumpIfForInDone(target_label, cursor, end);
#endif
}

// Lowers JumpIfForInFinishConstant. The lowering is byte-for-byte identical
// to VisitJumpIfForInFinish (the variants differ only in how the portable
// jump operand is encoded, which VisitForwardJump already handles for both),
// so delegate to it rather than duplicating the version-conditional emission.
void VisitJumpIfForInFinishConstant(portable::PortableLowering& lowerer) {
  VisitJumpIfForInFinish(lowerer);
}

// Lowers ForInCurrent: loads the current enumerated key via ForInNext using
// the first two registers (cache type/array) of the innermost for-in triple,
// then pops the info pushed by VisitForInInit — this is the triple's final
// use for this loop.
void VisitForInCurrent(PortableLowering& lowerer) {
  interpreter::Register object(
      lowerer.bytecode_iterator().GetRegisterOperand(0).index());
  interpreter::Register cursor(
      lowerer.bytecode_iterator().GetRegisterOperand(1).index());
  P_CHECK(!lowerer.for_in_info_stack().empty());
  auto& info = lowerer.for_in_info_stack().back();
  lowerer.builder().ForInNext(object, cursor, info.triple.Truncate(2),
                              info.for_in_slot_id);
  lowerer.for_in_info_stack().pop_back();
}

// Lowers ForInAdvance: increments the for-in cursor. In V8 11 ForInStep
// leaves the incremented index in the accumulator, so it is written back to
// |cursor|; in newer versions no write-back is emitted (presumably ForInStep
// updates the register in place there — confirm against the V8 version used).
void VisitForInAdvance(PortableLowering& lowerer) {
  interpreter::Register cursor(
      lowerer.bytecode_iterator().GetRegisterOperand(0).index());
  lowerer.builder().ForInStep(cursor);
#if V8_MAJOR_VERSION == 11
  lowerer.builder().StoreAccumulatorInRegister(cursor);
#endif
}

// Not expected to reach this pass (presumably handled elsewhere — the
// P_UNREACHABLE asserts that).
void VisitReThrow(PortableLowering&) { P_UNREACHABLE(); }

// Lowers ThrowReferenceErrorIfHole, translating the portable constant-pool
// index of the variable name into a handle constant entry.
void VisitThrowReferenceErrorIfHole(PortableLowering& lowerer) {
  size_t name_entry = lowerer.bytecode_iterator().GetIndexOperand(0);
  lowerer.builder().ThrowReferenceErrorIfHole(
      lowerer.GetOrCreateHandleConstantEntry(name_entry));
}

// Super-call hole checks map 1:1 onto the corresponding V8 bytecodes.
void VisitThrowSuperNotCalledIfHole(PortableLowering& lowerer) {
  interpreter::BytecodeArrayBuilder& builder = lowerer.builder();
  builder.ThrowSuperNotCalledIfHole();
}
void VisitThrowSuperAlreadyCalledIfNotHole(PortableLowering& lowerer) {
  interpreter::BytecodeArrayBuilder& builder = lowerer.builder();
  builder.ThrowSuperAlreadyCalledIfNotHole();
}
// Not expected to reach this pass.
void VisitThrowIfNotSuperConstructor(PortableLowering&) { P_UNREACHABLE(); }

// Lowers AsyncIteratorInit: from the iterable in the accumulator, computes
// |iterator| = GetIterator(acc, async) — falling back to the sync @@iterator
// wrapped by CreateAsyncFromSyncIterator when @@asyncIterator is undefined or
// null — and |source_or_next| = iterator.next. Unlike BuildGetIterator, the
// destination registers come from the bytecode's operands.
void VisitAsyncIteratorInit(PortableLowering& lowerer) {
  interpreter::BytecodeArrayBuilder& builder = lowerer.builder();
  interpreter::Register iterator(
      lowerer.bytecode_iterator().GetRegisterOperand(0).index());
  interpreter::Register source_or_next(
      lowerer.bytecode_iterator().GetRegisterOperand(1).index());

  builder.StoreAccumulatorInRegister(source_or_next);

  // Set method to GetMethod(obj, @@asyncIterator)
  builder.LoadAsyncIteratorProperty(source_or_next,
                                    lowerer.feedback().AddLoadICSlot().ToInt());

  interpreter::BytecodeLabel async_iterator_undefined, done;
  builder.JumpIfUndefinedOrNull(&async_iterator_undefined);

  // The |iterator| register doubles as scratch for the method until the
  // final iterator value is stored into it.
  interpreter::Register method = iterator;
  builder.StoreAccumulatorInRegister(method).CallProperty(
      method, interpreter::RegisterList(source_or_next),
      lowerer.feedback().AddCallICSlot().ToInt());

  // If Type(iterator) is not Object, throw a TypeError exception.
  builder.JumpIfJSReceiver(&done);
  builder.CallRuntime(i::Runtime::kThrowSymbolAsyncIteratorInvalid);

  builder.Bind(&async_iterator_undefined);
  // If method is undefined,
  //     Let syncMethod be GetMethod(obj, @@iterator)
  builder
      .LoadIteratorProperty(source_or_next,
                            lowerer.feedback().AddLoadICSlot().ToInt())
      .StoreAccumulatorInRegister(method);

  //     Let syncIterator be Call(syncMethod, obj)
  builder.CallProperty(method, interpreter::RegisterList(source_or_next),
                       lowerer.feedback().AddCallICSlot().ToInt());

  // Return CreateAsyncFromSyncIterator(syncIterator), staging the sync
  // iterator in |iterator| (which held the no-longer-needed method).
  builder.StoreAccumulatorInRegister(iterator).CallRuntime(
      i::Runtime::kInlineCreateAsyncFromSyncIterator, iterator);

  builder.Bind(&done);

  // iterator.next
  builder.StoreAccumulatorInRegister(iterator);
  builder.LoadNamedProperty(
      iterator, lowerer.isolate()->ast_string_constants()->next_string(),
      lowerer.feedback().AddLoadICSlot().ToInt());
  // When both operands name the same register, keep the iterator in it and
  // leave the next method in the accumulator only.
  if (iterator != source_or_next) {
    builder.StoreAccumulatorInRegister(source_or_next);
  }
}

// Lowers IteratorInit: |iterator| = GetIterator(acc), |source_or_next| =
// iterator.next. The GetIterator bytecode performs the @@iterator lookup,
// call, and receiver check in one step.
void VisitIteratorInit(PortableLowering& lowerer) {
  interpreter::BytecodeArrayBuilder& builder = lowerer.builder();
  interpreter::Register iterator(
      lowerer.bytecode_iterator().GetRegisterOperand(0).index());
  interpreter::Register source_or_next(
      lowerer.bytecode_iterator().GetRegisterOperand(1).index());
  int load_slot = lowerer.feedback().AddLoadICSlot().ToInt();
  int call_slot = lowerer.feedback().AddCallICSlot().ToInt();
  builder.StoreAccumulatorInRegister(source_or_next)
      .GetIterator(source_or_next, load_slot, call_slot)
      .StoreAccumulatorInRegister(iterator);
  // iterator.next
  builder.LoadNamedProperty(
      iterator, lowerer.isolate()->ast_string_constants()->next_string(),
      lowerer.feedback().AddLoadICSlot().ToInt());
  // When both operands name the same register, keep the iterator in it and
  // leave the next method in the accumulator only.
  if (iterator != source_or_next) {
    builder.StoreAccumulatorInRegister(source_or_next);
  }
}

// Emits next_result = next_method.call(iterator), awaiting the result first
// when |is_async|, and throws IteratorResultNotAnObject unless the result is
// a JSReceiver. On fallthrough the result is in both the accumulator and
// |next_result|.
void IteratorCallNextMethod(PortableLowering& lowerer,
                            interpreter::Register iterator,
                            interpreter::Register next_method,
                            interpreter::Register next_result, bool is_async) {
  interpreter::BytecodeLabel is_object;
  lowerer.builder().CallProperty(next_method,
                                 interpreter::RegisterList(iterator),
                                 lowerer.feedback().AddCallICSlot().ToInt());
  if (is_async) {
    BuildAwait(lowerer);
  }
  lowerer.builder()
      .StoreAccumulatorInRegister(next_result)
      .JumpIfJSReceiver(&is_object)
      .CallRuntime(i::Runtime::kThrowIteratorResultNotAnObject, next_result)
      .Bind(&is_object);
}

// done = true;
// next_result = iterator.next();
// if (!next_result.done) {
//    value = next_result.value;
//    done = false;
// }
// Shared lowering for (Async)IteratorAdvance; the bool template parameter
// selects whether the next() result is awaited, avoiding a runtime branch.
// Feedback slots are allocated up front so sync/async paths use the same
// slot layout. On fallthrough the accumulator holds next_result.value when
// not done (|next_result| is reused to stage the value across the LoadFalse).
template <bool is_async>
void VisitIteratorAdvanceImpl(PortableLowering& lowerer) {
  interpreter::Register done(
      lowerer.bytecode_iterator().GetRegisterOperand(0).index());
  interpreter::Register iterator(
      lowerer.bytecode_iterator().GetRegisterOperand(1).index());
  interpreter::Register next_method(
      lowerer.bytecode_iterator().GetRegisterOperand(2).index());
  interpreter::BytecodeArrayBuilder& builder = lowerer.builder();
  int load_value_slot = lowerer.feedback().AddLoadICSlot().ToInt();
  int load_done_slot = lowerer.feedback().AddLoadICSlot().ToInt();
  LowerRegisterAllocationScope register_scope(&lowerer);
  interpreter::Register next_result =
      builder.register_allocator()->NewRegister();

  builder.LoadTrue().StoreAccumulatorInRegister(done);
  // next_result = iterator.next();
  IteratorCallNextMethod(lowerer, iterator, next_method, next_result, is_async);

  // if (!next_result.done) {value=next_result.value; done=false;}
  interpreter::BytecodeLabel is_done;
  builder
      .LoadNamedProperty(
          next_result, lowerer.isolate()->ast_string_constants()->done_string(),
          load_done_slot)
      .JumpIfTrue(not_boolean_mode, &is_done)
      .LoadNamedProperty(
          next_result,
          lowerer.isolate()->ast_string_constants()->value_string(),
          load_value_slot)  // next_result.value
      .StoreAccumulatorInRegister(next_result)
      .LoadFalse()
      .StoreAccumulatorInRegister(done)
      .LoadAccumulatorWithRegister(next_result)
      .Bind(&is_done);
}

// Lowers the synchronous IteratorAdvance portable bytecode (no await after
// calling iterator.next()). Use `false`/`true`, not 0/1, for the bool
// template argument.
void VisitIteratorAdvance(PortableLowering& lowerer) {
  VisitIteratorAdvanceImpl<false>(lowerer);
}

// Lowers the asynchronous IteratorAdvance portable bytecode: the result of
// iterator.next() is awaited before the done/value checks.
void VisitAsyncIteratorAdvance(PortableLowering& lowerer) {
  VisitIteratorAdvanceImpl<true>(lowerer);
}

// array = []
// if (!done) {
//   index = 0;
//   done = true;
//   loop {
//     next_result = iterator.next();
//     if (next_result.done)break;
//     array[index] = next_result.value;
//     index++;
//   }
// }
// Collects the remaining iterator results into a fresh array (rest-pattern
// destructuring). Leaves the array in the accumulator. The unused local
// BytecodeLabel `loop_done` from the original has been removed; loop exit is
// handled entirely by LoopBuilder::BreakIfTrue.
void VisitIteratorRest(PortableLowering& lowerer) {
  // Operands: 0 = <done flag>, 1 = <iterator>, 2 = <iterator.next method>.
  interpreter::Register done(
      lowerer.bytecode_iterator().GetRegisterOperand(0).index());
  interpreter::Register iterator(
      lowerer.bytecode_iterator().GetRegisterOperand(1).index());
  interpreter::Register next_method(
      lowerer.bytecode_iterator().GetRegisterOperand(2).index());
  interpreter::BytecodeArrayBuilder& builder = lowerer.builder();
  LowerRegisterAllocationScope register_scope(&lowerer);
  interpreter::Register array = builder.register_allocator()->NewRegister();

  // array = []
  builder.CreateEmptyArrayLiteral(lowerer.feedback().AddLiteralSlot().ToInt())
      .StoreAccumulatorInRegister(array);

  // if(done)goto DONE;  (iterator already exhausted: produce the empty array)
  interpreter::BytecodeLabel is_done;
  builder.LoadAccumulatorWithRegister(done).JumpIfTrue(not_boolean_mode,
                                                       &is_done);
  // index = 0;
  interpreter::Register index = builder.register_allocator()->NewRegister();
  builder.LoadLiteral(i::Smi::zero()).StoreAccumulatorInRegister(index);
  // done = true;
  builder.LoadTrue().StoreAccumulatorInRegister(done);

  // Feedback slots for the loop body, allocated in a fixed order so the
  // slot indices stay stable.
  int load_done_slot = lowerer.feedback().AddLoadICSlot().ToInt();
  int load_value_slot = lowerer.feedback().AddLoadICSlot().ToInt();
  int element_slot = lowerer.feedback().AddStoreInArrayLiteralICSlot().ToInt();
  int index_slot = lowerer.feedback().AddBinaryOpICSlot().ToInt();
  // loop
  {
    interpreter::LoopBuilder loop_builder(&builder, nullptr, nullptr,
                                          &lowerer.feedback());
    LoopScope loop_scope(&loop_builder);
    // next_result = iterator.next()
    interpreter::Register next_result =
        builder.register_allocator()->NewRegister();
    IteratorCallNextMethod(lowerer, iterator, next_method, next_result, false);
    // if( next_result.done)goto LoopDone
    builder.LoadNamedProperty(
        next_result, lowerer.isolate()->ast_string_constants()->done_string(),
        load_done_slot);
    loop_builder.BreakIfTrue(not_boolean_mode);
    // array[index] = next_result.value
    builder
        .LoadNamedProperty(
            next_result,
            lowerer.isolate()->ast_string_constants()->value_string(),
            load_value_slot)
        .StoreInArrayLiteral(array, index, element_slot)
        .LoadAccumulatorWithRegister(index)
        .UnaryOperation(token_inc, index_slot)  // index++;
        .StoreAccumulatorInRegister(index);
    loop_builder.BindContinueTarget();
  }
  // DONE:
  builder.Bind(&is_done).LoadAccumulatorWithRegister(array);
}

// if(!done) {
//   method = iterator.return;
//   if (method != undefined && method != null) {
//     return_res = method();
//     if (!IsJSReceiver(return_res)) throw xxx;
//   }
// }
//
// if ignore_return_exception is true, try {...IteratorFinish...} catch(e) {}
// will be produced
template <bool is_async>
void VisitIteratorFinishImpl(PortableLowering& lowerer) {
  // Operand 0: the iterator register; operand 1: IteratorFinishFlag bits.
  // The done flag is expected in the accumulator on entry.
  interpreter::Register iterator_reg(
      lowerer.bytecode_iterator().GetRegisterOperand(0).index());
  IteratorFinishFlag finish_flag = static_cast<IteratorFinishFlag>(
      lowerer.bytecode_iterator().GetFlag8Operand(1));

  interpreter::BytecodeArrayBuilder& builder = lowerer.builder();
  interpreter::BytecodeLabels done_labels(&lowerer.zone());
  interpreter::RegisterList call_args(iterator_reg);

  // Already done: skip the whole close sequence.
  builder.JumpIfTrue(not_boolean_mode, done_labels.New());

  const bool swallow_return_exception =
      finish_flag == IteratorFinishFlag::kIgnoreException;

  // method = iterator.return; call it unless it is undefined/null (in which
  // case BuildCallIteratorMethod jumps straight to done_labels).
  BuildCallIteratorMethod(
      lowerer, iterator_reg,
      lowerer.isolate()->ast_string_constants()->return_string(), call_args,
      nullptr, &done_labels);

  if (is_async) {
    BuildAwait(lowerer);
  }

  if (!swallow_return_exception) {
    // iterator.return() must yield an object; otherwise throw.
    LowerRegisterAllocationScope register_scope(&lowerer);
    interpreter::Register result_reg =
        builder.register_allocator()->NewRegister();

    builder.JumpIfJSReceiver(done_labels.New());
    builder.StoreAccumulatorInRegister(result_reg);
    builder.CallRuntime(i::Runtime::kThrowIteratorResultNotAnObject,
                        result_reg);
  }
  done_labels.Bind(&builder);
}

// Lowers the synchronous IteratorFinish portable bytecode (IteratorClose
// without awaiting the result of iterator.return()).
void VisitIteratorFinish(PortableLowering& lowerer) {
  VisitIteratorFinishImpl<false>(lowerer);
}

// Lowers the asynchronous IteratorFinish portable bytecode: the result of
// iterator.return() is awaited before the JSReceiver check.
void VisitAsyncIteratorFinish(PortableLowering& lowerer) {
  VisitIteratorFinishImpl<true>(lowerer);
}

// Opens a try region: operand 0 is the handler table id, operand 1 the
// context register recorded for the handler.
void VisitMarkTryBegin(PortableLowering& lowerer) {
  const int handler_id =
      lowerer.bytecode_iterator().GetUnsignedImmediateOperand(0);
  const interpreter::Register context_reg(
      lowerer.bytecode_iterator().GetRegisterOperand(1).index());
  lowerer.builder().MarkTryBegin(handler_id, context_reg);
}

// Closes the try region opened with the matching handler id.
void VisitMarkTryEnd(PortableLowering& lowerer) {
  const int handler_id =
      lowerer.bytecode_iterator().GetUnsignedImmediateOperand(0);
  lowerer.builder().MarkTryEnd(handler_id);
}

// Marks the catch-handler entry point for the given handler id. The register
// optimizer is flushed first so no deferred register state is pending at the
// point control can transfer into the handler.
void VisitMarkHandler(PortableLowering& lowerer) {
  interpreter::BytecodeArrayBuilder& builder = lowerer.builder();
  builder.GetRegisterOptimizer()->Flush();
  builder.MarkHandler(
      lowerer.bytecode_iterator().GetUnsignedImmediateOperand(0),
      v8::internal::HandlerTable::CAUGHT);
}

// These portable bytecodes are never expected as input to the lowering pass
// (presumably handled or filtered out earlier in the pipeline — verify
// against the transformer); reaching any of them is a bug.
void VisitGetIterator(PortableLowering&) { P_UNREACHABLE(); }
void VisitNop18(PortableLowering&) { P_UNREACHABLE(); }
void VisitIllegal(PortableLowering&) { P_UNREACHABLE(); }

// Entry point: lowers one portable bytecode unit (described by `items`) into
// a native v8 BytecodeArray via a fresh PortableLowering session.
Handle<i::BytecodeArray> PBCLower::Lower(PBCLowerItems items) {
  return PortableLowering(items).Lower();
}

}  // namespace portable