// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//
#include <pollux/exec/operator_utils.h>
#include <pollux/exec/vector_hasher.h>
#include <pollux/expression/eval_ctx.h>
#include <pollux/vector/constant_vector.h>
#include <pollux/vector/flat_vector.h>

namespace kumo::pollux::exec {

namespace {

// Gathers scalar values from 'sources' into a flat 'target' vector.
//
// For each i in [0, count), reads row sourceIndices[i] of column
// 'sourceColumnChannel' in sources[i] and writes it to target row
// targetIndex + i. Null source cells are marked null in 'target'; the
// target nulls buffer is materialized lazily, only when the first null
// is encountered.
template <TypeKind kind>
void scalarGatherCopy(
    BaseVector* target,
    vector_size_t targetIndex,
    vector_size_t count,
    const std::vector<const RowVector*>& sources,
    const std::vector<vector_size_t>& sourceIndices,
    column_index_t sourceColumnChannel) {
  POLLUX_DCHECK(target->is_flat_encoding());

  using T = typename TypeTraits<kind>::NativeType;
  auto* flat_vector = target->template as_unchecked<FlatVector<T>>();
  uint64_t* rawNulls = nullptr;
  // 'if constexpr' (not a runtime 'if') so the StringView-only branch
  // (setNoCopy / acquireSharedStringBuffers) is discarded at compile time
  // and never instantiated for non-string scalar types.
  if constexpr (std::is_same_v<T, StringView>) {
    for (int i = 0; i < count; ++i) {
      // Source rows themselves must not be null; only cells may be.
      POLLUX_DCHECK(!sources[i]->may_have_nulls());
      if (sources[i]
              ->childAt(sourceColumnChannel)
              ->is_null_at(sourceIndices[i])) {
        if (MELON_UNLIKELY(rawNulls == nullptr)) {
          rawNulls = target->mutable_raw_nulls();
        }
        bits::set_null(rawNulls, targetIndex + i, true);
        continue;
      }
      auto* source = sources[i]->childAt(sourceColumnChannel).get();
      // Strings are shared, not copied: reference the source's buffers and
      // pin them on the target so the StringViews stay valid.
      flat_vector->setNoCopy(
          targetIndex + i,
          source->template as_unchecked<FlatVector<T>>()->value_at(
              sourceIndices[i]));
      flat_vector->acquireSharedStringBuffers(source);
    }
  } else {
    for (int i = 0; i < count; ++i) {
      POLLUX_DCHECK(!sources[i]->may_have_nulls());
      if (sources[i]
              ->childAt(sourceColumnChannel)
              ->is_null_at(sourceIndices[i])) {
        if (MELON_UNLIKELY(rawNulls == nullptr)) {
          rawNulls = target->mutable_raw_nulls();
        }
        bits::set_null(rawNulls, targetIndex + i, true);
        continue;
      }
      flat_vector->set(
          targetIndex + i,
          sources[i]
              ->childAt(sourceColumnChannel)
              ->template as_unchecked<FlatVector<T>>()
              ->value_at(sourceIndices[i]));
    }
  }
}

// Gathers 'count' rows into a complex-typed (non-scalar) 'target', one row
// at a time, delegating the per-row deep copy to BaseVector::copy().
void complexGatherCopy(
    BaseVector* target,
    vector_size_t targetIndex,
    vector_size_t count,
    const std::vector<const RowVector*>& sources,
    const std::vector<vector_size_t>& sourceIndices,
    column_index_t sourceChannel) {
  for (vector_size_t row = 0; row < count; ++row) {
    auto* sourceChild = sources[row]->childAt(sourceChannel).get();
    target->copy(sourceChild, targetIndex + row, sourceIndices[row], 1);
  }
}

// Gathers 'count' rows from column 'sourceChannel' of 'sources' into
// 'target', dispatching to the typed scalar fast path for scalar targets
// and to the generic per-row copy for complex types.
void gatherCopy(
    BaseVector* target,
    vector_size_t targetIndex,
    vector_size_t count,
    const std::vector<const RowVector*>& sources,
    const std::vector<vector_size_t>& sourceIndices,
    column_index_t sourceChannel) {
  if (!target->is_scalar()) {
    complexGatherCopy(
        target, targetIndex, count, sources, sourceIndices, sourceChannel);
    return;
  }
  POLLUX_DYNAMIC_SCALAR_TYPE_DISPATCH(
      scalarGatherCopy,
      target->type()->kind(),
      target,
      targetIndex,
      count,
      sources,
      sourceIndices,
      sourceChannel);
}

// We want to aggregate some operator runtime metrics per operator rather than
// per event. This function returns true for such metrics.
bool shouldAggregateRuntimeMetric(const std::string& name) {
  static const melon::F14FastSet<std::string> metricNames{
      "dataSourceAddSplitWallNanos",
      "dataSourceReadWallNanos",
      "dataSourceLazyWallNanos",
      "queuedWallNanos",
      "flushTimes"};
  if (metricNames.contains(name)) {
    return true;
  }

  // 'blocked*WallNanos'
  if (name.size() > 16 and strncmp(name.c_str(), "blocked", 7) == 0) {
    return true;
  }

  return false;
}

} // namespace

void deselectRowsWithNulls(
    const std::vector<std::unique_ptr<VectorHasher>>& hashers,
    SelectivityVector& rows) {
  bool anyChange = false;
  for (int32_t i = 0; i < hashers.size(); ++i) {
    auto& decoded = hashers[i]->decodedVector();
    if (decoded.may_have_nulls()) {
      anyChange = true;
      const auto* nulls = hashers[i]->decodedVector().nulls(&rows);
      bits::andBits(rows.asMutableRange().bits(), nulls, 0, rows.end());
    }
  }

  if (anyChange) {
    rows.updateBounds();
  }
}

// Returns a raw pointer to 'selectedBits' sized to hold at least 'size'
// bits, (re)allocating the backing buffer from 'pool' when needed.
uint64_t* FilterEvalCtx::getRawSelectedBits(
    vector_size_t size,
    memory::MemoryPool* pool) {
  uint64_t* raw = nullptr;
  BaseVector::ensure_buffer<bool, uint64_t>(size, pool, &selectedBits, &raw);
  return raw;
}

// Returns a raw pointer to 'selectedIndices' sized to hold at least 'size'
// row numbers, (re)allocating the backing buffer from 'pool' when needed.
vector_size_t* FilterEvalCtx::getRawSelectedIndices(
    vector_size_t size,
    memory::MemoryPool* pool) {
  vector_size_t* raw = nullptr;
  BaseVector::ensure_buffer<vector_size_t>(size, pool, &selectedIndices, &raw);
  return raw;
}

namespace {
// A constant filter result either passes every selected row (non-null true)
// or none (null or false).
vector_size_t processConstantFilterResults(
    const VectorPtr& filterResult,
    const SelectivityVector& rows) {
  const auto* constant = filterResult->as<ConstantVector<bool>>();
  const bool passes = !constant->is_null_at(0) && constant->value_at(0);
  return passes ? rows.countSelected() : 0;
}

// Converts a flat boolean filter result into the dense selected-row list in
// 'filterEvalCtx', returning the number of passing rows. A row passes when
// its result value is true, non-null, and the row was selected on input.
vector_size_t processFlatFilterResults(
    const VectorPtr& filterResult,
    const SelectivityVector& rows,
    FilterEvalCtx& filterEvalCtx,
    memory::MemoryPool* pool) {
  const auto numRows = rows.size();

  auto* selectedBits = filterEvalCtx.getRawSelectedBits(numRows, pool);
  auto* rawValues =
      filterResult->as<FlatVector<bool>>()->rawValues<uint64_t>();
  if (filterResult->may_have_nulls()) {
    // Null results do not pass: combine values with the nulls mask.
    bits::andBits(
        selectedBits, rawValues, filterResult->raw_nulls(), 0, numRows);
  } else {
    ::memcpy(selectedBits, rawValues, bits::nbytes(numRows));
  }
  // Restrict to the rows that were selected coming in.
  if (!rows.isAllSelected()) {
    bits::andBits(selectedBits, rows.allBits(), 0, numRows);
  }

  // Compact the surviving bits into a dense list of row numbers.
  auto* rawSelected = filterEvalCtx.getRawSelectedIndices(numRows, pool);
  vector_size_t numPassed = 0;
  bits::forEachSetBit(selectedBits, 0, numRows, [&](vector_size_t row) {
    rawSelected[numPassed++] = row;
  });
  return numPassed;
}

// Handles dictionary/other encoded filter results by decoding first, then
// populating both the dense selected-row list and the selected-bits mask in
// 'filterEvalCtx'. Returns the number of passing rows.
vector_size_t processEncodedFilterResults(
    const VectorPtr& filterResult,
    const SelectivityVector& rows,
    FilterEvalCtx& filterEvalCtx,
    memory::MemoryPool* pool) {
  const auto numRows = rows.size();

  // Decode to see through the wrapping down to the base values.
  DecodedVector& decoded = filterEvalCtx.decodedResult;
  decoded.decode(*filterResult, rows);
  const auto* rawValues = decoded.data<uint64_t>();
  const auto* rawNulls = decoded.nulls(&rows);
  const auto* rawIndices = decoded.indices();

  auto* rawSelected = filterEvalCtx.getRawSelectedIndices(numRows, pool);
  auto* rawSelectedBits = filterEvalCtx.getRawSelectedBits(numRows, pool);
  ::memset(rawSelectedBits, 0, bits::nbytes(numRows));

  vector_size_t numPassed = 0;
  for (vector_size_t row = 0; row < numRows; ++row) {
    if (!rows.isValid(row)) {
      continue;
    }
    // Nulls are addressed in row space; values through the decoded index.
    const bool isNull = rawNulls != nullptr && bits::isBitNull(rawNulls, row);
    if (!isNull && bits::isBitSet(rawValues, rawIndices[row])) {
      rawSelected[numPassed++] = row;
      bits::setBit(rawSelectedBits, row);
    }
  }
  return numPassed;
}
} // namespace

// Translates a boolean filter result vector into the selected-row
// structures of 'filterEvalCtx', picking a fast path based on the result's
// encoding. Returns the number of rows that passed the filter.
vector_size_t processFilterResults(
    const VectorPtr& filterResult,
    const SelectivityVector& rows,
    FilterEvalCtx& filterEvalCtx,
    memory::MemoryPool* pool) {
  const auto encoding = filterResult->encoding();
  if (encoding == VectorEncoding::Simple::CONSTANT) {
    return processConstantFilterResults(filterResult, rows);
  }
  if (encoding == VectorEncoding::Simple::FLAT) {
    return processFlatFilterResults(filterResult, rows, filterEvalCtx, pool);
  }
  return processEncodedFilterResults(filterResult, rows, filterEvalCtx, pool);
}

// Wraps 'inputVector' in a dictionary given by 'wrapIndices'/'wrapNulls'.
// If 'inputVector' is already a dictionary, the two layers are collapsed
// into a single dictionary over the base values. 'wrapState' caches the
// transposed index buffers so that columns sharing the same inner indices
// also share the transposed result.
VectorPtr wrapOne(
    vector_size_t wrapSize,
    BufferPtr wrapIndices,
    const VectorPtr& inputVector,
    BufferPtr wrapNulls,
    WrapState& wrapState) {
  // No indices means identity: nulls without indices are not allowed.
  if (!wrapIndices) {
    POLLUX_CHECK_NULL(wrapNulls);
    return inputVector;
  }

  // Not a dictionary: wrap directly, nothing to collapse.
  if (inputVector->encoding() != VectorEncoding::Simple::DICTIONARY) {
    return BaseVector::wrap_in_dictionary(
        wrapNulls, wrapIndices, wrapSize, inputVector);
  }

  // All columns wrapped through the same 'wrapState' must carry the same
  // nulls buffer: record it on the first column, verify on the rest.
  if (wrapState.transposeResults.empty()) {
    wrapState.nulls = wrapNulls.get();
  } else {
    POLLUX_CHECK(
        wrapState.nulls == wrapNulls.get(),
        "Must have identical wrapNulls for all wrapped columns");
  }
  auto baseIndices = inputVector->wrap_info();
  auto baseValues = inputVector->value_vector();
  // The base will be wrapped again without loading any lazy. The
  // rewrapping is permitted in this case.
  baseValues->clear_containing_lazy_and_wrapped();
  auto* rawBaseNulls = inputVector->raw_nulls();
  if (rawBaseNulls) {
    // The inner dictionary adds nulls: transpose indices and nulls
    // together into fresh buffers. This result is not stored in
    // 'transposeResults' (only the null-free path below is cached).
    BufferPtr newIndices =
        AlignedBuffer::allocate<vector_size_t>(wrapSize, inputVector->pool());
    BufferPtr newNulls =
        AlignedBuffer::allocate<bool>(wrapSize, inputVector->pool());
    const uint64_t* rawWrapNulls =
        wrapNulls ? wrapNulls->as<uint64_t>() : nullptr;
    BaseVector::transpose_indices_with_nulls(
        baseIndices->as<vector_size_t>(),
        rawBaseNulls,
        wrapSize,
        wrapIndices->as<vector_size_t>(),
        rawWrapNulls,
        newIndices->asMutable<vector_size_t>(),
        newNulls->asMutable<uint64_t>());

    return BaseVector::wrap_in_dictionary(
        newNulls, newIndices, wrapSize, baseValues);
  }

  // if another column had the same indices as this one and this one does not
  // add nulls, we use the same transposed wrapping.
  auto it = wrapState.transposeResults.find(baseIndices.get());
  if (it != wrapState.transposeResults.end()) {
    return BaseVector::wrap_in_dictionary(
        wrapNulls, it->second, wrapSize, baseValues);
  }

  // First time we see these inner indices: transpose and cache the result.
  auto newIndices =
      AlignedBuffer::allocate<vector_size_t>(wrapSize, inputVector->pool());
  BaseVector::transpose_indices(
      baseIndices->as<vector_size_t>(),
      wrapSize,
      wrapIndices->as<vector_size_t>(),
      newIndices->asMutable<vector_size_t>());
  // If another column has the same wrapping and does not add nulls, we can use
  // the same transposed indices.
  wrapState.transposeResults[baseIndices.get()] = newIndices;
  return BaseVector::wrap_in_dictionary(
      wrapNulls, newIndices, wrapSize, baseValues);
}

// Wraps 'child' in a dictionary over 'mapping' (with optional 'nulls').
// A null mapping means identity, in which case 'child' is returned as-is.
VectorPtr wrapChild(
    vector_size_t size,
    BufferPtr mapping,
    const VectorPtr& child,
    BufferPtr nulls) {
  return mapping == nullptr
      ? child
      : BaseVector::wrap_in_dictionary(nulls, mapping, size, child);
}

// Applies dictionary 'mapping' over every child of 'vector'. A null mapping
// is identity and returns 'vector' unchanged.
RowVectorPtr
wrap(vector_size_t size, BufferPtr mapping, const RowVectorPtr& vector) {
  if (mapping == nullptr) {
    return vector;
  }
  auto rowType = as_row_type(vector->type());
  return wrap(
      size, std::move(mapping), rowType, vector->children(), vector->pool());
}

// Wraps each of 'childVectors' in a dictionary over 'mapping' and assembles
// the results into a new RowVector of 'rowType' allocated from 'pool'.
// A null mapping yields an empty row vector.
RowVectorPtr wrap(
    vector_size_t size,
    BufferPtr mapping,
    const RowTypePtr& rowType,
    const std::vector<VectorPtr>& childVectors,
    memory::MemoryPool* pool) {
  if (mapping == nullptr) {
    return RowVector::createEmpty(rowType, pool);
  }
  std::vector<VectorPtr> wrappedChildren;
  wrappedChildren.reserve(childVectors.size());
  for (const auto& child : childVectors) {
    wrappedChildren.emplace_back(wrapChild(size, mapping, child));
  }
  // Move the children into the RowVector instead of copying a vector of
  // shared pointers (each copy is an atomic refcount increment per child).
  return std::make_shared<RowVector>(
      pool, rowType, nullptr, size, std::move(wrappedChildren));
}

void loadColumns(const RowVectorPtr& input, core::ExecCtx& execCtx) {
  LocalDecodedVector decodedHolder(execCtx);
  LocalSelectivityVector baseRowsHolder(&execCtx);
  LocalSelectivityVector rowsHolder(&execCtx);
  SelectivityVector* rows = nullptr;
  for (auto& child : input->children()) {
    if (is_lazy_not_loaded(*child)) {
      if (!rows) {
        rows = rowsHolder.get(input->size());
        rows->setAll();
      }
      LazyVector::ensureLoadedRows(
          child,
          *rows,
          *decodedHolder.get(),
          *baseRowsHolder.get(input->size()));
    }
  }
}

// Gathers 'count' rows into 'target': row i of the copy comes from row
// sourceIndices[i] of sources[i]. 'columnMap' maps source columns to target
// columns; when empty, columns are copied positionally.
void gatherCopy(
    RowVector* target,
    vector_size_t targetIndex,
    vector_size_t count,
    const std::vector<const RowVector*>& sources,
    const std::vector<vector_size_t>& sourceIndices,
    const std::vector<IdentityProjection>& columnMap) {
  POLLUX_DCHECK_GE(count, 0);
  if (MELON_UNLIKELY(count <= 0)) {
    return;
  }
  POLLUX_CHECK_LE(count, sources.size());
  POLLUX_CHECK_LE(count, sourceIndices.size());
  POLLUX_DCHECK_EQ(sources.size(), sourceIndices.size());
  if (columnMap.empty()) {
    // Positional copy: target column i comes from source column i.
    const auto numColumns = target->type()->size();
    for (column_index_t channel = 0; channel < numColumns; ++channel) {
      gatherCopy(
          target->childAt(channel).get(),
          targetIndex,
          count,
          sources,
          sourceIndices,
          channel);
    }
  } else {
    for (const auto& projection : columnMap) {
      gatherCopy(
          target->childAt(projection.outputChannel).get(),
          targetIndex,
          count,
          sources,
          sourceIndices,
          projection.inputChannel);
    }
  }
}

// Builds the spill directory path for one operator instance:
// "<spillDir>/<pipelineId>_<driverId>_<operatorId>".
std::string makeOperatorSpillPath(
    const std::string& spillDir,
    int pipelineId,
    int driverId,
    int32_t operatorId) {
  POLLUX_CHECK(!spillDir.empty());
  std::string path = spillDir;
  path += '/';
  path += std::to_string(pipelineId);
  path += '_';
  path += std::to_string(driverId);
  path += '_';
  path += std::to_string(operatorId);
  return path;
}

void addOperatorRuntimeStats(
    const std::string& name,
    const RuntimeCounter& value,
    std::unordered_map<std::string, RuntimeMetric>& stats) {
  auto statIt = stats.find(name);
  if (UNLIKELY(statIt == stats.end())) {
    statIt = stats.insert(std::pair(name, RuntimeMetric(value.unit))).first;
  } else {
    POLLUX_CHECK_EQ(statIt->second.unit, value.unit);
  }
  statIt->second.addValue(value.value);
}

// Runs per-operator aggregation on every metric flagged for it by
// shouldAggregateRuntimeMetric().
void aggregateOperatorRuntimeStats(
    std::unordered_map<std::string, RuntimeMetric>& stats) {
  for (auto& [name, metric] : stats) {
    if (shouldAggregateRuntimeMetric(name)) {
      metric.aggregate();
    }
  }
}

// Returns a writable range of 'size' row numbers backed by 'mapping'.
// The existing buffer is reused only when it is present, exclusively
// owned, and large enough; otherwise a fresh one is allocated from 'pool'.
melon::Range<vector_size_t*> initializeRowNumberMapping(
    BufferPtr& mapping,
    vector_size_t size,
    memory::MemoryPool* pool) {
  const bool reusable = mapping && mapping->unique() &&
      mapping->size() >= sizeof(vector_size_t) * size;
  if (!reusable) {
    mapping = allocate_indices(size, pool);
  }
  return melon::Range(mapping->asMutable<vector_size_t>(), size);
}

// Convenience overload: projects the children of row vector 'src' (see the
// std::vector<VectorPtr> overload for the projection semantics).
void projectChildren(
    std::vector<VectorPtr>& projectedChildren,
    const RowVectorPtr& src,
    const std::vector<IdentityProjection>& projections,
    int32_t size,
    const BufferPtr& mapping) {
  projectChildren(
      projectedChildren, src->children(), projections, size, mapping);
}

// Projects columns of 'src' into 'projectedChildren' per 'projections',
// wrapping each in a dictionary over 'mapping'. The output vector is grown
// as needed to hold the highest output channel.
void projectChildren(
    std::vector<VectorPtr>& projectedChildren,
    const std::vector<VectorPtr>& src,
    const std::vector<IdentityProjection>& projections,
    int32_t size,
    const BufferPtr& mapping) {
  for (const auto& projection : projections) {
    const auto input = projection.inputChannel;
    const auto output = projection.outputChannel;
    if (projectedChildren.size() <= output) {
      projectedChildren.resize(output + 1);
    }
    projectedChildren[output] = wrapChild(size, mapping, src[input]);
  }
}

// Convenience overload: projects the children of row vector 'src', passing
// 'state' through to share dictionary wrappings across columns.
void projectChildren(
    std::vector<VectorPtr>& projectedChildren,
    const RowVectorPtr& src,
    const std::vector<IdentityProjection>& projections,
    int32_t size,
    const BufferPtr& mapping,
    WrapState* state) {
  projectChildren(
      projectedChildren, src->children(), projections, size, mapping, state);
}

void projectChildren(
    std::vector<VectorPtr>& projectedChildren,
    const std::vector<VectorPtr>& src,
    const std::vector<IdentityProjection>& projections,
    int32_t size,
    const BufferPtr& mapping,
    WrapState* state) {
  for (const auto& projection : projections) {
    projectedChildren[projection.outputChannel] = state
        ? wrapOne(size, mapping, src[projection.inputChannel], nullptr, *state)
        : wrapChild(size, mapping, src[projection.inputChannel]);
  }
}

// Creates a BlockedOperator for BlockedNode plan nodes and returns nullptr
// for every other node type. Note that 'blockedCb_' is moved from on
// success, so a successful call consumes the factory's callback.
std::unique_ptr<Operator> BlockedOperatorFactory::toOperator(
    DriverCtx* ctx,
    int32_t id,
    const core::PlanNodePtr& node) {
  if (std::dynamic_pointer_cast<const BlockedNode>(node) == nullptr) {
    return nullptr;
  }
  return std::make_unique<BlockedOperator>(
      ctx, id, node, std::move(blockedCb_));
}
} // namespace kumo::pollux::exec
