// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//


#include <pollux/connectors/hive/hive_connector.h>

#include <pollux/common/base/fs.h>
#include <pollux/connectors/hive/hive_config.h>
#include <pollux/connectors/hive/hive_data_sink.h>
#include <pollux/connectors/hive/hive_data_source.h>
#include <pollux/connectors/hive/hive_partition_function.h>
#include <pollux/expression/expr_to_subfield_filter.h>
#include <pollux/expression/field_reference.h>

#include <boost/lexical_cast.hpp>

#include <algorithm>
#include <memory>
#include <random>
#include <sstream>
#include <utility>

using namespace kumo::pollux::exec;

namespace kumo::pollux::connector::hive {

namespace {
// Returns the process-wide list of metadata factories registered via
// registerHiveConnectorMetadataFactory(). Function-local static avoids the
// static-initialization-order fiasco across translation units.
std::vector<std::unique_ptr<HiveConnectorMetadataFactory>>&
hiveConnectorMetadataFactories() {
  static std::vector<std::unique_ptr<HiveConnectorMetadataFactory>> instances;
  return instances;
}
} // namespace

// Constructs a Hive connector identified by 'id'.
//
// 'config' supplies the raw connector configuration, which is wrapped in a
// HiveConfig for typed access. 'executor' is held (not owned) for use by the
// data sources this connector creates; its lifetime must exceed the
// connector's — TODO confirm against caller contract.
HiveConnector::HiveConnector(
    const std::string& id,
    std::shared_ptr<const config::ConfigBase> config,
    melon::Executor* executor)
    : Connector(id),
      hiveConfig_(std::make_shared<HiveConfig>(config)),
      // The file handle cache is optional: when disabled a null cache is
      // passed, so every request goes straight to the generator.
      fileHandleFactory_(
          hiveConfig_->isFileHandleCacheEnabled()
              ? std::make_unique<SimpleLRUCache<std::string, FileHandle>>(
                    hiveConfig_->numCacheFileHandles())
              : nullptr,
          std::make_unique<FileHandleGenerator>(config)),
      executor_(executor) {
  if (hiveConfig_->isFileHandleCacheEnabled()) {
    KLOG(INFO) << "Hive connector " << connectorId()
              << " created with maximum of "
              << hiveConfig_->numCacheFileHandles()
              << " cached file handles with expiration of "
              << hiveConfig_->fileHandleExpirationDurationMs() << "ms.";
  } else {
    KLOG(INFO) << "Hive connector " << connectorId()
              << " created with file handle cache disabled";
  }
  // Probe the registered metadata factories in registration order; the first
  // one that yields a non-null metadata object wins.
  for (auto& factory : hiveConnectorMetadataFactories()) {
    metadata_ = factory->create(this);
    if (metadata_ != nullptr) {
      break;
    }
  }
}

// Creates a data source that scans Hive data and produces rows of
// 'outputType'. The returned source borrows this connector's file handle
// factory and executor, so it must not outlive the connector.
std::unique_ptr<DataSource> HiveConnector::createDataSource(
    const RowTypePtr& outputType,
    const std::shared_ptr<ConnectorTableHandle>& tableHandle,
    const std::unordered_map<
        std::string,
        std::shared_ptr<connector::ColumnHandle>>& columnHandles,
    ConnectorQueryCtx* connectorQueryCtx) {
  auto source = std::make_unique<HiveDataSource>(
      outputType,
      tableHandle,
      columnHandles,
      &fileHandleFactory_,
      executor_,
      connectorQueryCtx,
      hiveConfig_);
  return source;
}

// Creates a data sink that writes rows of 'inputType' to a Hive table.
//
// 'connectorInsertTableHandle' must actually be a HiveInsertTableHandle;
// anything else is a caller bug and fails the check below.
std::unique_ptr<DataSink> HiveConnector::createDataSink(
    RowTypePtr inputType,
    std::shared_ptr<ConnectorInsertTableHandle> connectorInsertTableHandle,
    ConnectorQueryCtx* connectorQueryCtx,
    CommitStrategy commitStrategy) {
  auto hiveInsertHandle = std::dynamic_pointer_cast<HiveInsertTableHandle>(
      connectorInsertTableHandle);
  POLLUX_CHECK_NOT_NULL(
      hiveInsertHandle, "Hive connector expecting hive write handle!");
  // Both arguments are by-value sink parameters; move them into the sink to
  // avoid two extra shared_ptr refcount round-trips per sink creation.
  return std::make_unique<HiveDataSink>(
      std::move(inputType),
      std::move(hiveInsertHandle),
      connectorQueryCtx,
      commitStrategy,
      hiveConfig_);
}

std::unique_ptr<core::PartitionFunction> HivePartitionFunctionSpec::create(
    int numPartitions,
    bool localExchange) const {
  std::vector<int> bucketToPartitions;
  if (bucketToPartition_.empty()) {
    // NOTE: if hive partition function spec doesn't specify bucket to partition
    // mapping, then we do round-robin mapping based on the actual number of
    // partitions.
    bucketToPartitions.resize(numBuckets_);
    for (int bucket = 0; bucket < numBuckets_; ++bucket) {
      bucketToPartitions[bucket] = bucket % numPartitions;
    }
    if (localExchange) {
      // Shuffle the map from bucket to partition for local exchange so we don't
      // use the same map for remote shuffle.
      std::shuffle(
          bucketToPartitions.begin(),
          bucketToPartitions.end(),
          std::mt19937{0});
    }
  }
  return std::make_unique<pollux::connector::hive::HivePartitionFunction>(
      numBuckets_,
      bucketToPartition_.empty() ? std::move(bucketToPartitions)
                                 : bucketToPartition_,
      channels_,
      constValues_);
}

std::string HivePartitionFunctionSpec::toString() const {
  std::ostringstream keys;
  size_t constIndex = 0;
  for (auto i = 0; i < channels_.size(); ++i) {
    if (i > 0) {
      keys << ", ";
    }
    auto channel = channels_[i];
    if (channel == kConstantChannel) {
      keys << "\"" << constValues_[constIndex++]->toString(0) << "\"";
    } else {
      keys << channel;
    }
  }

  return fmt::format("HIVE(({}) buckets: {})", keys.str(), numBuckets_);
}

// Serializes the spec to a dynamic object; the inverse of deserialize().
// Constant vectors are wrapped in ConstantTypedExpr so they round-trip
// through the generic ISerializable machinery.
melon::Dynamic HivePartitionFunctionSpec::serialize() const {
  melon::Dynamic obj = melon::Dynamic::object;
  obj["name"] = "HivePartitionFunctionSpec";
  obj["numBuckets"] = ISerializable::serialize(numBuckets_);
  obj["bucketToPartition"] = ISerializable::serialize(bucketToPartition_);
  obj["keys"] = ISerializable::serialize(channels_);
  std::vector<pollux::core::ConstantTypedExpr> exprs;
  exprs.reserve(constValues_.size());
  for (const auto& constValue : constValues_) {
    exprs.emplace_back(constValue);
  }
  obj["constants"] = ISerializable::serialize(exprs);
  return obj;
}

// static
// Reconstructs a spec from the dynamic object produced by serialize().
// 'context' must be the memory::MemoryPool used to materialize the constant
// vectors.
core::PartitionFunctionSpecPtr HivePartitionFunctionSpec::deserialize(
    const melon::Dynamic& obj,
    void* context) {
  std::vector<column_index_t> keyChannels =
      ISerializable::deserialize<std::vector<column_index_t>>(
          obj["keys"], context);
  const auto constExprs =
      ISerializable::deserialize<std::vector<pollux::core::ConstantTypedExpr>>(
          obj["constants"], context);
  auto* pool = static_cast<memory::MemoryPool*>(context);
  std::vector<VectorPtr> constVectors;
  constVectors.reserve(constExprs.size());
  for (const auto& expr : constExprs) {
    constVectors.emplace_back(expr->toConstantVector(pool));
  }
  return std::make_shared<HivePartitionFunctionSpec>(
      ISerializable::deserialize<int>(obj["numBuckets"], context),
      ISerializable::deserialize<std::vector<int>>(
          obj["bucketToPartition"], context),
      std::move(keyChannels),
      std::move(constVectors));
}

// Registers the HivePartitionFunctionSpec deserializer so serialized specs
// can be reconstructed by name.
void registerHivePartitionFunctionSerDe() {
  DeserializationWithContextRegistryForSharedPtr().Register(
      "HivePartitionFunctionSpec", HivePartitionFunctionSpec::deserialize);
}

// Appends 'factory' to the process-wide metadata factory list consulted by
// the HiveConnector constructor. Always returns true so the call can be used
// as a static registration initializer.
bool registerHiveConnectorMetadataFactory(
    std::unique_ptr<HiveConnectorMetadataFactory> factory) {
  auto& factories = hiveConnectorMetadataFactories();
  factories.emplace_back(std::move(factory));
  return true;
}

} // namespace kumo::pollux::connector::hive
