// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//


#include <gtest/gtest.h>
#include <pollux/connectors/connector.h>
#include <pollux/connectors/hive/hive_connector.h>
#include <pollux/testing/exec/util/hive_connector_test_base.h>
#include <pollux/expression/expr_to_subfield_filter.h>

namespace kumo::pollux::connector::hive::test {
namespace {

using namespace kumo::pollux::exec;

class HiveConnectorSerDeTest : public exec::test::HiveConnectorTestBase {
 protected:
  HiveConnectorSerDeTest() {
    Type::registerSerDe();
    common::Filter::registerSerDe();
    core::ITypedExpr::registerSerDe();
    HiveTableHandle::registerSerDe();
    HiveColumnHandle::registerSerDe();
    LocationHandle::registerSerDe();
    HiveInsertTableHandle::registerSerDe();
    HiveBucketProperty::registerSerDe();
    HiveSortingColumn::registerSerDe();
    HiveConnectorSplit::registerSerDe();
    HiveInsertFileNameGenerator::registerSerDe();
  }

  template <typename T>
  static void testSerde(const T& handle) {
    auto str = handle.toString();
    auto obj = handle.serialize();
    auto clone = ISerializable::deserialize<T>(obj);
    ASSERT_EQ(clone->toString(), str);
  }

  static void testSerde(const HiveTableHandle& handle) {
    auto str = handle.toString();
    auto obj = handle.serialize();
    auto clone = ISerializable::deserialize<HiveTableHandle>(obj);
    ASSERT_EQ(clone->toString(), str);
    ASSERT_EQ(
        handle.remainingFilter()->type(), clone->remainingFilter()->type());

    auto& filters = handle.subfieldFilters();
    auto& cloneFilters = clone->subfieldFilters();
    ASSERT_EQ(filters.size(), cloneFilters.size());
    for (const auto& [subfield, filter] : handle.subfieldFilters()) {
      ASSERT_NE(cloneFilters.find(subfield), cloneFilters.end());
      ASSERT_TRUE(filter->testingEquals(*cloneFilters.at(subfield)));
    }
  }

  static void testSerde(const HiveConnectorSplit& split) {
    const auto str = split.toString();
    const auto obj = split.serialize();
    const auto clone = ISerializable::deserialize<HiveConnectorSplit>(obj);
    ASSERT_EQ(clone->toString(), str);
    ASSERT_EQ(split.partitionKeys.size(), clone->partitionKeys.size());
    for (const auto& [key, value] : split.partitionKeys) {
      ASSERT_EQ(value, clone->partitionKeys.at(key));
    }

    ASSERT_EQ(split.tableBucketNumber, clone->tableBucketNumber);
    if (split.bucketConversion.has_value()) {
      ASSERT_TRUE(clone->bucketConversion.has_value());
      ASSERT_EQ(
          clone->bucketConversion.value().tableBucketCount,
          split.bucketConversion.value().tableBucketCount);
      ASSERT_EQ(
          clone->bucketConversion.value().partitionBucketCount,
          split.bucketConversion.value().partitionBucketCount);
    }
    ASSERT_EQ(split.customSplitInfo.size(), clone->customSplitInfo.size());
    for (const auto& [key, value] : split.customSplitInfo) {
      ASSERT_EQ(value, clone->customSplitInfo.at(key));
    }

    if (split.extraFileInfo != nullptr) {
      ASSERT_EQ(*split.extraFileInfo, *clone->extraFileInfo);
    } else {
      ASSERT_EQ(clone->extraFileInfo, nullptr);
    }
    ASSERT_EQ(split.serdeParameters.size(), clone->serdeParameters.size());
    for (const auto& [key, value] : split.serdeParameters) {
      ASSERT_EQ(value, clone->serdeParameters.at(key));
    }

    ASSERT_EQ(split.infoColumns.size(), clone->infoColumns.size());
    for (const auto& [key, value] : split.infoColumns) {
      ASSERT_EQ(value, clone->infoColumns.at(key));
    }

    if (split.properties.has_value()) {
      ASSERT_TRUE(clone->properties.has_value());
      ASSERT_EQ(split.properties->fileSize, clone->properties->fileSize);
      ASSERT_EQ(
          split.properties->modificationTime,
          clone->properties->modificationTime);
    } else {
      ASSERT_FALSE(clone->properties.has_value());
    }
  }
};

// Round-trips a HiveTableHandle carrying one filter of each major kind
// (null test, hugeint/double/bool/in/not-in/string-range filters) plus a
// remaining-filter expression and a table parameter.
TEST_F(HiveConnectorSerDeTest, hiveTableHandle) {
  auto rowType =
      ROW({"c0c0", "c1", "c2", "c3", "c4", "c5"},
          {INTEGER(), BIGINT(), DOUBLE(), BOOLEAN(), BIGINT(), VARCHAR()});
  // NOTE(review): "c2" is added twice (greaterThanOrEqualDouble and notIn);
  // whether the builder merges or overwrites duplicate subfields is not
  // visible here — confirm this is intentional.
  auto tableHandle = makeTableHandle(
      common::test::SubfieldFiltersBuilder()
          .add("c0.c0", isNotNull())
          .add(
              "c1",
              lessThanOrEqualHugeint(std::numeric_limits<int128_t>::max()))
          .add("c2", greaterThanOrEqualDouble(3.1415))
          .add("c3", boolEqual(true))
          .add("c4", in({0xdeadbeaf, 0xcafecafe}))
          .add("c2", notIn({0xdeadbeaf, 0xcafecafe}))
          .add(
              "c5",
              orFilter(between("abc", "efg"), greaterThanOrEqual("dragon")))
          .build(),
      // Remaining filter that cannot be pushed down as subfield filters.
      parseExpr("c1 > c4 and c3 = true", rowType),
      "hive_table",
      ROW({"c0", "c1"}, {BIGINT(), VARCHAR()}),
      true,
      {{dwio::common::TableParameter::kSkipHeaderLineCount, "1"}});
  testSerde(*tableHandle);
}

// Round-trips a HiveColumnHandle for every column kind, using a deeply
// nested row -> array -> map -> row type and a required subfield path to
// exercise complex-type and subfield serde.
TEST_F(HiveConnectorSerDeTest, hiveColumnHandle) {
  const auto nestedType = ROW({
      {"c0c0", BIGINT()},
      {"c0c1",
       ARRAY(
           MAP(VARCHAR(),
               ROW({
                   {"c0c1c0", BIGINT()},
                   {"c0c1c1", BIGINT()},
               })))},
  });

  const std::vector<HiveColumnHandle::ColumnType> columnKinds{
      HiveColumnHandle::ColumnType::kPartitionKey,
      HiveColumnHandle::ColumnType::kRegular,
      HiveColumnHandle::ColumnType::kSynthesized,
      HiveColumnHandle::ColumnType::kRowIndex,
  };

  for (const auto kind : columnKinds) {
    const auto handle = exec::test::HiveConnectorTestBase::makeColumnHandle(
        "columnHandle",
        nestedType,
        nestedType,
        {"c0.c0c1[3][\"foo\"].c0c1c0"},
        kind);
    testSerde(*handle);
  }
}

// Round-trips a LocationHandle for a new table whose write directory
// differs from its target directory.
TEST_F(HiveConnectorSerDeTest, locationHandle) {
  const auto handle = exec::test::HiveConnectorTestBase::makeLocationHandle(
      "targetDirectory",
      std::optional("writeDirectory"),
      LocationHandle::TableType::kNew);
  testSerde(*handle);
}

// Round-trips a HiveInsertTableHandle combining scalar, nested-row and
// array-of-row column types, a partition column, Presto-native bucketing
// with a sorting column, and custom serde parameters.
TEST_F(HiveConnectorSerDeTest, hiveInsertTableHandle) {
  const std::vector<std::string> columnNames{"id", "row", "arr", "loc"};
  const auto nestedRowType{
      ROW({"c0", "c1", "c2", "c3", "c4", "c5"},
          {BIGINT(), INTEGER(), SMALLINT(), REAL(), DOUBLE(), VARCHAR()})};
  const auto arrayOfRowType =
      ARRAY(ROW({{"c0c0", BIGINT()}, {"c0c1", BIGINT()}, {"c0c2", BIGINT()}}));
  // One type per entry in columnNames, in the same order.
  const std::vector<TypePtr> columnTypes{
      TypeFactory<TypeKind::BIGINT>::create(),
      nestedRowType,
      arrayOfRowType,
      TypeFactory<TypeKind::VARCHAR>::create()};

  const auto locationHandle =
      exec::test::HiveConnectorTestBase::makeLocationHandle(
          "targetDirectory",
          std::optional("writeDirectory"),
          LocationHandle::TableType::kNew);

  // Presto-native bucketing over two columns, sorted ascending on "id".
  const auto bucketProperty = std::make_shared<HiveBucketProperty>(
      HiveBucketProperty::Kind::kPrestoNative,
      1024,
      std::vector<std::string>{"id", "row"},
      std::vector<TypePtr>{VARCHAR(), BOOLEAN()},
      std::vector<std::shared_ptr<const HiveSortingColumn>>{
          std::make_shared<HiveSortingColumn>(
              "id", core::SortOrder{true, true})});

  const std::unordered_map<std::string, std::string> serdeParameters{
      {"key1", "value1"},
      {"key2", "value2"},
  };

  const auto insertTableHandle =
      exec::test::HiveConnectorTestBase::makeHiveInsertTableHandle(
          columnNames,
          columnTypes,
          {"loc"},
          bucketProperty,
          locationHandle,
          dwio::common::FileFormat::NIMBLE,
          common::CompressionKind::CompressionKind_SNAPPY,
          serdeParameters);
  testSerde(*insertTableHandle);
}

// Round-trips HiveConnectorSplit in three configurations: fully populated
// (split1), mostly empty/nullopt (split2), and with a bucket conversion
// attached (split3).
TEST_F(HiveConnectorSerDeTest, hiveConnectorSplit) {
  const auto connectorId = "testSerde";
  constexpr auto splitWeight = 1;
  constexpr bool cacheable = false;
  constexpr auto filePath = "/testSerde/p";
  constexpr auto fileFormat = dwio::common::FileFormat::DWRF;
  constexpr auto start = 0;
  constexpr auto length = 1024;
  const std::unordered_map<std::string, std::optional<std::string>>
      partitionKeys{{"p0", "0"}, {"p1", "1"}};
  constexpr auto tableBucketNumber = std::optional<int32_t>(4);
  const std::unordered_map<std::string, std::string> customSplitInfo{
      {"s0", "0"}, {"s1", "1"}};
  const auto extraFileInfo = std::make_shared<std::string>("testSerdeFileInfo");
  const std::unordered_map<std::string, std::string> serdeParameters{
      {"k1", "1"}, {"k2", "v2"}};
  // NOTE(review): storageParameters and rowIdProperties are passed to the
  // constructor below but the testSerde helper does not compare them on the
  // clone — consider extending coverage.
  const std::unordered_map<std::string, std::string> storageParameters{
      {"k3", "3"}, {"k5", "v4"}};
  const std::unordered_map<std::string, std::string> infoColumns{
      {"c0", "0"}, {"c1", "1"}};
  // modificationTime deliberately nullopt to cover optional-inside-optional.
  FileProperties fileProperties{
      .fileSize = 2048, .modificationTime = std::nullopt};
  const auto properties = std::optional<FileProperties>(fileProperties);
  RowIdProperties rowIdProperties{
      .metadataVersion = 2, .partitionId = 3, .tableGuid = "test"};
  // split1: every constructor argument populated.
  const auto split1 = HiveConnectorSplit(
      connectorId,
      filePath,
      fileFormat,
      start,
      length,
      partitionKeys,
      tableBucketNumber,
      customSplitInfo,
      extraFileInfo,
      serdeParameters,
      storageParameters,
      splitWeight,
      cacheable,
      infoColumns,
      properties,
      rowIdProperties);
  ASSERT_EQ(split1.cacheable, cacheable);
  testSerde(split1);

  // split2: empty maps, null extraFileInfo, nullopt properties/rowId, and
  // the opposite cacheable flag.
  const auto split2 = HiveConnectorSplit(
      connectorId,
      filePath,
      fileFormat,
      start,
      length,
      {},
      tableBucketNumber,
      customSplitInfo,
      nullptr,
      {},
      {},
      splitWeight,
      !cacheable,
      {},
      std::nullopt,
      std::nullopt);
  ASSERT_EQ(split2.cacheable, !cacheable);
  testSerde(split2);

  // split3: defaults plus a bucket conversion (table bucket count 16,
  // partition bucket count 2, one conversion column handle).
  auto split3 = HiveConnectorSplit(connectorId, filePath, fileFormat);
  std::vector<std::shared_ptr<HiveColumnHandle>> handles;
  handles.push_back(makeColumnHandle("c0", INTEGER(), {}));
  split3.bucketConversion = {16, 2, std::move(handles)};
  testSerde(split3);
}

} // namespace
} // namespace kumo::pollux::connector::hive::test
