// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//


#include <melon/singleton.h>
#include <gtest/gtest.h>
#include <pollux/connectors/hive/storage_adapters/hdfs/HdfsFileSystem.h>
#include <pollux/connectors/hive/storage_adapters/hdfs/RegisterHdfsFileSystem.h>
#include <pollux/connectors/hive/storage_adapters/hdfs/tests/HdfsMiniCluster.h>
#include <pollux/exec/table_writer.h>
#include <pollux/testing/exec/util/assert_query_builder.h>
#include <pollux/testing/exec/util/hive_connector_test_base.h>
#include <pollux/plan/plan_builder.h>

using namespace kumo::pollux;
using namespace kumo::pollux::core;
using namespace kumo::pollux::exec;
using namespace kumo::pollux::exec::test;
using namespace kumo::pollux::connector;
using namespace kumo::pollux::connector::hive;
using namespace kumo::pollux::dwio::common;
using namespace kumo::pollux::test;

class InsertIntoHdfsTest : public HiveConnectorTestBase {
 public:
  void SetUp() override {
    HiveConnectorTestBase::SetUp();
    filesystems::registerHdfsFileSystem();
    if (miniCluster == nullptr) {
      miniCluster = std::make_shared<filesystems::test::HdfsMiniCluster>();
      miniCluster->start();
    }
  }

  void TearDown() override {
    HiveConnectorTestBase::TearDown();
    miniCluster->stop();
  }

  void setDataTypes(const RowTypePtr& inputType) {
    rowType_ = inputType;
  }

  static std::shared_ptr<filesystems::test::HdfsMiniCluster> miniCluster;
  RowTypePtr rowType_;
};

// Out-of-class definition of the fixture's shared mini-cluster handle;
// starts out null and is initialized lazily in SetUp().
std::shared_ptr<filesystems::test::HdfsMiniCluster>
    InsertIntoHdfsTest::miniCluster = nullptr;

TEST_F(InsertIntoHdfsTest, insertIntoHdfsTest) {
  melon::SingletonVault::singleton()->registrationComplete();
  constexpr int64_t kNumRows = 1000;
  setDataTypes(ROW(
      {"c0", "c1", "c2", "c3"}, {BIGINT(), INTEGER(), SMALLINT(), DOUBLE()}));

  // Build an input batch where every column simply holds the row index.
  auto inputBatch = make_row_vector(
      {make_flat_vector<int64_t>(kNumRows, [](auto idx) { return idx; }),
       make_flat_vector<int32_t>(kNumRows, [](auto idx) { return idx; }),
       make_flat_vector<int16_t>(kNumRows, [](auto idx) { return idx; }),
       make_flat_vector<double>(kNumRows, [](auto idx) { return idx; })});

  // Write the batch into HDFS through a single table writer.
  auto writePlan =
      PlanBuilder()
          .values({inputBatch})
          .tableWrite(
              std::string(miniCluster->url()), dwio::common::FileFormat::DWRF)
          .planNode();

  auto writeOutput = AssertQueryBuilder(writePlan).copyResults(pool());

  // The writer reports the total written row count in row 0 of the first
  // output column; every other row in that column is null.
  auto writtenRows = writeOutput->childAt(TableWriteTraits::kRowCountChannel)
                         ->as<FlatVector<int64_t>>();
  ASSERT_FALSE(writtenRows->is_null_at(0));
  ASSERT_EQ(kNumRows, writtenRows->value_at(0));
  ASSERT_TRUE(writtenRows->is_null_at(1));

  // The second output column carries a JSON fragment describing the files
  // produced by the write (null in row 0, populated from row 1).
  auto fragments = writeOutput->childAt(TableWriteTraits::kFragmentChannel)
                       ->as<FlatVector<StringView>>();
  ASSERT_TRUE(fragments->is_null_at(0));
  ASSERT_FALSE(fragments->is_null_at(1));
  melon::Dynamic fragmentJson = melon::parseJson(fragments->value_at(1));

  ASSERT_EQ(kNumRows, fragmentJson["rowCount"].asInt());
  auto writeInfos = fragmentJson["fileWriteInfos"];
  ASSERT_EQ(1, writeInfos.size());

  auto writtenFileName = writeInfos[0]["writeFileName"].asString();

  // Scan the written file back from HDFS and verify it round-trips the
  // original input.
  auto scanPlan = PlanBuilder().tableScan(rowType_).planNode();

  auto hdfsSplits = HiveConnectorTestBase::makeHiveConnectorSplits(
      fmt::format("{}/{}", miniCluster->url(), writtenFileName),
      1,
      dwio::common::FileFormat::DWRF);
  auto readBack =
      AssertQueryBuilder(scanPlan).split(hdfsSplits[0]).copyResults(pool());
  assertEqualResults({inputBatch}, {readBack});
}
