// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//


#include <pollux/testing/gtest_utils.h>
#include <pollux/plan/plan_builder.h>
#include <pollux/functions/lib/aggregates/tests/utils/AggregationTestBase.h>
#include <pollux/functions/sparksql/aggregates/register.h>

using namespace kumo::pollux::exec::test;
using namespace kumo::pollux::functions::aggregate::test;

namespace kumo::pollux::functions::aggregate::sparksql::test {

namespace {

// Fixture for Spark-semantics average aggregation tests. Registers the
// Spark aggregate functions under the "spark_" prefix so tests can refer
// to them as e.g. "spark_avg".
class AverageAggregationTest : public AggregationTestBase {
 protected:
  void SetUp() override {
    // Base class setup must run first; it initializes the aggregation
    // test machinery this fixture builds on.
    AggregationTestBase::SetUp();
    registerAggregateFunctions("spark_");
  }
};

// Verifies spark_avg over inputs that are entirely or partially null:
// a global aggregation over all-null columns, the partial-aggregation
// accumulator it produces, and a grouped aggregation where one group
// sees only nulls.
TEST_F(AverageAggregationTest, avgAllNulls) {
  constexpr vector_size_t numRows = 1'000;

  // Global aggregation over all-null input. Use at least two row
  // vectors, since multiple batches exercise different code paths
  // than a single one.
  std::vector<RowVectorPtr> batches = {
      make_row_vector({
          make_all_null_flat_vector<int64_t>(numRows),
      }),
      make_row_vector({
          make_all_null_flat_vector<int64_t>(numRows),
      }),
  };
  testAggregations(batches, {}, {"spark_avg(c0)"}, "SELECT NULL");

  // Partial aggregation alone: for all-null input the accumulator is
  // the (sum, count) pair (0, 0).
  auto partialPlan = PlanBuilder()
                         .values(batches)
                         .partialAggregation({}, {"spark_avg(c0)"})
                         .planNode();
  assertQuery(partialPlan, "SELECT row(0, 0)");

  // Grouped aggregation where group 0 contains only nulls and the
  // other groups mix nulls with real values. Again use at least two
  // row vectors to cover the multi-batch code paths.
  batches = {
      make_row_vector({
          make_nullable_flat_vector<int64_t>({0, 1, 2, 0, 1, 2, 0, 1, 2, 0}),
          make_nullable_flat_vector<int64_t>(
              {std::nullopt,
               std::nullopt,
               2,
               std::nullopt,
               10,
               9,
               std::nullopt,
               25,
               12,
               std::nullopt}),
      }),
      make_row_vector({
          make_nullable_flat_vector<int64_t>({0, 1, 2, 0, 1, 2, 0, 1, 2, 0}),
          make_nullable_flat_vector<int64_t>(
              {std::nullopt,
               10,
               20,
               std::nullopt,
               std::nullopt,
               25,
               std::nullopt,
               16,
               21,
               std::nullopt}),
      }),
  };
  createDuckDbTable(batches);
  testAggregations(
      batches,
      {"c0"},
      {"spark_avg(c1)"},
      "SELECT c0, avg(c1) FROM tmp GROUP BY c0");

  // Partial grouped aggregation: group 0 never sees a non-null value,
  // so its accumulator stays (0, 0); groups 1 and 2 accumulate the
  // non-null values (sums 61 and 89, counts 4 and 6).
  auto expectedResult = make_row_vector(
      {"c0", "c1"},
      {
          make_flat_vector<int64_t>({0, 1, 2}),
          make_row_vector(
              {"sum", "count"},
              {
                  make_flat_vector<double>({0, 61, 89}),
                  make_flat_vector<int64_t>({0, 4, 6}),
              }),
      });
  auto partialGroupedPlan = PlanBuilder()
                                .values(batches)
                                .partialAggregation({"c0"}, {"spark_avg(c1)"})
                                .planNode();
  assertQuery(partialGroupedPlan, expectedResult);
}

} // namespace
} // namespace kumo::pollux::functions::aggregate::sparksql::test
