// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//


#include <melon/init/init.h>
#include <gtest/gtest.h>

#include <pollux/testing/gtest_utils.h>
#include <pollux/dwio/parquet/register_parquet_writer.h>
#include <pollux/testing/exec/util/assert_query_builder.h>
#include <pollux/plan/plan_builder.h>
#include <pollux/functions/sparksql/aggregates/register.h>
#include <pollux/functions/sparksql/fuzzer/SparkQueryRunner.h>
#include <pollux/functions/sparksql/registration/register.h>
#include <pollux/parse/type_resolver.h>
#include <pollux/testing/vector/vector_test_base.h>

using namespace kumo;
using namespace kumo::pollux;
using namespace kumo::pollux::test;
using namespace kumo::pollux::plan;

namespace kumo::pollux::functions::sparksql::test {
    namespace {
        // Fixture for SparkQueryRunner tests. Inherits VectorBuilder for the
        // make_*_vector helpers used by the tests below.
        class SparkQueryRunnerTest : public ::testing::Test,
                                     public pollux::VectorBuilder {
        protected:
            // Runs once for the whole suite: pins the process-wide memory
            // manager and registers the Parquet writer factory (presumably
            // needed by SparkQueryRunner to serialize input batches — confirm).
            // Note: SetUpTestSuite is the non-deprecated gtest spelling of the
            // legacy SetUpTestCase.
            static void SetUpTestSuite() {
                memory::MemoryManager::testingSetInstance({});
                parquet::registerParquetWriterFactory();
            }

            // Runs before each test: registers Spark scalar and aggregate
            // functions (with an empty prefix) and the type resolver used by
            // the expression parser.
            void SetUp() override {
                pollux::functions::sparksql::registerFunctions("");
                pollux::functions::aggregate::sparksql::registerAggregateFunctions("");
                pollux::parse::registerTypeResolver();
            }
        };

        // This test requires a Spark coordinator running at localhost, so disable it
        // by default.
        TEST_F(SparkQueryRunnerTest, DISABLED_basic) {
            auto aggregatePool = rootPool_->addAggregateChild("basic");
            auto queryRunner = std::make_unique<fuzzer::SparkQueryRunner>(
                aggregatePool.get(), "localhost:15002", "test", "basic");

            // Global aggregation: 25 identical rows should collapse to a
            // single row holding the count 25.
            auto countInput = make_row_vector({
                make_constant<int64_t>(1, 25),
            });
            auto countRowType = ROW({"a"}, {BIGINT()});
            auto countResults =
                    queryRunner->execute("SELECT count(*) FROM tmp", {countInput}, countRowType);
            auto expectedCount = make_row_vector({
                make_constant<int64_t>(25, 1),
            });
            exec::test::assertEqualResults(countResults, countRowType, {expectedCount});

            // Grouped aggregation: each key in 0..4 appears exactly five
            // times, so every group's count is 5.
            auto groupInput = make_row_vector({
                make_flat_vector<int64_t>({
                    0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2,
                    3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4
                }),
                make_flat_vector<int64_t>({
                    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
                }),
            });
            auto groupRowType = ROW({"a", "b"}, {BIGINT(), BIGINT()});
            auto groupResults = queryRunner->execute(
                "SELECT c0, count(*) FROM tmp GROUP BY 1", {groupInput}, groupRowType);
            auto expectedGroups = make_row_vector({
                make_flat_vector<int64_t>({0, 1, 2, 3, 4}),
                make_flat_vector<int64_t>({5, 5, 5, 5, 5}),
            });
            exec::test::assertEqualResults(groupResults, groupRowType, {expectedGroups});
        }

        // This test requires a Spark coordinator running at localhost, so disable it
        // by default.
        TEST_F(SparkQueryRunnerTest, DISABLED_decimal) {
            auto aggregatePool = rootPool_->addAggregateChild("decimal");
            auto queryRunner = std::make_unique<fuzzer::SparkQueryRunner>(
                aggregatePool.get(), "localhost:15002", "test", "decimal");

            // abs() of a non-negative long-decimal constant is the identity,
            // so the round trip through Spark must return the input unchanged.
            auto decimalInput = make_row_vector({
                make_constant<int128_t>(123456789, 25, DECIMAL(34, 2)),
            });
            auto resultType = ROW({"a"}, {DECIMAL(34, 2)});
            auto results =
                    queryRunner->execute("SELECT abs(c0) FROM tmp", {decimalInput}, resultType);
            exec::test::assertEqualResults(results, resultType, {decimalInput});
        }

        // This test requires a Spark coordinator running at localhost, so disable it
        // by default.
        TEST_F(SparkQueryRunnerTest, DISABLED_fuzzer) {
            // Both children of a row vector must have the same number of rows;
            // the nullable c1 previously had 6 entries against c0's 5.
            auto data = make_row_vector({
                make_flat_vector<int64_t>({1, 2, 3, 4, 5}),
                make_nullable_flat_vector<int64_t>({std::nullopt, 1, 2, std::nullopt, 4}),
            });

            // Global aggregation followed by a projection; array_sort makes
            // the collect_list output order-independent for comparison.
            auto plan = PlanBuilder()
                    .values({data})
                    .singleAggregation({}, {"sum(c0)", "collect_list(c1)"})
                    .project({"a0", "array_sort(a1)"})
                    .planNode();

            auto aggregatePool = rootPool_->addAggregateChild("fuzzer");
            auto queryRunner = std::make_unique<fuzzer::SparkQueryRunner>(
                aggregatePool.get(), "localhost:15002", "test", "fuzzer");
            auto sql = queryRunner->toSql(plan);
            ASSERT_TRUE(sql.has_value());

            // Run the generated SQL on Spark and the plan locally; the two
            // result sets must agree.
            auto sparkResults = queryRunner->execute(
                sql.value(), {data}, ROW({"a", "b"}, {BIGINT(), ARRAY(BIGINT())}));

            auto polluxResults = exec::test::AssertQueryBuilder(plan).copyResults(pool());
            exec::test::assertEqualResults(
                sparkResults, plan->outputType(), {polluxResults});
        }

        // Verifies plan-to-SQL translation; no Spark coordinator is needed,
        // so the connection arguments are placeholders.
        TEST_F(SparkQueryRunnerTest, toSql) {
            auto aggregatePool = rootPool_->addAggregateChild("toSql");
            auto queryRunner = std::make_unique<fuzzer::SparkQueryRunner>(
                aggregatePool.get(), "unused", "unused", "unused");

            auto dataType = ROW({"c0", "c1", "c2"}, {DOUBLE(), DOUBLE(), BOOLEAN()});

            // Grouped aggregation over a table scan.
            auto groupByPlan = PlanBuilder()
                    .tableScan("tmp", dataType)
                    .singleAggregation({"c1"}, {"avg(c0)"})
                    .planNode();
            EXPECT_EQ(
                queryRunner->toSql(groupByPlan),
                "SELECT c1, avg(c0) as a0 FROM tmp GROUP BY c1");

            // Projection on top of an aggregation becomes a subquery.
            auto projectPlan = PlanBuilder()
                    .tableScan("tmp", dataType)
                    .singleAggregation({"c1"}, {"sum(c0)"})
                    .project({"a0 / c1"})
                    .planNode();
            EXPECT_EQ(
                queryRunner->toSql(projectPlan),
                "SELECT (a0 / c1) as p0 FROM (SELECT c1, sum(c0) as a0 FROM tmp GROUP BY c1)");

            // A filter mask (c2) on the first aggregate only.
            auto maskedPlan = PlanBuilder()
                    .tableScan("tmp", dataType)
                    .singleAggregation({}, {"avg(c0)", "avg(c1)"}, {"c2"})
                    .planNode();
            EXPECT_EQ(
                queryRunner->toSql(maskedPlan),
                "SELECT avg(c0) filter (where c2) as a0, avg(c1) as a1 FROM tmp");

            // Distinct aggregation over a values node.
            auto emptyData =
                    make_row_vector({make_flat_vector<int64_t>({}), make_flat_vector<int64_t>({})});
            auto distinctPlan = PlanBuilder()
                    .values({emptyData})
                    .singleAggregation({}, {"sum(distinct c0)"})
                    .planNode();
            EXPECT_EQ(queryRunner->toSql(distinctPlan), "SELECT sum(distinct c0) as a0 FROM tmp");
        }
    } // namespace
} // namespace kumo::pollux::functions::sparksql::test

int main(int argc, char **argv) {
    // InitGoogleTest runs first so gtest can strip its own flags before
    // melon::Init parses the remaining arguments.
    testing::InitGoogleTest(&argc, argv);
    melon::Init initGuard{&argc, &argv, false};
    return RUN_ALL_TESTS();
}
